]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9.1-3.8.2-201303041742.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.8.2-201303041742.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b89a739..dba90c5 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38 -.*
39 +.[^g]*
40 +.gen*
41 .*.d
42 .mm
43 53c700_d.h
44 @@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48 +PERF*
49 SCCS
50 System.map*
51 TAGS
52 @@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56 +ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60 @@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64 +builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70 +clut_vga16.c
71 +common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78 +config.c
79 config.mak
80 config.mak.autogen
81 +config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85 @@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89 +dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93 +exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97 @@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101 +gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108 +hash
109 +hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113 @@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117 -kconfig
118 +kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125 -linux
126 +lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130 @@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134 -media
135 mconf
136 +mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143 +mkpiggy
144 mkprep
145 mkregtable
146 mktables
147 @@ -185,6 +204,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151 +parse-events*
152 +pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156 @@ -194,6 +215,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160 +pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164 @@ -203,7 +225,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168 +realmode.lds
169 +realmode.relocs
170 recordmcount
171 +regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175 @@ -213,8 +238,11 @@ series
176 setup
177 setup.bin
178 setup.elf
179 +size_overflow_hash.h
180 sImage
181 +slabinfo
182 sm_tbl*
183 +sortextable
184 split-include
185 syscalltab.h
186 tables.c
187 @@ -224,6 +252,7 @@ tftpboot.img
188 timeconst.h
189 times.h*
190 trix_boot.h
191 +user_constants.h
192 utsrelease.h*
193 vdso-syms.lds
194 vdso.lds
195 @@ -235,13 +264,17 @@ vdso32.lds
196 vdso32.so.dbg
197 vdso64.lds
198 vdso64.so.dbg
199 +vdsox32.lds
200 +vdsox32-syms.lds
201 version.h*
202 vmImage
203 vmlinux
204 vmlinux-*
205 vmlinux.aout
206 vmlinux.bin.all
207 +vmlinux.bin.bz2
208 vmlinux.lds
209 +vmlinux.relocs
210 vmlinuz
211 voffset.h
212 vsyscall.lds
213 @@ -249,9 +282,11 @@ vsyscall_32.lds
214 wanxlfw.inc
215 uImage
216 unifdef
217 +utsrelease.h
218 wakeup.bin
219 wakeup.elf
220 wakeup.lds
221 zImage*
222 zconf.hash.c
223 +zconf.lex.c
224 zoffset.h
225 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
226 index 986614d..0afd461 100644
227 --- a/Documentation/kernel-parameters.txt
228 +++ b/Documentation/kernel-parameters.txt
229 @@ -922,6 +922,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
230 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
231 Default: 1024
232
233 + grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
234 + ignore grsecurity's /proc restrictions
235 +
236 +
237 hashdist= [KNL,NUMA] Large hashes allocated during boot
238 are distributed across NUMA nodes. Defaults on
239 for 64-bit NUMA, off otherwise.
240 @@ -2121,6 +2125,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
241 the specified number of seconds. This is to be used if
242 your oopses keep scrolling off the screen.
243
244 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
245 + virtualization environments that don't cope well with the
246 + expand down segment used by UDEREF on X86-32 or the frequent
247 + page table updates on X86-64.
248 +
249 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
250 +
251 pcbit= [HW,ISDN]
252
253 pcd. [PARIDE]
254 diff --git a/Makefile b/Makefile
255 index 20d5318..19c7540 100644
256 --- a/Makefile
257 +++ b/Makefile
258 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
259
260 HOSTCC = gcc
261 HOSTCXX = g++
262 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
263 -HOSTCXXFLAGS = -O2
264 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
265 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
266 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
267
268 # Decide whether to build built-in, modular, or both.
269 # Normally, just do built-in.
270 @@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
271 # Rules shared between *config targets and build targets
272
273 # Basic helpers built in scripts/
274 -PHONY += scripts_basic
275 -scripts_basic:
276 +PHONY += scripts_basic gcc-plugins
277 +scripts_basic: gcc-plugins
278 $(Q)$(MAKE) $(build)=scripts/basic
279 $(Q)rm -f .tmp_quiet_recordmcount
280
281 @@ -575,6 +576,64 @@ else
282 KBUILD_CFLAGS += -O2
283 endif
284
285 +ifndef DISABLE_PAX_PLUGINS
286 +ifeq ($(call cc-ifversion, -ge, 0408, y), y)
287 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
288 +else
289 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
290 +endif
291 +ifneq ($(PLUGINCC),)
292 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
293 +ifndef CONFIG_UML
294 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
295 +endif
296 +endif
297 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
298 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
299 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
300 +endif
301 +ifdef CONFIG_KALLOCSTAT_PLUGIN
302 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
303 +endif
304 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
305 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
306 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
307 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
308 +endif
309 +ifdef CONFIG_CHECKER_PLUGIN
310 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
311 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
312 +endif
313 +endif
314 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
315 +ifdef CONFIG_PAX_SIZE_OVERFLOW
316 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
317 +endif
318 +ifdef CONFIG_PAX_LATENT_ENTROPY
319 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
320 +endif
321 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
322 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
323 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
324 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
325 +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
326 +ifeq ($(KBUILD_EXTMOD),)
327 +gcc-plugins:
328 + $(Q)$(MAKE) $(build)=tools/gcc
329 +else
330 +gcc-plugins: ;
331 +endif
332 +else
333 +gcc-plugins:
334 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
335 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
336 +else
337 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
338 +endif
339 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
340 +endif
341 +endif
342 +
343 include $(srctree)/arch/$(SRCARCH)/Makefile
344
345 ifdef CONFIG_READABLE_ASM
346 @@ -731,7 +790,7 @@ export mod_sign_cmd
347
348
349 ifeq ($(KBUILD_EXTMOD),)
350 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
351 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
352
353 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
354 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
355 @@ -778,6 +837,8 @@ endif
356
357 # The actual objects are generated when descending,
358 # make sure no implicit rule kicks in
359 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
360 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
361 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
362
363 # Handle descending into subdirectories listed in $(vmlinux-dirs)
364 @@ -787,7 +848,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
365 # Error messages still appears in the original language
366
367 PHONY += $(vmlinux-dirs)
368 -$(vmlinux-dirs): prepare scripts
369 +$(vmlinux-dirs): gcc-plugins prepare scripts
370 $(Q)$(MAKE) $(build)=$@
371
372 # Store (new) KERNELRELASE string in include/config/kernel.release
373 @@ -831,6 +892,7 @@ prepare0: archprepare FORCE
374 $(Q)$(MAKE) $(build)=.
375
376 # All the preparing..
377 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
378 prepare: prepare0
379
380 # Generate some files
381 @@ -938,6 +1000,8 @@ all: modules
382 # using awk while concatenating to the final file.
383
384 PHONY += modules
385 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
386 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
387 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
388 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
389 @$(kecho) ' Building modules, stage 2.';
390 @@ -953,7 +1017,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
391
392 # Target to prepare building external modules
393 PHONY += modules_prepare
394 -modules_prepare: prepare scripts
395 +modules_prepare: gcc-plugins prepare scripts
396
397 # Target to install modules
398 PHONY += modules_install
399 @@ -1019,7 +1083,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
400 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
401 signing_key.priv signing_key.x509 x509.genkey \
402 extra_certificates signing_key.x509.keyid \
403 - signing_key.x509.signer
404 + signing_key.x509.signer tools/gcc/size_overflow_hash.h
405
406 # clean - Delete most, but leave enough to build external modules
407 #
408 @@ -1059,6 +1123,7 @@ distclean: mrproper
409 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
410 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
411 -o -name '.*.rej' \
412 + -o -name '.*.rej' -o -name '*.so' \
413 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
414 -type f -print | xargs rm -f
415
416 @@ -1219,6 +1284,8 @@ PHONY += $(module-dirs) modules
417 $(module-dirs): crmodverdir $(objtree)/Module.symvers
418 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
419
420 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
421 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
422 modules: $(module-dirs)
423 @$(kecho) ' Building modules, stage 2.';
424 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
425 @@ -1355,17 +1422,21 @@ else
426 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
427 endif
428
429 -%.s: %.c prepare scripts FORCE
430 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
431 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
432 +%.s: %.c gcc-plugins prepare scripts FORCE
433 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
434 %.i: %.c prepare scripts FORCE
435 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
436 -%.o: %.c prepare scripts FORCE
437 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
438 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
439 +%.o: %.c gcc-plugins prepare scripts FORCE
440 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
441 %.lst: %.c prepare scripts FORCE
442 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
443 -%.s: %.S prepare scripts FORCE
444 +%.s: %.S gcc-plugins prepare scripts FORCE
445 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
446 -%.o: %.S prepare scripts FORCE
447 +%.o: %.S gcc-plugins prepare scripts FORCE
448 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
449 %.symtypes: %.c prepare scripts FORCE
450 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
451 @@ -1375,11 +1446,15 @@ endif
452 $(cmd_crmodverdir)
453 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
454 $(build)=$(build-dir)
455 -%/: prepare scripts FORCE
456 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
457 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
458 +%/: gcc-plugins prepare scripts FORCE
459 $(cmd_crmodverdir)
460 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
461 $(build)=$(build-dir)
462 -%.ko: prepare scripts FORCE
463 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
464 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
465 +%.ko: gcc-plugins prepare scripts FORCE
466 $(cmd_crmodverdir)
467 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
468 $(build)=$(build-dir) $(@:.ko=.o)
469 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
470 index c2cbe4f..f7264b4 100644
471 --- a/arch/alpha/include/asm/atomic.h
472 +++ b/arch/alpha/include/asm/atomic.h
473 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
474 #define atomic_dec(v) atomic_sub(1,(v))
475 #define atomic64_dec(v) atomic64_sub(1,(v))
476
477 +#define atomic64_read_unchecked(v) atomic64_read(v)
478 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
479 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
480 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
481 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
482 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
483 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
484 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
485 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
486 +
487 #define smp_mb__before_atomic_dec() smp_mb()
488 #define smp_mb__after_atomic_dec() smp_mb()
489 #define smp_mb__before_atomic_inc() smp_mb()
490 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
491 index ad368a9..fbe0f25 100644
492 --- a/arch/alpha/include/asm/cache.h
493 +++ b/arch/alpha/include/asm/cache.h
494 @@ -4,19 +4,19 @@
495 #ifndef __ARCH_ALPHA_CACHE_H
496 #define __ARCH_ALPHA_CACHE_H
497
498 +#include <linux/const.h>
499
500 /* Bytes per L1 (data) cache line. */
501 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
502 -# define L1_CACHE_BYTES 64
503 # define L1_CACHE_SHIFT 6
504 #else
505 /* Both EV4 and EV5 are write-through, read-allocate,
506 direct-mapped, physical.
507 */
508 -# define L1_CACHE_BYTES 32
509 # define L1_CACHE_SHIFT 5
510 #endif
511
512 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
513 #define SMP_CACHE_BYTES L1_CACHE_BYTES
514
515 #endif
516 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
517 index 968d999..d36b2df 100644
518 --- a/arch/alpha/include/asm/elf.h
519 +++ b/arch/alpha/include/asm/elf.h
520 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
521
522 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
523
524 +#ifdef CONFIG_PAX_ASLR
525 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
526 +
527 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
528 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
529 +#endif
530 +
531 /* $0 is set by ld.so to a pointer to a function which might be
532 registered using atexit. This provides a mean for the dynamic
533 linker to call DT_FINI functions for shared libraries that have
534 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
535 index bc2a0da..8ad11ee 100644
536 --- a/arch/alpha/include/asm/pgalloc.h
537 +++ b/arch/alpha/include/asm/pgalloc.h
538 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
539 pgd_set(pgd, pmd);
540 }
541
542 +static inline void
543 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
544 +{
545 + pgd_populate(mm, pgd, pmd);
546 +}
547 +
548 extern pgd_t *pgd_alloc(struct mm_struct *mm);
549
550 static inline void
551 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
552 index 81a4342..348b927 100644
553 --- a/arch/alpha/include/asm/pgtable.h
554 +++ b/arch/alpha/include/asm/pgtable.h
555 @@ -102,6 +102,17 @@ struct vm_area_struct;
556 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
557 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
558 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
559 +
560 +#ifdef CONFIG_PAX_PAGEEXEC
561 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
562 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
563 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
564 +#else
565 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
566 +# define PAGE_COPY_NOEXEC PAGE_COPY
567 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
568 +#endif
569 +
570 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
571
572 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
573 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
574 index 2fd00b7..cfd5069 100644
575 --- a/arch/alpha/kernel/module.c
576 +++ b/arch/alpha/kernel/module.c
577 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
578
579 /* The small sections were sorted to the end of the segment.
580 The following should definitely cover them. */
581 - gp = (u64)me->module_core + me->core_size - 0x8000;
582 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
583 got = sechdrs[me->arch.gotsecindex].sh_addr;
584
585 for (i = 0; i < n; i++) {
586 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
587 index 14db93e..47bed62 100644
588 --- a/arch/alpha/kernel/osf_sys.c
589 +++ b/arch/alpha/kernel/osf_sys.c
590 @@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
591 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
592
593 static unsigned long
594 -arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
595 - unsigned long limit)
596 +arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
597 + unsigned long limit, unsigned long flags)
598 {
599 struct vm_area_struct *vma = find_vma(current->mm, addr);
600 -
601 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
602 while (1) {
603 /* At this point: (!vma || addr < vma->vm_end). */
604 if (limit - len < addr)
605 return -ENOMEM;
606 - if (!vma || addr + len <= vma->vm_start)
607 + if (check_heap_stack_gap(vma, addr, len, offset))
608 return addr;
609 addr = vma->vm_end;
610 vma = vma->vm_next;
611 @@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
612 merely specific addresses, but regions of memory -- perhaps
613 this feature should be incorporated into all ports? */
614
615 +#ifdef CONFIG_PAX_RANDMMAP
616 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
617 +#endif
618 +
619 if (addr) {
620 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
621 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
622 if (addr != (unsigned long) -ENOMEM)
623 return addr;
624 }
625
626 /* Next, try allocating at TASK_UNMAPPED_BASE. */
627 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
628 - len, limit);
629 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
630 +
631 if (addr != (unsigned long) -ENOMEM)
632 return addr;
633
634 /* Finally, try allocating in low memory. */
635 - addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
636 + addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
637
638 return addr;
639 }
640 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
641 index 0c4132d..88f0d53 100644
642 --- a/arch/alpha/mm/fault.c
643 +++ b/arch/alpha/mm/fault.c
644 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
645 __reload_thread(pcb);
646 }
647
648 +#ifdef CONFIG_PAX_PAGEEXEC
649 +/*
650 + * PaX: decide what to do with offenders (regs->pc = fault address)
651 + *
652 + * returns 1 when task should be killed
653 + * 2 when patched PLT trampoline was detected
654 + * 3 when unpatched PLT trampoline was detected
655 + */
656 +static int pax_handle_fetch_fault(struct pt_regs *regs)
657 +{
658 +
659 +#ifdef CONFIG_PAX_EMUPLT
660 + int err;
661 +
662 + do { /* PaX: patched PLT emulation #1 */
663 + unsigned int ldah, ldq, jmp;
664 +
665 + err = get_user(ldah, (unsigned int *)regs->pc);
666 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
667 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
668 +
669 + if (err)
670 + break;
671 +
672 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
673 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
674 + jmp == 0x6BFB0000U)
675 + {
676 + unsigned long r27, addr;
677 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
678 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
679 +
680 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
681 + err = get_user(r27, (unsigned long *)addr);
682 + if (err)
683 + break;
684 +
685 + regs->r27 = r27;
686 + regs->pc = r27;
687 + return 2;
688 + }
689 + } while (0);
690 +
691 + do { /* PaX: patched PLT emulation #2 */
692 + unsigned int ldah, lda, br;
693 +
694 + err = get_user(ldah, (unsigned int *)regs->pc);
695 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
696 + err |= get_user(br, (unsigned int *)(regs->pc+8));
697 +
698 + if (err)
699 + break;
700 +
701 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
702 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
703 + (br & 0xFFE00000U) == 0xC3E00000U)
704 + {
705 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
706 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
707 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
708 +
709 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
710 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
711 + return 2;
712 + }
713 + } while (0);
714 +
715 + do { /* PaX: unpatched PLT emulation */
716 + unsigned int br;
717 +
718 + err = get_user(br, (unsigned int *)regs->pc);
719 +
720 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
721 + unsigned int br2, ldq, nop, jmp;
722 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
723 +
724 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
725 + err = get_user(br2, (unsigned int *)addr);
726 + err |= get_user(ldq, (unsigned int *)(addr+4));
727 + err |= get_user(nop, (unsigned int *)(addr+8));
728 + err |= get_user(jmp, (unsigned int *)(addr+12));
729 + err |= get_user(resolver, (unsigned long *)(addr+16));
730 +
731 + if (err)
732 + break;
733 +
734 + if (br2 == 0xC3600000U &&
735 + ldq == 0xA77B000CU &&
736 + nop == 0x47FF041FU &&
737 + jmp == 0x6B7B0000U)
738 + {
739 + regs->r28 = regs->pc+4;
740 + regs->r27 = addr+16;
741 + regs->pc = resolver;
742 + return 3;
743 + }
744 + }
745 + } while (0);
746 +#endif
747 +
748 + return 1;
749 +}
750 +
751 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
752 +{
753 + unsigned long i;
754 +
755 + printk(KERN_ERR "PAX: bytes at PC: ");
756 + for (i = 0; i < 5; i++) {
757 + unsigned int c;
758 + if (get_user(c, (unsigned int *)pc+i))
759 + printk(KERN_CONT "???????? ");
760 + else
761 + printk(KERN_CONT "%08x ", c);
762 + }
763 + printk("\n");
764 +}
765 +#endif
766
767 /*
768 * This routine handles page faults. It determines the address,
769 @@ -133,8 +251,29 @@ retry:
770 good_area:
771 si_code = SEGV_ACCERR;
772 if (cause < 0) {
773 - if (!(vma->vm_flags & VM_EXEC))
774 + if (!(vma->vm_flags & VM_EXEC)) {
775 +
776 +#ifdef CONFIG_PAX_PAGEEXEC
777 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
778 + goto bad_area;
779 +
780 + up_read(&mm->mmap_sem);
781 + switch (pax_handle_fetch_fault(regs)) {
782 +
783 +#ifdef CONFIG_PAX_EMUPLT
784 + case 2:
785 + case 3:
786 + return;
787 +#endif
788 +
789 + }
790 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
791 + do_group_exit(SIGKILL);
792 +#else
793 goto bad_area;
794 +#endif
795 +
796 + }
797 } else if (!cause) {
798 /* Allow reads even for write-only mappings */
799 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
800 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
801 index 67874b8..0e40765 100644
802 --- a/arch/arm/Kconfig
803 +++ b/arch/arm/Kconfig
804 @@ -1813,7 +1813,7 @@ config ALIGNMENT_TRAP
805
806 config UACCESS_WITH_MEMCPY
807 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
808 - depends on MMU
809 + depends on MMU && !PAX_MEMORY_UDEREF
810 default y if CPU_FEROCEON
811 help
812 Implement faster copy_to_user and clear_user methods for CPU
813 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
814 index c79f61f..9ac0642 100644
815 --- a/arch/arm/include/asm/atomic.h
816 +++ b/arch/arm/include/asm/atomic.h
817 @@ -17,17 +17,35 @@
818 #include <asm/barrier.h>
819 #include <asm/cmpxchg.h>
820
821 +#ifdef CONFIG_GENERIC_ATOMIC64
822 +#include <asm-generic/atomic64.h>
823 +#endif
824 +
825 #define ATOMIC_INIT(i) { (i) }
826
827 #ifdef __KERNEL__
828
829 +#define _ASM_EXTABLE(from, to) \
830 +" .pushsection __ex_table,\"a\"\n"\
831 +" .align 3\n" \
832 +" .long " #from ", " #to"\n" \
833 +" .popsection"
834 +
835 /*
836 * On ARM, ordinary assignment (str instruction) doesn't clear the local
837 * strex/ldrex monitor on some implementations. The reason we can use it for
838 * atomic_set() is the clrex or dummy strex done on every exception return.
839 */
840 #define atomic_read(v) (*(volatile int *)&(v)->counter)
841 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
842 +{
843 + return v->counter;
844 +}
845 #define atomic_set(v,i) (((v)->counter) = (i))
846 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
847 +{
848 + v->counter = i;
849 +}
850
851 #if __LINUX_ARM_ARCH__ >= 6
852
853 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
854 int result;
855
856 __asm__ __volatile__("@ atomic_add\n"
857 +"1: ldrex %1, [%3]\n"
858 +" adds %0, %1, %4\n"
859 +
860 +#ifdef CONFIG_PAX_REFCOUNT
861 +" bvc 3f\n"
862 +"2: bkpt 0xf103\n"
863 +"3:\n"
864 +#endif
865 +
866 +" strex %1, %0, [%3]\n"
867 +" teq %1, #0\n"
868 +" bne 1b"
869 +
870 +#ifdef CONFIG_PAX_REFCOUNT
871 +"\n4:\n"
872 + _ASM_EXTABLE(2b, 4b)
873 +#endif
874 +
875 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
876 + : "r" (&v->counter), "Ir" (i)
877 + : "cc");
878 +}
879 +
880 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
881 +{
882 + unsigned long tmp;
883 + int result;
884 +
885 + __asm__ __volatile__("@ atomic_add_unchecked\n"
886 "1: ldrex %0, [%3]\n"
887 " add %0, %0, %4\n"
888 " strex %1, %0, [%3]\n"
889 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
890 smp_mb();
891
892 __asm__ __volatile__("@ atomic_add_return\n"
893 +"1: ldrex %1, [%3]\n"
894 +" adds %0, %1, %4\n"
895 +
896 +#ifdef CONFIG_PAX_REFCOUNT
897 +" bvc 3f\n"
898 +" mov %0, %1\n"
899 +"2: bkpt 0xf103\n"
900 +"3:\n"
901 +#endif
902 +
903 +" strex %1, %0, [%3]\n"
904 +" teq %1, #0\n"
905 +" bne 1b"
906 +
907 +#ifdef CONFIG_PAX_REFCOUNT
908 +"\n4:\n"
909 + _ASM_EXTABLE(2b, 4b)
910 +#endif
911 +
912 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
913 + : "r" (&v->counter), "Ir" (i)
914 + : "cc");
915 +
916 + smp_mb();
917 +
918 + return result;
919 +}
920 +
921 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
922 +{
923 + unsigned long tmp;
924 + int result;
925 +
926 + smp_mb();
927 +
928 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
929 "1: ldrex %0, [%3]\n"
930 " add %0, %0, %4\n"
931 " strex %1, %0, [%3]\n"
932 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
933 int result;
934
935 __asm__ __volatile__("@ atomic_sub\n"
936 +"1: ldrex %1, [%3]\n"
937 +" subs %0, %1, %4\n"
938 +
939 +#ifdef CONFIG_PAX_REFCOUNT
940 +" bvc 3f\n"
941 +"2: bkpt 0xf103\n"
942 +"3:\n"
943 +#endif
944 +
945 +" strex %1, %0, [%3]\n"
946 +" teq %1, #0\n"
947 +" bne 1b"
948 +
949 +#ifdef CONFIG_PAX_REFCOUNT
950 +"\n4:\n"
951 + _ASM_EXTABLE(2b, 4b)
952 +#endif
953 +
954 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
955 + : "r" (&v->counter), "Ir" (i)
956 + : "cc");
957 +}
958 +
959 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
960 +{
961 + unsigned long tmp;
962 + int result;
963 +
964 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
965 "1: ldrex %0, [%3]\n"
966 " sub %0, %0, %4\n"
967 " strex %1, %0, [%3]\n"
968 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
969 smp_mb();
970
971 __asm__ __volatile__("@ atomic_sub_return\n"
972 -"1: ldrex %0, [%3]\n"
973 -" sub %0, %0, %4\n"
974 +"1: ldrex %1, [%3]\n"
975 +" subs %0, %1, %4\n"
976 +
977 +#ifdef CONFIG_PAX_REFCOUNT
978 +" bvc 3f\n"
979 +" mov %0, %1\n"
980 +"2: bkpt 0xf103\n"
981 +"3:\n"
982 +#endif
983 +
984 " strex %1, %0, [%3]\n"
985 " teq %1, #0\n"
986 " bne 1b"
987 +
988 +#ifdef CONFIG_PAX_REFCOUNT
989 +"\n4:\n"
990 + _ASM_EXTABLE(2b, 4b)
991 +#endif
992 +
993 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
994 : "r" (&v->counter), "Ir" (i)
995 : "cc");
996 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
997 return oldval;
998 }
999
1000 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1001 +{
1002 + unsigned long oldval, res;
1003 +
1004 + smp_mb();
1005 +
1006 + do {
1007 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1008 + "ldrex %1, [%3]\n"
1009 + "mov %0, #0\n"
1010 + "teq %1, %4\n"
1011 + "strexeq %0, %5, [%3]\n"
1012 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1013 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
1014 + : "cc");
1015 + } while (res);
1016 +
1017 + smp_mb();
1018 +
1019 + return oldval;
1020 +}
1021 +
1022 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1023 {
1024 unsigned long tmp, tmp2;
1025 @@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1026
1027 return val;
1028 }
1029 +
1030 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1031 +{
1032 + return atomic_add_return(i, v);
1033 +}
1034 +
1035 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1036 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1037 +{
1038 + (void) atomic_add_return(i, v);
1039 +}
1040
1041 static inline int atomic_sub_return(int i, atomic_t *v)
1042 {
1043 @@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1044 return val;
1045 }
1046 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1047 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1048 +{
1049 + (void) atomic_sub_return(i, v);
1050 +}
1051
1052 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1053 {
1054 @@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1055 return ret;
1056 }
1057
1058 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1059 +{
1060 + return atomic_cmpxchg(v, old, new);
1061 +}
1062 +
1063 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1064 {
1065 unsigned long flags;
1066 @@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1067 #endif /* __LINUX_ARM_ARCH__ */
1068
1069 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1070 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1071 +{
1072 + return xchg(&v->counter, new);
1073 +}
1074
1075 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1076 {
1077 @@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1078 }
1079
1080 #define atomic_inc(v) atomic_add(1, v)
1081 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1082 +{
1083 + atomic_add_unchecked(1, v);
1084 +}
1085 #define atomic_dec(v) atomic_sub(1, v)
1086 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1087 +{
1088 + atomic_sub_unchecked(1, v);
1089 +}
1090
1091 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1092 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1093 +{
1094 + return atomic_add_return_unchecked(1, v) == 0;
1095 +}
1096 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1097 #define atomic_inc_return(v) (atomic_add_return(1, v))
1098 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1099 +{
1100 + return atomic_add_return_unchecked(1, v);
1101 +}
1102 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1103 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1104
1105 @@ -241,6 +428,14 @@ typedef struct {
1106 u64 __aligned(8) counter;
1107 } atomic64_t;
1108
1109 +#ifdef CONFIG_PAX_REFCOUNT
1110 +typedef struct {
1111 + u64 __aligned(8) counter;
1112 +} atomic64_unchecked_t;
1113 +#else
1114 +typedef atomic64_t atomic64_unchecked_t;
1115 +#endif
1116 +
1117 #define ATOMIC64_INIT(i) { (i) }
1118
1119 static inline u64 atomic64_read(const atomic64_t *v)
1120 @@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1121 return result;
1122 }
1123
1124 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1125 +{
1126 + u64 result;
1127 +
1128 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1129 +" ldrexd %0, %H0, [%1]"
1130 + : "=&r" (result)
1131 + : "r" (&v->counter), "Qo" (v->counter)
1132 + );
1133 +
1134 + return result;
1135 +}
1136 +
1137 static inline void atomic64_set(atomic64_t *v, u64 i)
1138 {
1139 u64 tmp;
1140 @@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1141 : "cc");
1142 }
1143
1144 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1145 +{
1146 + u64 tmp;
1147 +
1148 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1149 +"1: ldrexd %0, %H0, [%2]\n"
1150 +" strexd %0, %3, %H3, [%2]\n"
1151 +" teq %0, #0\n"
1152 +" bne 1b"
1153 + : "=&r" (tmp), "=Qo" (v->counter)
1154 + : "r" (&v->counter), "r" (i)
1155 + : "cc");
1156 +}
1157 +
1158 static inline void atomic64_add(u64 i, atomic64_t *v)
1159 {
1160 u64 result;
1161 @@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1162 __asm__ __volatile__("@ atomic64_add\n"
1163 "1: ldrexd %0, %H0, [%3]\n"
1164 " adds %0, %0, %4\n"
1165 +" adcs %H0, %H0, %H4\n"
1166 +
1167 +#ifdef CONFIG_PAX_REFCOUNT
1168 +" bvc 3f\n"
1169 +"2: bkpt 0xf103\n"
1170 +"3:\n"
1171 +#endif
1172 +
1173 +" strexd %1, %0, %H0, [%3]\n"
1174 +" teq %1, #0\n"
1175 +" bne 1b"
1176 +
1177 +#ifdef CONFIG_PAX_REFCOUNT
1178 +"\n4:\n"
1179 + _ASM_EXTABLE(2b, 4b)
1180 +#endif
1181 +
1182 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1183 + : "r" (&v->counter), "r" (i)
1184 + : "cc");
1185 +}
1186 +
1187 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1188 +{
1189 + u64 result;
1190 + unsigned long tmp;
1191 +
1192 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1193 +"1: ldrexd %0, %H0, [%3]\n"
1194 +" adds %0, %0, %4\n"
1195 " adc %H0, %H0, %H4\n"
1196 " strexd %1, %0, %H0, [%3]\n"
1197 " teq %1, #0\n"
1198 @@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1199
1200 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1201 {
1202 - u64 result;
1203 - unsigned long tmp;
1204 + u64 result, tmp;
1205
1206 smp_mb();
1207
1208 __asm__ __volatile__("@ atomic64_add_return\n"
1209 +"1: ldrexd %1, %H1, [%3]\n"
1210 +" adds %0, %1, %4\n"
1211 +" adcs %H0, %H1, %H4\n"
1212 +
1213 +#ifdef CONFIG_PAX_REFCOUNT
1214 +" bvc 3f\n"
1215 +" mov %0, %1\n"
1216 +" mov %H0, %H1\n"
1217 +"2: bkpt 0xf103\n"
1218 +"3:\n"
1219 +#endif
1220 +
1221 +" strexd %1, %0, %H0, [%3]\n"
1222 +" teq %1, #0\n"
1223 +" bne 1b"
1224 +
1225 +#ifdef CONFIG_PAX_REFCOUNT
1226 +"\n4:\n"
1227 + _ASM_EXTABLE(2b, 4b)
1228 +#endif
1229 +
1230 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1231 + : "r" (&v->counter), "r" (i)
1232 + : "cc");
1233 +
1234 + smp_mb();
1235 +
1236 + return result;
1237 +}
1238 +
1239 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1240 +{
1241 + u64 result;
1242 + unsigned long tmp;
1243 +
1244 + smp_mb();
1245 +
1246 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1247 "1: ldrexd %0, %H0, [%3]\n"
1248 " adds %0, %0, %4\n"
1249 " adc %H0, %H0, %H4\n"
1250 @@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1251 __asm__ __volatile__("@ atomic64_sub\n"
1252 "1: ldrexd %0, %H0, [%3]\n"
1253 " subs %0, %0, %4\n"
1254 +" sbcs %H0, %H0, %H4\n"
1255 +
1256 +#ifdef CONFIG_PAX_REFCOUNT
1257 +" bvc 3f\n"
1258 +"2: bkpt 0xf103\n"
1259 +"3:\n"
1260 +#endif
1261 +
1262 +" strexd %1, %0, %H0, [%3]\n"
1263 +" teq %1, #0\n"
1264 +" bne 1b"
1265 +
1266 +#ifdef CONFIG_PAX_REFCOUNT
1267 +"\n4:\n"
1268 + _ASM_EXTABLE(2b, 4b)
1269 +#endif
1270 +
1271 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1272 + : "r" (&v->counter), "r" (i)
1273 + : "cc");
1274 +}
1275 +
1276 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1277 +{
1278 + u64 result;
1279 + unsigned long tmp;
1280 +
1281 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1282 +"1: ldrexd %0, %H0, [%3]\n"
1283 +" subs %0, %0, %4\n"
1284 " sbc %H0, %H0, %H4\n"
1285 " strexd %1, %0, %H0, [%3]\n"
1286 " teq %1, #0\n"
1287 @@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1288
1289 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1290 {
1291 - u64 result;
1292 - unsigned long tmp;
1293 + u64 result, tmp;
1294
1295 smp_mb();
1296
1297 __asm__ __volatile__("@ atomic64_sub_return\n"
1298 -"1: ldrexd %0, %H0, [%3]\n"
1299 -" subs %0, %0, %4\n"
1300 -" sbc %H0, %H0, %H4\n"
1301 +"1: ldrexd %1, %H1, [%3]\n"
1302 +" subs %0, %1, %4\n"
1303 +" sbcs %H0, %H1, %H4\n"
1304 +
1305 +#ifdef CONFIG_PAX_REFCOUNT
1306 +" bvc 3f\n"
1307 +" mov %0, %1\n"
1308 +" mov %H0, %H1\n"
1309 +"2: bkpt 0xf103\n"
1310 +"3:\n"
1311 +#endif
1312 +
1313 " strexd %1, %0, %H0, [%3]\n"
1314 " teq %1, #0\n"
1315 " bne 1b"
1316 +
1317 +#ifdef CONFIG_PAX_REFCOUNT
1318 +"\n4:\n"
1319 + _ASM_EXTABLE(2b, 4b)
1320 +#endif
1321 +
1322 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1323 : "r" (&v->counter), "r" (i)
1324 : "cc");
1325 @@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1326 return oldval;
1327 }
1328
1329 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1330 +{
1331 + u64 oldval;
1332 + unsigned long res;
1333 +
1334 + smp_mb();
1335 +
1336 + do {
1337 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1338 + "ldrexd %1, %H1, [%3]\n"
1339 + "mov %0, #0\n"
1340 + "teq %1, %4\n"
1341 + "teqeq %H1, %H4\n"
1342 + "strexdeq %0, %5, %H5, [%3]"
1343 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1344 + : "r" (&ptr->counter), "r" (old), "r" (new)
1345 + : "cc");
1346 + } while (res);
1347 +
1348 + smp_mb();
1349 +
1350 + return oldval;
1351 +}
1352 +
1353 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1354 {
1355 u64 result;
1356 @@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1357
1358 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1359 {
1360 - u64 result;
1361 - unsigned long tmp;
1362 + u64 result, tmp;
1363
1364 smp_mb();
1365
1366 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1367 -"1: ldrexd %0, %H0, [%3]\n"
1368 -" subs %0, %0, #1\n"
1369 -" sbc %H0, %H0, #0\n"
1370 +"1: ldrexd %1, %H1, [%3]\n"
1371 +" subs %0, %1, #1\n"
1372 +" sbcs %H0, %H1, #0\n"
1373 +
1374 +#ifdef CONFIG_PAX_REFCOUNT
1375 +" bvc 3f\n"
1376 +" mov %0, %1\n"
1377 +" mov %H0, %H1\n"
1378 +"2: bkpt 0xf103\n"
1379 +"3:\n"
1380 +#endif
1381 +
1382 " teq %H0, #0\n"
1383 -" bmi 2f\n"
1384 +" bmi 4f\n"
1385 " strexd %1, %0, %H0, [%3]\n"
1386 " teq %1, #0\n"
1387 " bne 1b\n"
1388 -"2:"
1389 +"4:\n"
1390 +
1391 +#ifdef CONFIG_PAX_REFCOUNT
1392 + _ASM_EXTABLE(2b, 4b)
1393 +#endif
1394 +
1395 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1396 : "r" (&v->counter)
1397 : "cc");
1398 @@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1399 " teq %0, %5\n"
1400 " teqeq %H0, %H5\n"
1401 " moveq %1, #0\n"
1402 -" beq 2f\n"
1403 +" beq 4f\n"
1404 " adds %0, %0, %6\n"
1405 -" adc %H0, %H0, %H6\n"
1406 +" adcs %H0, %H0, %H6\n"
1407 +
1408 +#ifdef CONFIG_PAX_REFCOUNT
1409 +" bvc 3f\n"
1410 +"2: bkpt 0xf103\n"
1411 +"3:\n"
1412 +#endif
1413 +
1414 " strexd %2, %0, %H0, [%4]\n"
1415 " teq %2, #0\n"
1416 " bne 1b\n"
1417 -"2:"
1418 +"4:\n"
1419 +
1420 +#ifdef CONFIG_PAX_REFCOUNT
1421 + _ASM_EXTABLE(2b, 4b)
1422 +#endif
1423 +
1424 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1425 : "r" (&v->counter), "r" (u), "r" (a)
1426 : "cc");
1427 @@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1428
1429 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1430 #define atomic64_inc(v) atomic64_add(1LL, (v))
1431 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1432 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1433 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1434 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1435 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1436 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1437 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1438 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1439 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1440 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1441 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1442 index 75fe66b..ba3dee4 100644
1443 --- a/arch/arm/include/asm/cache.h
1444 +++ b/arch/arm/include/asm/cache.h
1445 @@ -4,8 +4,10 @@
1446 #ifndef __ASMARM_CACHE_H
1447 #define __ASMARM_CACHE_H
1448
1449 +#include <linux/const.h>
1450 +
1451 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1452 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1453 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1454
1455 /*
1456 * Memory returned by kmalloc() may be used for DMA, so we must make
1457 @@ -24,5 +26,6 @@
1458 #endif
1459
1460 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1461 +#define __read_only __attribute__ ((__section__(".data..read_only")))
1462
1463 #endif
1464 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1465 index e1489c5..d418304 100644
1466 --- a/arch/arm/include/asm/cacheflush.h
1467 +++ b/arch/arm/include/asm/cacheflush.h
1468 @@ -116,7 +116,7 @@ struct cpu_cache_fns {
1469 void (*dma_unmap_area)(const void *, size_t, int);
1470
1471 void (*dma_flush_range)(const void *, const void *);
1472 -};
1473 +} __no_const;
1474
1475 /*
1476 * Select the calling method
1477 diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1478 index 6dcc164..b14d917 100644
1479 --- a/arch/arm/include/asm/checksum.h
1480 +++ b/arch/arm/include/asm/checksum.h
1481 @@ -37,7 +37,19 @@ __wsum
1482 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1483
1484 __wsum
1485 -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1486 +__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1487 +
1488 +static inline __wsum
1489 +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1490 +{
1491 + __wsum ret;
1492 + pax_open_userland();
1493 + ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1494 + pax_close_userland();
1495 + return ret;
1496 +}
1497 +
1498 +
1499
1500 /*
1501 * Fold a partial checksum without adding pseudo headers
1502 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1503 index 7eb18c1..e38b6d2 100644
1504 --- a/arch/arm/include/asm/cmpxchg.h
1505 +++ b/arch/arm/include/asm/cmpxchg.h
1506 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1507
1508 #define xchg(ptr,x) \
1509 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1510 +#define xchg_unchecked(ptr,x) \
1511 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1512
1513 #include <asm-generic/cmpxchg-local.h>
1514
1515 diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
1516 index ab98fdd..6b19938 100644
1517 --- a/arch/arm/include/asm/delay.h
1518 +++ b/arch/arm/include/asm/delay.h
1519 @@ -24,9 +24,9 @@ extern struct arm_delay_ops {
1520 void (*delay)(unsigned long);
1521 void (*const_udelay)(unsigned long);
1522 void (*udelay)(unsigned long);
1523 -} arm_delay_ops;
1524 +} *arm_delay_ops;
1525
1526 -#define __delay(n) arm_delay_ops.delay(n)
1527 +#define __delay(n) arm_delay_ops->delay(n)
1528
1529 /*
1530 * This function intentionally does not exist; if you see references to
1531 @@ -47,8 +47,8 @@ extern void __bad_udelay(void);
1532 * first constant multiplications gets optimized away if the delay is
1533 * a constant)
1534 */
1535 -#define __udelay(n) arm_delay_ops.udelay(n)
1536 -#define __const_udelay(n) arm_delay_ops.const_udelay(n)
1537 +#define __udelay(n) arm_delay_ops->udelay(n)
1538 +#define __const_udelay(n) arm_delay_ops->const_udelay(n)
1539
1540 #define udelay(n) \
1541 (__builtin_constant_p(n) ? \
1542 diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1543 index 6ddbe44..758b5f2 100644
1544 --- a/arch/arm/include/asm/domain.h
1545 +++ b/arch/arm/include/asm/domain.h
1546 @@ -48,18 +48,37 @@
1547 * Domain types
1548 */
1549 #define DOMAIN_NOACCESS 0
1550 -#define DOMAIN_CLIENT 1
1551 #ifdef CONFIG_CPU_USE_DOMAINS
1552 +#define DOMAIN_USERCLIENT 1
1553 +#define DOMAIN_KERNELCLIENT 1
1554 #define DOMAIN_MANAGER 3
1555 +#define DOMAIN_VECTORS DOMAIN_USER
1556 #else
1557 +
1558 +#ifdef CONFIG_PAX_KERNEXEC
1559 #define DOMAIN_MANAGER 1
1560 +#define DOMAIN_KERNEXEC 3
1561 +#else
1562 +#define DOMAIN_MANAGER 1
1563 +#endif
1564 +
1565 +#ifdef CONFIG_PAX_MEMORY_UDEREF
1566 +#define DOMAIN_USERCLIENT 0
1567 +#define DOMAIN_UDEREF 1
1568 +#define DOMAIN_VECTORS DOMAIN_KERNEL
1569 +#else
1570 +#define DOMAIN_USERCLIENT 1
1571 +#define DOMAIN_VECTORS DOMAIN_USER
1572 +#endif
1573 +#define DOMAIN_KERNELCLIENT 1
1574 +
1575 #endif
1576
1577 #define domain_val(dom,type) ((type) << (2*(dom)))
1578
1579 #ifndef __ASSEMBLY__
1580
1581 -#ifdef CONFIG_CPU_USE_DOMAINS
1582 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1583 static inline void set_domain(unsigned val)
1584 {
1585 asm volatile(
1586 @@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1587 isb();
1588 }
1589
1590 -#define modify_domain(dom,type) \
1591 - do { \
1592 - struct thread_info *thread = current_thread_info(); \
1593 - unsigned int domain = thread->cpu_domain; \
1594 - domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1595 - thread->cpu_domain = domain | domain_val(dom, type); \
1596 - set_domain(thread->cpu_domain); \
1597 - } while (0)
1598 -
1599 +extern void modify_domain(unsigned int dom, unsigned int type);
1600 #else
1601 static inline void set_domain(unsigned val) { }
1602 static inline void modify_domain(unsigned dom, unsigned type) { }
1603 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1604 index 38050b1..9d90e8b 100644
1605 --- a/arch/arm/include/asm/elf.h
1606 +++ b/arch/arm/include/asm/elf.h
1607 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1608 the loader. We need to make sure that it is out of the way of the program
1609 that it will "exec", and that there is sufficient room for the brk. */
1610
1611 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1612 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1613 +
1614 +#ifdef CONFIG_PAX_ASLR
1615 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1616 +
1617 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1618 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1619 +#endif
1620
1621 /* When the program starts, a1 contains a pointer to a function to be
1622 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1623 @@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1624 extern void elf_set_personality(const struct elf32_hdr *);
1625 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1626
1627 -struct mm_struct;
1628 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1629 -#define arch_randomize_brk arch_randomize_brk
1630 -
1631 #endif
1632 diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1633 index de53547..52b9a28 100644
1634 --- a/arch/arm/include/asm/fncpy.h
1635 +++ b/arch/arm/include/asm/fncpy.h
1636 @@ -81,7 +81,9 @@
1637 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1638 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1639 \
1640 + pax_open_kernel(); \
1641 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1642 + pax_close_kernel(); \
1643 flush_icache_range((unsigned long)(dest_buf), \
1644 (unsigned long)(dest_buf) + (size)); \
1645 \
1646 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1647 index e42cf59..7b94b8f 100644
1648 --- a/arch/arm/include/asm/futex.h
1649 +++ b/arch/arm/include/asm/futex.h
1650 @@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1651 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1652 return -EFAULT;
1653
1654 + pax_open_userland();
1655 +
1656 smp_mb();
1657 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1658 "1: ldrex %1, [%4]\n"
1659 @@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1660 : "cc", "memory");
1661 smp_mb();
1662
1663 + pax_close_userland();
1664 +
1665 *uval = val;
1666 return ret;
1667 }
1668 @@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1669 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1670 return -EFAULT;
1671
1672 + pax_open_userland();
1673 +
1674 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1675 "1: " TUSER(ldr) " %1, [%4]\n"
1676 " teq %1, %2\n"
1677 @@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1678 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1679 : "cc", "memory");
1680
1681 + pax_close_userland();
1682 +
1683 *uval = val;
1684 return ret;
1685 }
1686 @@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1687 return -EFAULT;
1688
1689 pagefault_disable(); /* implies preempt_disable() */
1690 + pax_open_userland();
1691
1692 switch (op) {
1693 case FUTEX_OP_SET:
1694 @@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1695 ret = -ENOSYS;
1696 }
1697
1698 + pax_close_userland();
1699 pagefault_enable(); /* subsumes preempt_enable() */
1700
1701 if (!ret) {
1702 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1703 index 83eb2f7..ed77159 100644
1704 --- a/arch/arm/include/asm/kmap_types.h
1705 +++ b/arch/arm/include/asm/kmap_types.h
1706 @@ -4,6 +4,6 @@
1707 /*
1708 * This is the "bare minimum". AIO seems to require this.
1709 */
1710 -#define KM_TYPE_NR 16
1711 +#define KM_TYPE_NR 17
1712
1713 #endif
1714 diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1715 index 9e614a1..3302cca 100644
1716 --- a/arch/arm/include/asm/mach/dma.h
1717 +++ b/arch/arm/include/asm/mach/dma.h
1718 @@ -22,7 +22,7 @@ struct dma_ops {
1719 int (*residue)(unsigned int, dma_t *); /* optional */
1720 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1721 const char *type;
1722 -};
1723 +} __do_const;
1724
1725 struct dma_struct {
1726 void *addr; /* single DMA address */
1727 diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1728 index 2fe141f..192dc01 100644
1729 --- a/arch/arm/include/asm/mach/map.h
1730 +++ b/arch/arm/include/asm/mach/map.h
1731 @@ -27,13 +27,16 @@ struct map_desc {
1732 #define MT_MINICLEAN 6
1733 #define MT_LOW_VECTORS 7
1734 #define MT_HIGH_VECTORS 8
1735 -#define MT_MEMORY 9
1736 +#define MT_MEMORY_RWX 9
1737 #define MT_ROM 10
1738 -#define MT_MEMORY_NONCACHED 11
1739 +#define MT_MEMORY_NONCACHED_RX 11
1740 #define MT_MEMORY_DTCM 12
1741 #define MT_MEMORY_ITCM 13
1742 #define MT_MEMORY_SO 14
1743 #define MT_MEMORY_DMA_READY 15
1744 +#define MT_MEMORY_RW 16
1745 +#define MT_MEMORY_RX 17
1746 +#define MT_MEMORY_NONCACHED_RW 18
1747
1748 #ifdef CONFIG_MMU
1749 extern void iotable_init(struct map_desc *, int);
1750 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1751 index 53426c6..c7baff3 100644
1752 --- a/arch/arm/include/asm/outercache.h
1753 +++ b/arch/arm/include/asm/outercache.h
1754 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1755 #endif
1756 void (*set_debug)(unsigned long);
1757 void (*resume)(void);
1758 -};
1759 +} __no_const;
1760
1761 #ifdef CONFIG_OUTER_CACHE
1762
1763 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1764 index 812a494..71fc0b6 100644
1765 --- a/arch/arm/include/asm/page.h
1766 +++ b/arch/arm/include/asm/page.h
1767 @@ -114,7 +114,7 @@ struct cpu_user_fns {
1768 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1769 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1770 unsigned long vaddr, struct vm_area_struct *vma);
1771 -};
1772 +} __no_const;
1773
1774 #ifdef MULTI_USER
1775 extern struct cpu_user_fns cpu_user;
1776 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1777 index 943504f..c37a730 100644
1778 --- a/arch/arm/include/asm/pgalloc.h
1779 +++ b/arch/arm/include/asm/pgalloc.h
1780 @@ -17,6 +17,7 @@
1781 #include <asm/processor.h>
1782 #include <asm/cacheflush.h>
1783 #include <asm/tlbflush.h>
1784 +#include <asm/system_info.h>
1785
1786 #define check_pgt_cache() do { } while (0)
1787
1788 @@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1789 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1790 }
1791
1792 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1793 +{
1794 + pud_populate(mm, pud, pmd);
1795 +}
1796 +
1797 #else /* !CONFIG_ARM_LPAE */
1798
1799 /*
1800 @@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1801 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1802 #define pmd_free(mm, pmd) do { } while (0)
1803 #define pud_populate(mm,pmd,pte) BUG()
1804 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1805
1806 #endif /* CONFIG_ARM_LPAE */
1807
1808 @@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1809 __free_page(pte);
1810 }
1811
1812 +static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1813 +{
1814 +#ifdef CONFIG_ARM_LPAE
1815 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1816 +#else
1817 + if (addr & SECTION_SIZE)
1818 + pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1819 + else
1820 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1821 +#endif
1822 + flush_pmd_entry(pmdp);
1823 +}
1824 +
1825 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1826 pmdval_t prot)
1827 {
1828 @@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1829 static inline void
1830 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1831 {
1832 - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1833 + __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1834 }
1835 #define pmd_pgtable(pmd) pmd_page(pmd)
1836
1837 diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1838 index 5cfba15..f415e1a 100644
1839 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1840 +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1841 @@ -20,12 +20,15 @@
1842 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1843 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1844 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1845 +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1846 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1847 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1848 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1849 +
1850 /*
1851 * - section
1852 */
1853 +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1854 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1855 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1856 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1857 @@ -37,6 +40,7 @@
1858 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1859 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1860 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1861 +#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1862
1863 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1864 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1865 @@ -66,6 +70,7 @@
1866 * - extended small page/tiny page
1867 */
1868 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1869 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1870 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1871 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1872 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1873 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1874 index f97ee02..07f1be5 100644
1875 --- a/arch/arm/include/asm/pgtable-2level.h
1876 +++ b/arch/arm/include/asm/pgtable-2level.h
1877 @@ -125,6 +125,7 @@
1878 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1879 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1880 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1881 +#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7*/
1882
1883 /*
1884 * These are the memory types, defined to be compatible with
1885 diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1886 index d795282..a43ea90 100644
1887 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1888 +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1889 @@ -32,15 +32,18 @@
1890 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1891 #define PMD_BIT4 (_AT(pmdval_t, 0))
1892 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1893 +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1894
1895 /*
1896 * - section
1897 */
1898 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1899 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1900 +#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1901 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1902 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1903 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1904 +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1905 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
1906 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
1907 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
1908 @@ -66,6 +69,7 @@
1909 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1910 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1911 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1912 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1913 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1914
1915 /*
1916 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1917 index a3f3792..7b932a6 100644
1918 --- a/arch/arm/include/asm/pgtable-3level.h
1919 +++ b/arch/arm/include/asm/pgtable-3level.h
1920 @@ -74,6 +74,7 @@
1921 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1922 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1923 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1924 +#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1925 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1926 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1927 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1928 @@ -82,6 +83,7 @@
1929 /*
1930 * To be used in assembly code with the upper page attributes.
1931 */
1932 +#define L_PTE_PXN_HIGH (1 << (53 - 32))
1933 #define L_PTE_XN_HIGH (1 << (54 - 32))
1934 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1935
1936 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1937 index 9c82f988..514705a 100644
1938 --- a/arch/arm/include/asm/pgtable.h
1939 +++ b/arch/arm/include/asm/pgtable.h
1940 @@ -30,6 +30,9 @@
1941 #include <asm/pgtable-2level.h>
1942 #endif
1943
1944 +#define ktla_ktva(addr) (addr)
1945 +#define ktva_ktla(addr) (addr)
1946 +
1947 /*
1948 * Just any arbitrary offset to the start of the vmalloc VM area: the
1949 * current 8MB value just means that there will be a 8MB "hole" after the
1950 @@ -45,6 +48,9 @@
1951 #define LIBRARY_TEXT_START 0x0c000000
1952
1953 #ifndef __ASSEMBLY__
1954 +extern pteval_t __supported_pte_mask;
1955 +extern pmdval_t __supported_pmd_mask;
1956 +
1957 extern void __pte_error(const char *file, int line, pte_t);
1958 extern void __pmd_error(const char *file, int line, pmd_t);
1959 extern void __pgd_error(const char *file, int line, pgd_t);
1960 @@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1961 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1962 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1963
1964 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
1965 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
1966 +
1967 +#ifdef CONFIG_PAX_KERNEXEC
1968 +#include <asm/domain.h>
1969 +#include <linux/thread_info.h>
1970 +#include <linux/preempt.h>
1971 +#endif
1972 +
1973 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1974 +static inline int test_domain(int domain, int domaintype)
1975 +{
1976 + return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
1977 +}
1978 +#endif
1979 +
1980 +#ifdef CONFIG_PAX_KERNEXEC
1981 +static inline unsigned long pax_open_kernel(void) {
1982 +#ifdef CONFIG_ARM_LPAE
1983 + /* TODO */
1984 +#else
1985 + preempt_disable();
1986 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
1987 + modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
1988 +#endif
1989 + return 0;
1990 +}
1991 +
1992 +static inline unsigned long pax_close_kernel(void) {
1993 +#ifdef CONFIG_ARM_LPAE
1994 + /* TODO */
1995 +#else
1996 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
1997 + /* DOMAIN_MANAGER = "client" under KERNEXEC */
1998 + modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
1999 + preempt_enable_no_resched();
2000 +#endif
2001 + return 0;
2002 +}
2003 +#else
2004 +static inline unsigned long pax_open_kernel(void) { return 0; }
2005 +static inline unsigned long pax_close_kernel(void) { return 0; }
2006 +#endif
2007 +
2008 /*
2009 * This is the lowest virtual address we can permit any user space
2010 * mapping to be mapped at. This is particularly important for
2011 @@ -63,8 +113,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2012 /*
2013 * The pgprot_* and protection_map entries will be fixed up in runtime
2014 * to include the cachable and bufferable bits based on memory policy,
2015 - * as well as any architecture dependent bits like global/ASID and SMP
2016 - * shared mapping bits.
2017 + * as well as any architecture dependent bits like global/ASID, PXN,
2018 + * and SMP shared mapping bits.
2019 */
2020 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2021
2022 @@ -240,7 +290,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2023
2024 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2025 {
2026 - const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
2027 + const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE | __supported_pte_mask;
2028 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2029 return pte;
2030 }
2031 diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2032 index f3628fb..a0672dd 100644
2033 --- a/arch/arm/include/asm/proc-fns.h
2034 +++ b/arch/arm/include/asm/proc-fns.h
2035 @@ -75,7 +75,7 @@ extern struct processor {
2036 unsigned int suspend_size;
2037 void (*do_suspend)(void *);
2038 void (*do_resume)(void *);
2039 -} processor;
2040 +} __do_const processor;
2041
2042 #ifndef MULTI_CPU
2043 extern void cpu_proc_init(void);
2044 diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2045 index 06e7d50..8a8e251 100644
2046 --- a/arch/arm/include/asm/processor.h
2047 +++ b/arch/arm/include/asm/processor.h
2048 @@ -65,9 +65,8 @@ struct thread_struct {
2049 regs->ARM_cpsr |= PSR_ENDSTATE; \
2050 regs->ARM_pc = pc & ~1; /* pc */ \
2051 regs->ARM_sp = sp; /* sp */ \
2052 - regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2053 - regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2054 - regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2055 + /* r2 (envp), r1 (argv), r0 (argc) */ \
2056 + (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2057 nommu_start_thread(regs); \
2058 })
2059
2060 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2061 index d3a22be..3a69ad5 100644
2062 --- a/arch/arm/include/asm/smp.h
2063 +++ b/arch/arm/include/asm/smp.h
2064 @@ -107,7 +107,7 @@ struct smp_operations {
2065 int (*cpu_disable)(unsigned int cpu);
2066 #endif
2067 #endif
2068 -};
2069 +} __no_const;
2070
2071 /*
2072 * set platform specific SMP operations
2073 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2074 index cddda1f..ff357f7 100644
2075 --- a/arch/arm/include/asm/thread_info.h
2076 +++ b/arch/arm/include/asm/thread_info.h
2077 @@ -77,9 +77,9 @@ struct thread_info {
2078 .flags = 0, \
2079 .preempt_count = INIT_PREEMPT_COUNT, \
2080 .addr_limit = KERNEL_DS, \
2081 - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2082 - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2083 - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2084 + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2085 + domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2086 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2087 .restart_block = { \
2088 .fn = do_no_restart_syscall, \
2089 }, \
2090 @@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2091 #define TIF_SYSCALL_AUDIT 9
2092 #define TIF_SYSCALL_TRACEPOINT 10
2093 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2094 +
2095 +/* within 8 bits of TIF_SYSCALL_TRACE
2096 + * to meet flexible second operand requirements
2097 + */
2098 +#define TIF_GRSEC_SETXID 12
2099 +
2100 #define TIF_USING_IWMMXT 17
2101 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2102 #define TIF_RESTORE_SIGMASK 20
2103 @@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2104 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2105 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2106 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2107 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2108
2109 /* Checks for any syscall work in entry-common.S */
2110 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2111 - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2112 + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2113
2114 /*
2115 * Change these and you break ASM code in entry-common.S
2116 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2117 index 7e1f760..752fcb7 100644
2118 --- a/arch/arm/include/asm/uaccess.h
2119 +++ b/arch/arm/include/asm/uaccess.h
2120 @@ -18,6 +18,7 @@
2121 #include <asm/domain.h>
2122 #include <asm/unified.h>
2123 #include <asm/compiler.h>
2124 +#include <asm/pgtable.h>
2125
2126 #define VERIFY_READ 0
2127 #define VERIFY_WRITE 1
2128 @@ -60,10 +61,34 @@ extern int __put_user_bad(void);
2129 #define USER_DS TASK_SIZE
2130 #define get_fs() (current_thread_info()->addr_limit)
2131
2132 +static inline void pax_open_userland(void)
2133 +{
2134 +
2135 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2136 + if (get_fs() == USER_DS) {
2137 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2138 + modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2139 + }
2140 +#endif
2141 +
2142 +}
2143 +
2144 +static inline void pax_close_userland(void)
2145 +{
2146 +
2147 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2148 + if (get_fs() == USER_DS) {
2149 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2150 + modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2151 + }
2152 +#endif
2153 +
2154 +}
2155 +
2156 static inline void set_fs(mm_segment_t fs)
2157 {
2158 current_thread_info()->addr_limit = fs;
2159 - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2160 + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2161 }
2162
2163 #define segment_eq(a,b) ((a) == (b))
2164 @@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2165
2166 #define get_user(x,p) \
2167 ({ \
2168 + int __e; \
2169 might_fault(); \
2170 - __get_user_check(x,p); \
2171 + pax_open_userland(); \
2172 + __e = __get_user_check(x,p); \
2173 + pax_close_userland(); \
2174 + __e; \
2175 })
2176
2177 extern int __put_user_1(void *, unsigned int);
2178 @@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2179
2180 #define put_user(x,p) \
2181 ({ \
2182 + int __e; \
2183 might_fault(); \
2184 - __put_user_check(x,p); \
2185 + pax_open_userland(); \
2186 + __e = __put_user_check(x,p); \
2187 + pax_close_userland(); \
2188 + __e; \
2189 })
2190
2191 #else /* CONFIG_MMU */
2192 @@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2193 #define __get_user(x,ptr) \
2194 ({ \
2195 long __gu_err = 0; \
2196 + pax_open_userland(); \
2197 __get_user_err((x),(ptr),__gu_err); \
2198 + pax_close_userland(); \
2199 __gu_err; \
2200 })
2201
2202 #define __get_user_error(x,ptr,err) \
2203 ({ \
2204 + pax_open_userland(); \
2205 __get_user_err((x),(ptr),err); \
2206 + pax_close_userland(); \
2207 (void) 0; \
2208 })
2209
2210 @@ -312,13 +349,17 @@ do { \
2211 #define __put_user(x,ptr) \
2212 ({ \
2213 long __pu_err = 0; \
2214 + pax_open_userland(); \
2215 __put_user_err((x),(ptr),__pu_err); \
2216 + pax_close_userland(); \
2217 __pu_err; \
2218 })
2219
2220 #define __put_user_error(x,ptr,err) \
2221 ({ \
2222 + pax_open_userland(); \
2223 __put_user_err((x),(ptr),err); \
2224 + pax_close_userland(); \
2225 (void) 0; \
2226 })
2227
2228 @@ -418,11 +459,44 @@ do { \
2229
2230
2231 #ifdef CONFIG_MMU
2232 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2233 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2234 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2235 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2236 +
2237 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2238 +{
2239 + unsigned long ret;
2240 +
2241 + check_object_size(to, n, false);
2242 + pax_open_userland();
2243 + ret = ___copy_from_user(to, from, n);
2244 + pax_close_userland();
2245 + return ret;
2246 +}
2247 +
2248 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2249 +{
2250 + unsigned long ret;
2251 +
2252 + check_object_size(from, n, true);
2253 + pax_open_userland();
2254 + ret = ___copy_to_user(to, from, n);
2255 + pax_close_userland();
2256 + return ret;
2257 +}
2258 +
2259 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2260 -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2261 +extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2262 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2263 +
2264 +static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2265 +{
2266 + unsigned long ret;
2267 + pax_open_userland();
2268 + ret = ___clear_user(addr, n);
2269 + pax_close_userland();
2270 + return ret;
2271 +}
2272 +
2273 #else
2274 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2275 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2276 @@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2277
2278 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2279 {
2280 + if ((long)n < 0)
2281 + return n;
2282 +
2283 if (access_ok(VERIFY_READ, from, n))
2284 n = __copy_from_user(to, from, n);
2285 else /* security hole - plug it */
2286 @@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2287
2288 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2289 {
2290 + if ((long)n < 0)
2291 + return n;
2292 +
2293 if (access_ok(VERIFY_WRITE, to, n))
2294 n = __copy_to_user(to, from, n);
2295 return n;
2296 diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2297 index 96ee092..37f1844 100644
2298 --- a/arch/arm/include/uapi/asm/ptrace.h
2299 +++ b/arch/arm/include/uapi/asm/ptrace.h
2300 @@ -73,7 +73,7 @@
2301 * ARMv7 groups of PSR bits
2302 */
2303 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2304 -#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2305 +#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2306 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2307 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2308
2309 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2310 index 60d3b73..d27ee09 100644
2311 --- a/arch/arm/kernel/armksyms.c
2312 +++ b/arch/arm/kernel/armksyms.c
2313 @@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2314 #ifdef CONFIG_MMU
2315 EXPORT_SYMBOL(copy_page);
2316
2317 -EXPORT_SYMBOL(__copy_from_user);
2318 -EXPORT_SYMBOL(__copy_to_user);
2319 -EXPORT_SYMBOL(__clear_user);
2320 +EXPORT_SYMBOL(___copy_from_user);
2321 +EXPORT_SYMBOL(___copy_to_user);
2322 +EXPORT_SYMBOL(___clear_user);
2323
2324 EXPORT_SYMBOL(__get_user_1);
2325 EXPORT_SYMBOL(__get_user_2);
2326 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2327 index 0f82098..3dbd3ee 100644
2328 --- a/arch/arm/kernel/entry-armv.S
2329 +++ b/arch/arm/kernel/entry-armv.S
2330 @@ -47,6 +47,87 @@
2331 9997:
2332 .endm
2333
2334 + .macro pax_enter_kernel
2335 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2336 + @ make aligned space for saved DACR
2337 + sub sp, sp, #8
2338 + @ save regs
2339 + stmdb sp!, {r1, r2}
2340 + @ read DACR from cpu_domain into r1
2341 + mov r2, sp
2342 + @ assume 8K pages, since we have to split the immediate in two
2343 + bic r2, r2, #(0x1fc0)
2344 + bic r2, r2, #(0x3f)
2345 + ldr r1, [r2, #TI_CPU_DOMAIN]
2346 + @ store old DACR on stack
2347 + str r1, [sp, #8]
2348 +#ifdef CONFIG_PAX_KERNEXEC
2349 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2350 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2351 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2352 +#endif
2353 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2354 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2355 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2356 +#endif
2357 + @ write r1 to current_thread_info()->cpu_domain
2358 + str r1, [r2, #TI_CPU_DOMAIN]
2359 + @ write r1 to DACR
2360 + mcr p15, 0, r1, c3, c0, 0
2361 + @ instruction sync
2362 + instr_sync
2363 + @ restore regs
2364 + ldmia sp!, {r1, r2}
2365 +#endif
2366 + .endm
2367 +
2368 + .macro pax_open_userland
2369 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2370 + @ save regs
2371 + stmdb sp!, {r0, r1}
2372 + @ read DACR from cpu_domain into r1
2373 + mov r0, sp
2374 + @ assume 8K pages, since we have to split the immediate in two
2375 + bic r0, r0, #(0x1fc0)
2376 + bic r0, r0, #(0x3f)
2377 + ldr r1, [r0, #TI_CPU_DOMAIN]
2378 + @ set current DOMAIN_USER to DOMAIN_CLIENT
2379 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2380 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2381 + @ write r1 to current_thread_info()->cpu_domain
2382 + str r1, [r0, #TI_CPU_DOMAIN]
2383 + @ write r1 to DACR
2384 + mcr p15, 0, r1, c3, c0, 0
2385 + @ instruction sync
2386 + instr_sync
2387 + @ restore regs
2388 + ldmia sp!, {r0, r1}
2389 +#endif
2390 + .endm
2391 +
2392 + .macro pax_close_userland
2393 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2394 + @ save regs
2395 + stmdb sp!, {r0, r1}
2396 + @ read DACR from cpu_domain into r1
2397 + mov r0, sp
2398 + @ assume 8K pages, since we have to split the immediate in two
2399 + bic r0, r0, #(0x1fc0)
2400 + bic r0, r0, #(0x3f)
2401 + ldr r1, [r0, #TI_CPU_DOMAIN]
2402 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2403 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2404 + @ write r1 to current_thread_info()->cpu_domain
2405 + str r1, [r0, #TI_CPU_DOMAIN]
2406 + @ write r1 to DACR
2407 + mcr p15, 0, r1, c3, c0, 0
2408 + @ instruction sync
2409 + instr_sync
2410 + @ restore regs
2411 + ldmia sp!, {r0, r1}
2412 +#endif
2413 + .endm
2414 +
2415 .macro pabt_helper
2416 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2417 #ifdef MULTI_PABORT
2418 @@ -89,11 +170,15 @@
2419 * Invalid mode handlers
2420 */
2421 .macro inv_entry, reason
2422 +
2423 + pax_enter_kernel
2424 +
2425 sub sp, sp, #S_FRAME_SIZE
2426 ARM( stmib sp, {r1 - lr} )
2427 THUMB( stmia sp, {r0 - r12} )
2428 THUMB( str sp, [sp, #S_SP] )
2429 THUMB( str lr, [sp, #S_LR] )
2430 +
2431 mov r1, #\reason
2432 .endm
2433
2434 @@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2435 .macro svc_entry, stack_hole=0
2436 UNWIND(.fnstart )
2437 UNWIND(.save {r0 - pc} )
2438 +
2439 + pax_enter_kernel
2440 +
2441 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2442 +
2443 #ifdef CONFIG_THUMB2_KERNEL
2444 SPFIX( str r0, [sp] ) @ temporarily saved
2445 SPFIX( mov r0, sp )
2446 @@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2447 ldmia r0, {r3 - r5}
2448 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2449 mov r6, #-1 @ "" "" "" ""
2450 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2451 + @ offset sp by 8 as done in pax_enter_kernel
2452 + add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2453 +#else
2454 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2455 +#endif
2456 SPFIX( addeq r2, r2, #4 )
2457 str r3, [sp, #-4]! @ save the "real" r0 copied
2458 @ from the exception stack
2459 @@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
2460 .macro usr_entry
2461 UNWIND(.fnstart )
2462 UNWIND(.cantunwind ) @ don't unwind the user space
2463 +
2464 + pax_enter_kernel_user
2465 +
2466 sub sp, sp, #S_FRAME_SIZE
2467 ARM( stmib sp, {r1 - r12} )
2468 THUMB( stmia sp, {r0 - r12} )
2469 @@ -456,7 +553,9 @@ __und_usr:
2470 tst r3, #PSR_T_BIT @ Thumb mode?
2471 bne __und_usr_thumb
2472 sub r4, r2, #4 @ ARM instr at LR - 4
2473 + pax_open_userland
2474 1: ldrt r0, [r4]
2475 + pax_close_userland
2476 #ifdef CONFIG_CPU_ENDIAN_BE8
2477 rev r0, r0 @ little endian instruction
2478 #endif
2479 @@ -491,10 +590,14 @@ __und_usr_thumb:
2480 */
2481 .arch armv6t2
2482 #endif
2483 + pax_open_userland
2484 2: ldrht r5, [r4]
2485 + pax_close_userland
2486 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2487 blo __und_usr_fault_16 @ 16bit undefined instruction
2488 + pax_open_userland
2489 3: ldrht r0, [r2]
2490 + pax_close_userland
2491 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2492 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2493 orr r0, r0, r5, lsl #16
2494 @@ -733,7 +836,7 @@ ENTRY(__switch_to)
2495 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2496 THUMB( str sp, [ip], #4 )
2497 THUMB( str lr, [ip], #4 )
2498 -#ifdef CONFIG_CPU_USE_DOMAINS
2499 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2500 ldr r6, [r2, #TI_CPU_DOMAIN]
2501 #endif
2502 set_tls r3, r4, r5
2503 @@ -742,7 +845,7 @@ ENTRY(__switch_to)
2504 ldr r8, =__stack_chk_guard
2505 ldr r7, [r7, #TSK_STACK_CANARY]
2506 #endif
2507 -#ifdef CONFIG_CPU_USE_DOMAINS
2508 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2509 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2510 #endif
2511 mov r5, r0
2512 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2513 index a6c301e..908821b 100644
2514 --- a/arch/arm/kernel/entry-common.S
2515 +++ b/arch/arm/kernel/entry-common.S
2516 @@ -10,18 +10,46 @@
2517
2518 #include <asm/unistd.h>
2519 #include <asm/ftrace.h>
2520 +#include <asm/domain.h>
2521 #include <asm/unwind.h>
2522
2523 +#include "entry-header.S"
2524 +
2525 #ifdef CONFIG_NEED_RET_TO_USER
2526 #include <mach/entry-macro.S>
2527 #else
2528 .macro arch_ret_to_user, tmp1, tmp2
2529 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2530 + @ save regs
2531 + stmdb sp!, {r1, r2}
2532 + @ read DACR from cpu_domain into r1
2533 + mov r2, sp
2534 + @ assume 8K pages, since we have to split the immediate in two
2535 + bic r2, r2, #(0x1fc0)
2536 + bic r2, r2, #(0x3f)
2537 + ldr r1, [r2, #TI_CPU_DOMAIN]
2538 +#ifdef CONFIG_PAX_KERNEXEC
2539 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2540 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2541 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2542 +#endif
2543 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2544 + @ set current DOMAIN_USER to DOMAIN_UDEREF
2545 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2546 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2547 +#endif
2548 + @ write r1 to current_thread_info()->cpu_domain
2549 + str r1, [r2, #TI_CPU_DOMAIN]
2550 + @ write r1 to DACR
2551 + mcr p15, 0, r1, c3, c0, 0
2552 + @ instruction sync
2553 + instr_sync
2554 + @ restore regs
2555 + ldmia sp!, {r1, r2}
2556 +#endif
2557 .endm
2558 #endif
2559
2560 -#include "entry-header.S"
2561 -
2562 -
2563 .align 5
2564 /*
2565 * This is the fast syscall return path. We do as little as
2566 @@ -339,6 +367,7 @@ ENDPROC(ftrace_stub)
2567
2568 .align 5
2569 ENTRY(vector_swi)
2570 +
2571 sub sp, sp, #S_FRAME_SIZE
2572 stmia sp, {r0 - r12} @ Calling r0 - r12
2573 ARM( add r8, sp, #S_PC )
2574 @@ -388,6 +417,12 @@ ENTRY(vector_swi)
2575 ldr scno, [lr, #-4] @ get SWI instruction
2576 #endif
2577
2578 + /*
2579 + * do this here to avoid a performance hit of wrapping the code above
2580 + * that directly dereferences userland to parse the SWI instruction
2581 + */
2582 + pax_enter_kernel_user
2583 +
2584 #ifdef CONFIG_ALIGNMENT_TRAP
2585 ldr ip, __cr_alignment
2586 ldr ip, [ip]
2587 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2588 index 9a8531e..812e287 100644
2589 --- a/arch/arm/kernel/entry-header.S
2590 +++ b/arch/arm/kernel/entry-header.S
2591 @@ -73,9 +73,66 @@
2592 msr cpsr_c, \rtemp @ switch back to the SVC mode
2593 .endm
2594
2595 + .macro pax_enter_kernel_user
2596 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2597 + @ save regs
2598 + stmdb sp!, {r0, r1}
2599 + @ read DACR from cpu_domain into r1
2600 + mov r0, sp
2601 + @ assume 8K pages, since we have to split the immediate in two
2602 + bic r0, r0, #(0x1fc0)
2603 + bic r0, r0, #(0x3f)
2604 + ldr r1, [r0, #TI_CPU_DOMAIN]
2605 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2606 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2607 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2608 +#endif
2609 +#ifdef CONFIG_PAX_KERNEXEC
2610 + @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2611 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2612 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2613 +#endif
2614 + @ write r1 to current_thread_info()->cpu_domain
2615 + str r1, [r0, #TI_CPU_DOMAIN]
2616 + @ write r1 to DACR
2617 + mcr p15, 0, r1, c3, c0, 0
2618 + @ instruction sync
2619 + instr_sync
2620 + @ restore regs
2621 + ldmia sp!, {r0, r1}
2622 +#endif
2623 + .endm
2624 +
2625 + .macro pax_exit_kernel
2626 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2627 + @ save regs
2628 + stmdb sp!, {r0, r1}
2629 + @ read old DACR from stack into r1
2630 + ldr r1, [sp, #(8 + S_SP)]
2631 + sub r1, r1, #8
2632 + ldr r1, [r1]
2633 +
2634 + @ write r1 to current_thread_info()->cpu_domain
2635 + mov r0, sp
2636 + @ assume 8K pages, since we have to split the immediate in two
2637 + bic r0, r0, #(0x1fc0)
2638 + bic r0, r0, #(0x3f)
2639 + str r1, [r0, #TI_CPU_DOMAIN]
2640 + @ write r1 to DACR
2641 + mcr p15, 0, r1, c3, c0, 0
2642 + @ instruction sync
2643 + instr_sync
2644 + @ restore regs
2645 + ldmia sp!, {r0, r1}
2646 +#endif
2647 + .endm
2648 +
2649 #ifndef CONFIG_THUMB2_KERNEL
2650 .macro svc_exit, rpsr
2651 msr spsr_cxsf, \rpsr
2652 +
2653 + pax_exit_kernel
2654 +
2655 #if defined(CONFIG_CPU_V6)
2656 ldr r0, [sp]
2657 strex r1, r2, [sp] @ clear the exclusive monitor
2658 @@ -121,6 +178,9 @@
2659 .endm
2660 #else /* CONFIG_THUMB2_KERNEL */
2661 .macro svc_exit, rpsr
2662 +
2663 + pax_exit_kernel
2664 +
2665 ldr lr, [sp, #S_SP] @ top of the stack
2666 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2667 clrex @ clear the exclusive monitor
2668 diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2669 index 2adda11..7fbe958 100644
2670 --- a/arch/arm/kernel/fiq.c
2671 +++ b/arch/arm/kernel/fiq.c
2672 @@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2673 #if defined(CONFIG_CPU_USE_DOMAINS)
2674 memcpy((void *)0xffff001c, start, length);
2675 #else
2676 + pax_open_kernel();
2677 memcpy(vectors_page + 0x1c, start, length);
2678 + pax_close_kernel();
2679 #endif
2680 flush_icache_range(0xffff001c, 0xffff001c + length);
2681 if (!vectors_high())
2682 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2683 index 486a15a..2d6880e 100644
2684 --- a/arch/arm/kernel/head.S
2685 +++ b/arch/arm/kernel/head.S
2686 @@ -52,7 +52,9 @@
2687 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2688
2689 .macro pgtbl, rd, phys
2690 - add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2691 + mov \rd, #TEXT_OFFSET
2692 + sub \rd, #PG_DIR_SIZE
2693 + add \rd, \rd, \phys
2694 .endm
2695
2696 /*
2697 @@ -416,7 +418,7 @@ __enable_mmu:
2698 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2699 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2700 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2701 - domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2702 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2703 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2704 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2705 #endif
2706 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2707 index 5ff2e77..556d030 100644
2708 --- a/arch/arm/kernel/hw_breakpoint.c
2709 +++ b/arch/arm/kernel/hw_breakpoint.c
2710 @@ -1011,7 +1011,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2711 return NOTIFY_OK;
2712 }
2713
2714 -static struct notifier_block __cpuinitdata dbg_reset_nb = {
2715 +static struct notifier_block dbg_reset_nb = {
2716 .notifier_call = dbg_reset_notify,
2717 };
2718
2719 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2720 index 1e9be5d..03edbc2 100644
2721 --- a/arch/arm/kernel/module.c
2722 +++ b/arch/arm/kernel/module.c
2723 @@ -37,12 +37,37 @@
2724 #endif
2725
2726 #ifdef CONFIG_MMU
2727 -void *module_alloc(unsigned long size)
2728 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2729 {
2730 + if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2731 + return NULL;
2732 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2733 - GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2734 + GFP_KERNEL, prot, -1,
2735 __builtin_return_address(0));
2736 }
2737 +
2738 +void *module_alloc(unsigned long size)
2739 +{
2740 +
2741 +#ifdef CONFIG_PAX_KERNEXEC
2742 + return __module_alloc(size, PAGE_KERNEL);
2743 +#else
2744 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2745 +#endif
2746 +
2747 +}
2748 +
2749 +#ifdef CONFIG_PAX_KERNEXEC
2750 +void module_free_exec(struct module *mod, void *module_region)
2751 +{
2752 + module_free(mod, module_region);
2753 +}
2754 +
2755 +void *module_alloc_exec(unsigned long size)
2756 +{
2757 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2758 +}
2759 +#endif
2760 #endif
2761
2762 int
2763 diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2764 index 5f66206..dce492f 100644
2765 --- a/arch/arm/kernel/perf_event_cpu.c
2766 +++ b/arch/arm/kernel/perf_event_cpu.c
2767 @@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2768 return NOTIFY_OK;
2769 }
2770
2771 -static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2772 +static struct notifier_block cpu_pmu_hotplug_notifier = {
2773 .notifier_call = cpu_pmu_notify,
2774 };
2775
2776 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2777 index c6dec5f..f853532 100644
2778 --- a/arch/arm/kernel/process.c
2779 +++ b/arch/arm/kernel/process.c
2780 @@ -28,7 +28,6 @@
2781 #include <linux/tick.h>
2782 #include <linux/utsname.h>
2783 #include <linux/uaccess.h>
2784 -#include <linux/random.h>
2785 #include <linux/hw_breakpoint.h>
2786 #include <linux/cpuidle.h>
2787 #include <linux/leds.h>
2788 @@ -256,9 +255,10 @@ void machine_power_off(void)
2789 machine_shutdown();
2790 if (pm_power_off)
2791 pm_power_off();
2792 + BUG();
2793 }
2794
2795 -void machine_restart(char *cmd)
2796 +__noreturn void machine_restart(char *cmd)
2797 {
2798 machine_shutdown();
2799
2800 @@ -283,8 +283,8 @@ void __show_regs(struct pt_regs *regs)
2801 init_utsname()->release,
2802 (int)strcspn(init_utsname()->version, " "),
2803 init_utsname()->version);
2804 - print_symbol("PC is at %s\n", instruction_pointer(regs));
2805 - print_symbol("LR is at %s\n", regs->ARM_lr);
2806 + printk("PC is at %pA\n", instruction_pointer(regs));
2807 + printk("LR is at %pA\n", regs->ARM_lr);
2808 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2809 "sp : %08lx ip : %08lx fp : %08lx\n",
2810 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2811 @@ -452,12 +452,6 @@ unsigned long get_wchan(struct task_struct *p)
2812 return 0;
2813 }
2814
2815 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2816 -{
2817 - unsigned long range_end = mm->brk + 0x02000000;
2818 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2819 -}
2820 -
2821 #ifdef CONFIG_MMU
2822 /*
2823 * The vectors page is always readable from user space for the
2824 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2825 index 03deeff..741ce88 100644
2826 --- a/arch/arm/kernel/ptrace.c
2827 +++ b/arch/arm/kernel/ptrace.c
2828 @@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2829 return current_thread_info()->syscall;
2830 }
2831
2832 +#ifdef CONFIG_GRKERNSEC_SETXID
2833 +extern void gr_delayed_cred_worker(void);
2834 +#endif
2835 +
2836 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2837 {
2838 current_thread_info()->syscall = scno;
2839
2840 +#ifdef CONFIG_GRKERNSEC_SETXID
2841 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2842 + gr_delayed_cred_worker();
2843 +#endif
2844 +
2845 /* Do the secure computing check first; failures should be fast. */
2846 if (secure_computing(scno) == -1)
2847 return -1;
2848 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2849 index 3f6cbb2..6d856f5 100644
2850 --- a/arch/arm/kernel/setup.c
2851 +++ b/arch/arm/kernel/setup.c
2852 @@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
2853 unsigned int elf_hwcap __read_mostly;
2854 EXPORT_SYMBOL(elf_hwcap);
2855
2856 +pteval_t __supported_pte_mask __read_only;
2857 +pmdval_t __supported_pmd_mask __read_only;
2858
2859 #ifdef MULTI_CPU
2860 -struct processor processor __read_mostly;
2861 +struct processor processor;
2862 #endif
2863 #ifdef MULTI_TLB
2864 -struct cpu_tlb_fns cpu_tlb __read_mostly;
2865 +struct cpu_tlb_fns cpu_tlb __read_only;
2866 #endif
2867 #ifdef MULTI_USER
2868 -struct cpu_user_fns cpu_user __read_mostly;
2869 +struct cpu_user_fns cpu_user __read_only;
2870 #endif
2871 #ifdef MULTI_CACHE
2872 -struct cpu_cache_fns cpu_cache __read_mostly;
2873 +struct cpu_cache_fns cpu_cache __read_only;
2874 #endif
2875 #ifdef CONFIG_OUTER_CACHE
2876 -struct outer_cache_fns outer_cache __read_mostly;
2877 +struct outer_cache_fns outer_cache __read_only;
2878 EXPORT_SYMBOL(outer_cache);
2879 #endif
2880
2881 @@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
2882 asm("mrc p15, 0, %0, c0, c1, 4"
2883 : "=r" (mmfr0));
2884 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
2885 - (mmfr0 & 0x000000f0) >= 0x00000030)
2886 + (mmfr0 & 0x000000f0) >= 0x00000030) {
2887 cpu_arch = CPU_ARCH_ARMv7;
2888 - else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2889 + if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
2890 + __supported_pte_mask |= L_PTE_PXN;
2891 + __supported_pmd_mask |= PMD_PXNTABLE;
2892 + }
2893 + } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2894 (mmfr0 & 0x000000f0) == 0x00000020)
2895 cpu_arch = CPU_ARCH_ARMv6;
2896 else
2897 @@ -462,7 +468,7 @@ static void __init setup_processor(void)
2898 __cpu_architecture = __get_cpu_architecture();
2899
2900 #ifdef MULTI_CPU
2901 - processor = *list->proc;
2902 + memcpy((void *)&processor, list->proc, sizeof processor);
2903 #endif
2904 #ifdef MULTI_TLB
2905 cpu_tlb = *list->tlb;
2906 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
2907 index 84f4cbf..672f5b8 100644
2908 --- a/arch/arm/kernel/smp.c
2909 +++ b/arch/arm/kernel/smp.c
2910 @@ -70,7 +70,7 @@ enum ipi_msg_type {
2911
2912 static DECLARE_COMPLETION(cpu_running);
2913
2914 -static struct smp_operations smp_ops;
2915 +static struct smp_operations smp_ops __read_only;
2916
2917 void __init smp_set_ops(struct smp_operations *ops)
2918 {
2919 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
2920 index b0179b8..b7b16c7 100644
2921 --- a/arch/arm/kernel/traps.c
2922 +++ b/arch/arm/kernel/traps.c
2923 @@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
2924 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
2925 {
2926 #ifdef CONFIG_KALLSYMS
2927 - printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
2928 + printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
2929 #else
2930 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
2931 #endif
2932 @@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2933 static int die_owner = -1;
2934 static unsigned int die_nest_count;
2935
2936 +extern void gr_handle_kernel_exploit(void);
2937 +
2938 static unsigned long oops_begin(void)
2939 {
2940 int cpu;
2941 @@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
2942 panic("Fatal exception in interrupt");
2943 if (panic_on_oops)
2944 panic("Fatal exception");
2945 +
2946 + gr_handle_kernel_exploit();
2947 +
2948 if (signr)
2949 do_exit(signr);
2950 }
2951 @@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
2952 * The user helper at 0xffff0fe0 must be used instead.
2953 * (see entry-armv.S for details)
2954 */
2955 + pax_open_kernel();
2956 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
2957 + pax_close_kernel();
2958 }
2959 return 0;
2960
2961 @@ -849,5 +856,9 @@ void __init early_trap_init(void *vectors_base)
2962 sigreturn_codes, sizeof(sigreturn_codes));
2963
2964 flush_icache_range(vectors, vectors + PAGE_SIZE);
2965 - modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
2966 +
2967 +#ifndef CONFIG_PAX_MEMORY_UDEREF
2968 + modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
2969 +#endif
2970 +
2971 }
2972 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
2973 index 11c1785..c67d54c 100644
2974 --- a/arch/arm/kernel/vmlinux.lds.S
2975 +++ b/arch/arm/kernel/vmlinux.lds.S
2976 @@ -8,7 +8,11 @@
2977 #include <asm/thread_info.h>
2978 #include <asm/memory.h>
2979 #include <asm/page.h>
2980 -
2981 +
2982 +#ifdef CONFIG_PAX_KERNEXEC
2983 +#include <asm/pgtable.h>
2984 +#endif
2985 +
2986 #define PROC_INFO \
2987 . = ALIGN(4); \
2988 VMLINUX_SYMBOL(__proc_info_begin) = .; \
2989 @@ -90,6 +94,11 @@ SECTIONS
2990 _text = .;
2991 HEAD_TEXT
2992 }
2993 +
2994 +#ifdef CONFIG_PAX_KERNEXEC
2995 + . = ALIGN(1<<SECTION_SHIFT);
2996 +#endif
2997 +
2998 .text : { /* Real text segment */
2999 _stext = .; /* Text and read-only data */
3000 __exception_text_start = .;
3001 @@ -144,6 +153,10 @@ SECTIONS
3002
3003 _etext = .; /* End of text and rodata section */
3004
3005 +#ifdef CONFIG_PAX_KERNEXEC
3006 + . = ALIGN(1<<SECTION_SHIFT);
3007 +#endif
3008 +
3009 #ifndef CONFIG_XIP_KERNEL
3010 . = ALIGN(PAGE_SIZE);
3011 __init_begin = .;
3012 @@ -203,6 +216,11 @@ SECTIONS
3013 . = PAGE_OFFSET + TEXT_OFFSET;
3014 #else
3015 __init_end = .;
3016 +
3017 +#ifdef CONFIG_PAX_KERNEXEC
3018 + . = ALIGN(1<<SECTION_SHIFT);
3019 +#endif
3020 +
3021 . = ALIGN(THREAD_SIZE);
3022 __data_loc = .;
3023 #endif
3024 diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3025 index 14a0d98..7771a7d 100644
3026 --- a/arch/arm/lib/clear_user.S
3027 +++ b/arch/arm/lib/clear_user.S
3028 @@ -12,14 +12,14 @@
3029
3030 .text
3031
3032 -/* Prototype: int __clear_user(void *addr, size_t sz)
3033 +/* Prototype: int ___clear_user(void *addr, size_t sz)
3034 * Purpose : clear some user memory
3035 * Params : addr - user memory address to clear
3036 * : sz - number of bytes to clear
3037 * Returns : number of bytes NOT cleared
3038 */
3039 ENTRY(__clear_user_std)
3040 -WEAK(__clear_user)
3041 +WEAK(___clear_user)
3042 stmfd sp!, {r1, lr}
3043 mov r2, #0
3044 cmp r1, #4
3045 @@ -44,7 +44,7 @@ WEAK(__clear_user)
3046 USER( strnebt r2, [r0])
3047 mov r0, #0
3048 ldmfd sp!, {r1, pc}
3049 -ENDPROC(__clear_user)
3050 +ENDPROC(___clear_user)
3051 ENDPROC(__clear_user_std)
3052
3053 .pushsection .fixup,"ax"
3054 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3055 index 66a477a..bee61d3 100644
3056 --- a/arch/arm/lib/copy_from_user.S
3057 +++ b/arch/arm/lib/copy_from_user.S
3058 @@ -16,7 +16,7 @@
3059 /*
3060 * Prototype:
3061 *
3062 - * size_t __copy_from_user(void *to, const void *from, size_t n)
3063 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
3064 *
3065 * Purpose:
3066 *
3067 @@ -84,11 +84,11 @@
3068
3069 .text
3070
3071 -ENTRY(__copy_from_user)
3072 +ENTRY(___copy_from_user)
3073
3074 #include "copy_template.S"
3075
3076 -ENDPROC(__copy_from_user)
3077 +ENDPROC(___copy_from_user)
3078
3079 .pushsection .fixup,"ax"
3080 .align 0
3081 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3082 index 6ee2f67..d1cce76 100644
3083 --- a/arch/arm/lib/copy_page.S
3084 +++ b/arch/arm/lib/copy_page.S
3085 @@ -10,6 +10,7 @@
3086 * ASM optimised string functions
3087 */
3088 #include <linux/linkage.h>
3089 +#include <linux/const.h>
3090 #include <asm/assembler.h>
3091 #include <asm/asm-offsets.h>
3092 #include <asm/cache.h>
3093 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3094 index d066df6..df28194 100644
3095 --- a/arch/arm/lib/copy_to_user.S
3096 +++ b/arch/arm/lib/copy_to_user.S
3097 @@ -16,7 +16,7 @@
3098 /*
3099 * Prototype:
3100 *
3101 - * size_t __copy_to_user(void *to, const void *from, size_t n)
3102 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
3103 *
3104 * Purpose:
3105 *
3106 @@ -88,11 +88,11 @@
3107 .text
3108
3109 ENTRY(__copy_to_user_std)
3110 -WEAK(__copy_to_user)
3111 +WEAK(___copy_to_user)
3112
3113 #include "copy_template.S"
3114
3115 -ENDPROC(__copy_to_user)
3116 +ENDPROC(___copy_to_user)
3117 ENDPROC(__copy_to_user_std)
3118
3119 .pushsection .fixup,"ax"
3120 diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3121 index 7d08b43..f7ca7ea 100644
3122 --- a/arch/arm/lib/csumpartialcopyuser.S
3123 +++ b/arch/arm/lib/csumpartialcopyuser.S
3124 @@ -57,8 +57,8 @@
3125 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3126 */
3127
3128 -#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3129 -#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3130 +#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3131 +#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3132
3133 #include "csumpartialcopygeneric.S"
3134
3135 diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3136 index 0dc5385..45833ef 100644
3137 --- a/arch/arm/lib/delay.c
3138 +++ b/arch/arm/lib/delay.c
3139 @@ -28,12 +28,14 @@
3140 /*
3141 * Default to the loop-based delay implementation.
3142 */
3143 -struct arm_delay_ops arm_delay_ops = {
3144 +static struct arm_delay_ops arm_loop_delay_ops = {
3145 .delay = __loop_delay,
3146 .const_udelay = __loop_const_udelay,
3147 .udelay = __loop_udelay,
3148 };
3149
3150 +struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
3151 +
3152 static const struct delay_timer *delay_timer;
3153 static bool delay_calibrated;
3154
3155 @@ -67,6 +69,12 @@ static void __timer_udelay(unsigned long usecs)
3156 __timer_const_udelay(usecs * UDELAY_MULT);
3157 }
3158
3159 +static struct arm_delay_ops arm_timer_delay_ops = {
3160 + .delay = __timer_delay,
3161 + .const_udelay = __timer_const_udelay,
3162 + .udelay = __timer_udelay,
3163 +};
3164 +
3165 void __init register_current_timer_delay(const struct delay_timer *timer)
3166 {
3167 if (!delay_calibrated) {
3168 @@ -74,9 +82,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
3169 delay_timer = timer;
3170 lpj_fine = timer->freq / HZ;
3171 loops_per_jiffy = lpj_fine;
3172 - arm_delay_ops.delay = __timer_delay;
3173 - arm_delay_ops.const_udelay = __timer_const_udelay;
3174 - arm_delay_ops.udelay = __timer_udelay;
3175 + arm_delay_ops = &arm_timer_delay_ops;
3176 delay_calibrated = true;
3177 } else {
3178 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
3179 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3180 index 025f742..8432b08 100644
3181 --- a/arch/arm/lib/uaccess_with_memcpy.c
3182 +++ b/arch/arm/lib/uaccess_with_memcpy.c
3183 @@ -104,7 +104,7 @@ out:
3184 }
3185
3186 unsigned long
3187 -__copy_to_user(void __user *to, const void *from, unsigned long n)
3188 +___copy_to_user(void __user *to, const void *from, unsigned long n)
3189 {
3190 /*
3191 * This test is stubbed out of the main function above to keep
3192 diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3193 index bac21a5..b67ef8e 100644
3194 --- a/arch/arm/mach-kirkwood/common.c
3195 +++ b/arch/arm/mach-kirkwood/common.c
3196 @@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3197 clk_gate_ops.disable(hw);
3198 }
3199
3200 -static struct clk_ops clk_gate_fn_ops;
3201 +static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3202 +{
3203 + return clk_gate_ops.is_enabled(hw);
3204 +}
3205 +
3206 +static struct clk_ops clk_gate_fn_ops = {
3207 + .enable = clk_gate_fn_enable,
3208 + .disable = clk_gate_fn_disable,
3209 + .is_enabled = clk_gate_fn_is_enabled,
3210 +};
3211
3212 static struct clk __init *clk_register_gate_fn(struct device *dev,
3213 const char *name,
3214 @@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3215 gate_fn->fn_en = fn_en;
3216 gate_fn->fn_dis = fn_dis;
3217
3218 - /* ops is the gate ops, but with our enable/disable functions */
3219 - if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3220 - clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3221 - clk_gate_fn_ops = clk_gate_ops;
3222 - clk_gate_fn_ops.enable = clk_gate_fn_enable;
3223 - clk_gate_fn_ops.disable = clk_gate_fn_disable;
3224 - }
3225 -
3226 clk = clk_register(dev, &gate_fn->gate.hw);
3227
3228 if (IS_ERR(clk))
3229 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3230 index 0abb30f..54064da 100644
3231 --- a/arch/arm/mach-omap2/board-n8x0.c
3232 +++ b/arch/arm/mach-omap2/board-n8x0.c
3233 @@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3234 }
3235 #endif
3236
3237 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3238 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3239 .late_init = n8x0_menelaus_late_init,
3240 };
3241
3242 diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3243 index 5d3b4f4..ddba3c0 100644
3244 --- a/arch/arm/mach-omap2/omap-wakeupgen.c
3245 +++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3246 @@ -340,7 +340,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3247 return NOTIFY_OK;
3248 }
3249
3250 -static struct notifier_block __refdata irq_hotplug_notifier = {
3251 +static struct notifier_block irq_hotplug_notifier = {
3252 .notifier_call = irq_cpu_hotplug_notify,
3253 };
3254
3255 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3256 index 4653efb..8c60bf7 100644
3257 --- a/arch/arm/mach-omap2/omap_hwmod.c
3258 +++ b/arch/arm/mach-omap2/omap_hwmod.c
3259 @@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
3260 int (*init_clkdm)(struct omap_hwmod *oh);
3261 void (*update_context_lost)(struct omap_hwmod *oh);
3262 int (*get_context_lost)(struct omap_hwmod *oh);
3263 -};
3264 +} __no_const;
3265
3266 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3267 -static struct omap_hwmod_soc_ops soc_ops;
3268 +static struct omap_hwmod_soc_ops soc_ops __read_only;
3269
3270 /* omap_hwmod_list contains all registered struct omap_hwmods */
3271 static LIST_HEAD(omap_hwmod_list);
3272 diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3273 index 7c2b4ed..b2ea51f 100644
3274 --- a/arch/arm/mach-omap2/wd_timer.c
3275 +++ b/arch/arm/mach-omap2/wd_timer.c
3276 @@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3277 struct omap_hwmod *oh;
3278 char *oh_name = "wd_timer2";
3279 char *dev_name = "omap_wdt";
3280 - struct omap_wd_timer_platform_data pdata;
3281 + static struct omap_wd_timer_platform_data pdata = {
3282 + .read_reset_sources = prm_read_reset_sources
3283 + };
3284
3285 if (!cpu_class_is_omap2() || of_have_populated_dt())
3286 return 0;
3287 @@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3288 return -EINVAL;
3289 }
3290
3291 - pdata.read_reset_sources = prm_read_reset_sources;
3292 -
3293 pdev = omap_device_build(dev_name, id, oh, &pdata,
3294 sizeof(struct omap_wd_timer_platform_data),
3295 NULL, 0, 0);
3296 diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
3297 index 6be4c4d..32ac32a 100644
3298 --- a/arch/arm/mach-ux500/include/mach/setup.h
3299 +++ b/arch/arm/mach-ux500/include/mach/setup.h
3300 @@ -38,13 +38,6 @@ extern struct sys_timer ux500_timer;
3301 .type = MT_DEVICE, \
3302 }
3303
3304 -#define __MEM_DEV_DESC(x, sz) { \
3305 - .virtual = IO_ADDRESS(x), \
3306 - .pfn = __phys_to_pfn(x), \
3307 - .length = sz, \
3308 - .type = MT_MEMORY, \
3309 -}
3310 -
3311 extern struct smp_operations ux500_smp_ops;
3312 extern void ux500_cpu_die(unsigned int cpu);
3313
3314 diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3315 index 3fd629d..8b1aca9 100644
3316 --- a/arch/arm/mm/Kconfig
3317 +++ b/arch/arm/mm/Kconfig
3318 @@ -425,7 +425,7 @@ config CPU_32v5
3319
3320 config CPU_32v6
3321 bool
3322 - select CPU_USE_DOMAINS if CPU_V6 && MMU
3323 + select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
3324 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3325
3326 config CPU_32v6K
3327 @@ -577,6 +577,7 @@ config CPU_CP15_MPU
3328
3329 config CPU_USE_DOMAINS
3330 bool
3331 + depends on !ARM_LPAE && !PAX_KERNEXEC
3332 help
3333 This option enables or disables the use of domain switching
3334 via the set_fs() function.
3335 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3336 index 5dbf13f..6393f55 100644
3337 --- a/arch/arm/mm/fault.c
3338 +++ b/arch/arm/mm/fault.c
3339 @@ -25,6 +25,7 @@
3340 #include <asm/system_misc.h>
3341 #include <asm/system_info.h>
3342 #include <asm/tlbflush.h>
3343 +#include <asm/sections.h>
3344
3345 #include "fault.h"
3346
3347 @@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3348 if (fixup_exception(regs))
3349 return;
3350
3351 +#ifdef CONFIG_PAX_KERNEXEC
3352 + if ((fsr & FSR_WRITE) &&
3353 + (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3354 + (MODULES_VADDR <= addr && addr < MODULES_END)))
3355 + {
3356 + if (current->signal->curr_ip)
3357 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3358 + from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
3359 + else
3360 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3361 + from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
3362 + }
3363 +#endif
3364 +
3365 /*
3366 * No handler, we'll have to terminate things with extreme prejudice.
3367 */
3368 @@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3369 }
3370 #endif
3371
3372 +#ifdef CONFIG_PAX_PAGEEXEC
3373 + if (fsr & FSR_LNX_PF) {
3374 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3375 + do_group_exit(SIGKILL);
3376 + }
3377 +#endif
3378 +
3379 tsk->thread.address = addr;
3380 tsk->thread.error_code = fsr;
3381 tsk->thread.trap_no = 14;
3382 @@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3383 }
3384 #endif /* CONFIG_MMU */
3385
3386 +#ifdef CONFIG_PAX_PAGEEXEC
3387 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3388 +{
3389 + long i;
3390 +
3391 + printk(KERN_ERR "PAX: bytes at PC: ");
3392 + for (i = 0; i < 20; i++) {
3393 + unsigned char c;
3394 + if (get_user(c, (__force unsigned char __user *)pc+i))
3395 + printk(KERN_CONT "?? ");
3396 + else
3397 + printk(KERN_CONT "%02x ", c);
3398 + }
3399 + printk("\n");
3400 +
3401 + printk(KERN_ERR "PAX: bytes at SP-4: ");
3402 + for (i = -1; i < 20; i++) {
3403 + unsigned long c;
3404 + if (get_user(c, (__force unsigned long __user *)sp+i))
3405 + printk(KERN_CONT "???????? ");
3406 + else
3407 + printk(KERN_CONT "%08lx ", c);
3408 + }
3409 + printk("\n");
3410 +}
3411 +#endif
3412 +
3413 /*
3414 * First Level Translation Fault Handler
3415 *
3416 @@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3417 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3418 struct siginfo info;
3419
3420 +#ifdef CONFIG_PAX_MEMORY_UDEREF
3421 + if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3422 + if (current->signal->curr_ip)
3423 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3424 + from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()), addr);
3425 + else
3426 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3427 + from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()), addr);
3428 + goto die;
3429 + }
3430 +#endif
3431 +
3432 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3433 return;
3434
3435 +die:
3436 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3437 inf->name, fsr, addr);
3438
3439 @@ -575,9 +637,38 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3440 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3441 struct siginfo info;
3442
3443 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3444 + if (!user_mode(regs) && (is_domain_fault(ifsr) || is_xn_fault(ifsr))) {
3445 + if (current->signal->curr_ip)
3446 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3447 + from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()),
3448 + addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3449 + else
3450 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3451 + from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()),
3452 + addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3453 + goto die;
3454 + }
3455 +#endif
3456 +
3457 +#ifdef CONFIG_PAX_REFCOUNT
3458 + if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3459 + unsigned int bkpt;
3460 +
3461 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3462 + current->thread.error_code = ifsr;
3463 + current->thread.trap_no = 0;
3464 + pax_report_refcount_overflow(regs);
3465 + fixup_exception(regs);
3466 + return;
3467 + }
3468 + }
3469 +#endif
3470 +
3471 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3472 return;
3473
3474 +die:
3475 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3476 inf->name, ifsr, addr);
3477
3478 diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3479 index cf08bdf..772656c 100644
3480 --- a/arch/arm/mm/fault.h
3481 +++ b/arch/arm/mm/fault.h
3482 @@ -3,6 +3,7 @@
3483
3484 /*
3485 * Fault status register encodings. We steal bit 31 for our own purposes.
3486 + * Set when the FSR value is from an instruction fault.
3487 */
3488 #define FSR_LNX_PF (1 << 31)
3489 #define FSR_WRITE (1 << 11)
3490 @@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3491 }
3492 #endif
3493
3494 +/* valid for LPAE and !LPAE */
3495 +static inline int is_xn_fault(unsigned int fsr)
3496 +{
3497 + return ((fsr_fs(fsr) & 0x3c) == 0xc);
3498 +}
3499 +
3500 +static inline int is_domain_fault(unsigned int fsr)
3501 +{
3502 + return ((fsr_fs(fsr) & 0xD) == 0x9);
3503 +}
3504 +
3505 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3506 unsigned long search_exception_table(unsigned long addr);
3507
3508 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3509 index ad722f1..763fdd3 100644
3510 --- a/arch/arm/mm/init.c
3511 +++ b/arch/arm/mm/init.c
3512 @@ -30,6 +30,8 @@
3513 #include <asm/setup.h>
3514 #include <asm/tlb.h>
3515 #include <asm/fixmap.h>
3516 +#include <asm/system_info.h>
3517 +#include <asm/cp15.h>
3518
3519 #include <asm/mach/arch.h>
3520 #include <asm/mach/map.h>
3521 @@ -736,7 +738,46 @@ void free_initmem(void)
3522 {
3523 #ifdef CONFIG_HAVE_TCM
3524 extern char __tcm_start, __tcm_end;
3525 +#endif
3526
3527 +#ifdef CONFIG_PAX_KERNEXEC
3528 + unsigned long addr;
3529 + pgd_t *pgd;
3530 + pud_t *pud;
3531 + pmd_t *pmd;
3532 + int cpu_arch = cpu_architecture();
3533 + unsigned int cr = get_cr();
3534 +
3535 + if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
3536 + /* make pages tables, etc before .text NX */
3537 + for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
3538 + pgd = pgd_offset_k(addr);
3539 + pud = pud_offset(pgd, addr);
3540 + pmd = pmd_offset(pud, addr);
3541 + __section_update(pmd, addr, PMD_SECT_XN);
3542 + }
3543 + /* make init NX */
3544 + for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
3545 + pgd = pgd_offset_k(addr);
3546 + pud = pud_offset(pgd, addr);
3547 + pmd = pmd_offset(pud, addr);
3548 + __section_update(pmd, addr, PMD_SECT_XN);
3549 + }
3550 + /* make kernel code/rodata RX */
3551 + for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
3552 + pgd = pgd_offset_k(addr);
3553 + pud = pud_offset(pgd, addr);
3554 + pmd = pmd_offset(pud, addr);
3555 +#ifdef CONFIG_ARM_LPAE
3556 + __section_update(pmd, addr, PMD_SECT_RDONLY);
3557 +#else
3558 + __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
3559 +#endif
3560 + }
3561 + }
3562 +#endif
3563 +
3564 +#ifdef CONFIG_HAVE_TCM
3565 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
3566 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
3567 __phys_to_pfn(__pa(&__tcm_end)),
3568 diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
3569 index 88fd86c..7a224ce 100644
3570 --- a/arch/arm/mm/ioremap.c
3571 +++ b/arch/arm/mm/ioremap.c
3572 @@ -335,9 +335,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
3573 unsigned int mtype;
3574
3575 if (cached)
3576 - mtype = MT_MEMORY;
3577 + mtype = MT_MEMORY_RX;
3578 else
3579 - mtype = MT_MEMORY_NONCACHED;
3580 + mtype = MT_MEMORY_NONCACHED_RX;
3581
3582 return __arm_ioremap_caller(phys_addr, size, mtype,
3583 __builtin_return_address(0));
3584 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
3585 index 10062ce..aa96dd7 100644
3586 --- a/arch/arm/mm/mmap.c
3587 +++ b/arch/arm/mm/mmap.c
3588 @@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3589 struct vm_area_struct *vma;
3590 int do_align = 0;
3591 int aliasing = cache_is_vipt_aliasing();
3592 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3593 struct vm_unmapped_area_info info;
3594
3595 /*
3596 @@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3597 if (len > TASK_SIZE)
3598 return -ENOMEM;
3599
3600 +#ifdef CONFIG_PAX_RANDMMAP
3601 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3602 +#endif
3603 +
3604 if (addr) {
3605 if (do_align)
3606 addr = COLOUR_ALIGN(addr, pgoff);
3607 @@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3608 addr = PAGE_ALIGN(addr);
3609
3610 vma = find_vma(mm, addr);
3611 - if (TASK_SIZE - len >= addr &&
3612 - (!vma || addr + len <= vma->vm_start))
3613 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3614 return addr;
3615 }
3616
3617 @@ -112,6 +116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3618 unsigned long addr = addr0;
3619 int do_align = 0;
3620 int aliasing = cache_is_vipt_aliasing();
3621 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3622 struct vm_unmapped_area_info info;
3623
3624 /*
3625 @@ -132,6 +137,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3626 return addr;
3627 }
3628
3629 +#ifdef CONFIG_PAX_RANDMMAP
3630 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3631 +#endif
3632 +
3633 /* requesting a specific address */
3634 if (addr) {
3635 if (do_align)
3636 @@ -139,8 +148,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3637 else
3638 addr = PAGE_ALIGN(addr);
3639 vma = find_vma(mm, addr);
3640 - if (TASK_SIZE - len >= addr &&
3641 - (!vma || addr + len <= vma->vm_start))
3642 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3643 return addr;
3644 }
3645
3646 @@ -162,6 +170,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3647 VM_BUG_ON(addr != -ENOMEM);
3648 info.flags = 0;
3649 info.low_limit = mm->mmap_base;
3650 +
3651 +#ifdef CONFIG_PAX_RANDMMAP
3652 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3653 + info.low_limit += mm->delta_mmap;
3654 +#endif
3655 +
3656 info.high_limit = TASK_SIZE;
3657 addr = vm_unmapped_area(&info);
3658 }
3659 @@ -173,6 +187,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3660 {
3661 unsigned long random_factor = 0UL;
3662
3663 +#ifdef CONFIG_PAX_RANDMMAP
3664 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3665 +#endif
3666 +
3667 /* 8 bits of randomness in 20 address space bits */
3668 if ((current->flags & PF_RANDOMIZE) &&
3669 !(current->personality & ADDR_NO_RANDOMIZE))
3670 @@ -180,10 +198,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3671
3672 if (mmap_is_legacy()) {
3673 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3674 +
3675 +#ifdef CONFIG_PAX_RANDMMAP
3676 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3677 + mm->mmap_base += mm->delta_mmap;
3678 +#endif
3679 +
3680 mm->get_unmapped_area = arch_get_unmapped_area;
3681 mm->unmap_area = arch_unmap_area;
3682 } else {
3683 mm->mmap_base = mmap_base(random_factor);
3684 +
3685 +#ifdef CONFIG_PAX_RANDMMAP
3686 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3687 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3688 +#endif
3689 +
3690 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3691 mm->unmap_area = arch_unmap_area_topdown;
3692 }
3693 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
3694 index ce328c7..f82bebb 100644
3695 --- a/arch/arm/mm/mmu.c
3696 +++ b/arch/arm/mm/mmu.c
3697 @@ -35,6 +35,23 @@
3698
3699 #include "mm.h"
3700
3701 +
3702 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3703 +void modify_domain(unsigned int dom, unsigned int type)
3704 +{
3705 + struct thread_info *thread = current_thread_info();
3706 + unsigned int domain = thread->cpu_domain;
3707 + /*
3708 + * DOMAIN_MANAGER might be defined to some other value,
3709 + * use the arch-defined constant
3710 + */
3711 + domain &= ~domain_val(dom, 3);
3712 + thread->cpu_domain = domain | domain_val(dom, type);
3713 + set_domain(thread->cpu_domain);
3714 +}
3715 +EXPORT_SYMBOL(modify_domain);
3716 +#endif
3717 +
3718 /*
3719 * empty_zero_page is a special page that is used for
3720 * zero-initialized data and COW.
3721 @@ -195,10 +212,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
3722 }
3723 #endif
3724
3725 -#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
3726 +#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
3727 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
3728
3729 -static struct mem_type mem_types[] = {
3730 +#ifdef CONFIG_PAX_KERNEXEC
3731 +#define L_PTE_KERNEXEC L_PTE_RDONLY
3732 +#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
3733 +#else
3734 +#define L_PTE_KERNEXEC L_PTE_DIRTY
3735 +#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
3736 +#endif
3737 +
3738 +static struct mem_type mem_types[] __read_only = {
3739 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
3740 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
3741 L_PTE_SHARED,
3742 @@ -227,16 +252,16 @@ static struct mem_type mem_types[] = {
3743 [MT_UNCACHED] = {
3744 .prot_pte = PROT_PTE_DEVICE,
3745 .prot_l1 = PMD_TYPE_TABLE,
3746 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3747 + .prot_sect = PROT_SECT_DEVICE,
3748 .domain = DOMAIN_IO,
3749 },
3750 [MT_CACHECLEAN] = {
3751 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3752 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3753 .domain = DOMAIN_KERNEL,
3754 },
3755 #ifndef CONFIG_ARM_LPAE
3756 [MT_MINICLEAN] = {
3757 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
3758 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
3759 .domain = DOMAIN_KERNEL,
3760 },
3761 #endif
3762 @@ -244,36 +269,54 @@ static struct mem_type mem_types[] = {
3763 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3764 L_PTE_RDONLY,
3765 .prot_l1 = PMD_TYPE_TABLE,
3766 - .domain = DOMAIN_USER,
3767 + .domain = DOMAIN_VECTORS,
3768 },
3769 [MT_HIGH_VECTORS] = {
3770 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3771 L_PTE_USER | L_PTE_RDONLY,
3772 .prot_l1 = PMD_TYPE_TABLE,
3773 - .domain = DOMAIN_USER,
3774 + .domain = DOMAIN_VECTORS,
3775 },
3776 - [MT_MEMORY] = {
3777 + [MT_MEMORY_RWX] = {
3778 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
3779 .prot_l1 = PMD_TYPE_TABLE,
3780 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
3781 .domain = DOMAIN_KERNEL,
3782 },
3783 + [MT_MEMORY_RW] = {
3784 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
3785 + .prot_l1 = PMD_TYPE_TABLE,
3786 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
3787 + .domain = DOMAIN_KERNEL,
3788 + },
3789 + [MT_MEMORY_RX] = {
3790 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
3791 + .prot_l1 = PMD_TYPE_TABLE,
3792 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
3793 + .domain = DOMAIN_KERNEL,
3794 + },
3795 [MT_ROM] = {
3796 - .prot_sect = PMD_TYPE_SECT,
3797 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3798 .domain = DOMAIN_KERNEL,
3799 },
3800 - [MT_MEMORY_NONCACHED] = {
3801 + [MT_MEMORY_NONCACHED_RW] = {
3802 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3803 L_PTE_MT_BUFFERABLE,
3804 .prot_l1 = PMD_TYPE_TABLE,
3805 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
3806 .domain = DOMAIN_KERNEL,
3807 },
3808 + [MT_MEMORY_NONCACHED_RX] = {
3809 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
3810 + L_PTE_MT_BUFFERABLE,
3811 + .prot_l1 = PMD_TYPE_TABLE,
3812 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
3813 + .domain = DOMAIN_KERNEL,
3814 + },
3815 [MT_MEMORY_DTCM] = {
3816 - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3817 - L_PTE_XN,
3818 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
3819 .prot_l1 = PMD_TYPE_TABLE,
3820 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3821 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3822 .domain = DOMAIN_KERNEL,
3823 },
3824 [MT_MEMORY_ITCM] = {
3825 @@ -283,10 +326,10 @@ static struct mem_type mem_types[] = {
3826 },
3827 [MT_MEMORY_SO] = {
3828 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3829 - L_PTE_MT_UNCACHED | L_PTE_XN,
3830 + L_PTE_MT_UNCACHED,
3831 .prot_l1 = PMD_TYPE_TABLE,
3832 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
3833 - PMD_SECT_UNCACHED | PMD_SECT_XN,
3834 + PMD_SECT_UNCACHED,
3835 .domain = DOMAIN_KERNEL,
3836 },
3837 [MT_MEMORY_DMA_READY] = {
3838 @@ -371,9 +414,35 @@ static void __init build_mem_type_table(void)
3839 * to prevent speculative instruction fetches.
3840 */
3841 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
3842 + mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
3843 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
3844 + mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
3845 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
3846 + mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
3847 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
3848 + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
3849 +
3850 + /* Mark other regions on ARMv6+ as execute-never */
3851 +
3852 +#ifdef CONFIG_PAX_KERNEXEC
3853 + mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
3854 + mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
3855 + mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
3856 + mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
3857 +#ifndef CONFIG_ARM_LPAE
3858 + mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
3859 + mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
3860 +#endif
3861 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
3862 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
3863 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
3864 + mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
3865 + mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
3866 + mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
3867 +#endif
3868 +
3869 + mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
3870 + mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
3871 }
3872 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
3873 /*
3874 @@ -432,6 +501,9 @@ static void __init build_mem_type_table(void)
3875 * from SVC mode and no access from userspace.
3876 */
3877 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3878 +#ifdef CONFIG_PAX_KERNEXEC
3879 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3880 +#endif
3881 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3882 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3883 #endif
3884 @@ -448,11 +520,17 @@ static void __init build_mem_type_table(void)
3885 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
3886 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
3887 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
3888 - mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
3889 - mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
3890 + mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
3891 + mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
3892 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
3893 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
3894 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
3895 + mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
3896 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
3897 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
3898 - mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
3899 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
3900 + mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
3901 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
3902 + mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
3903 }
3904 }
3905
3906 @@ -463,15 +541,20 @@ static void __init build_mem_type_table(void)
3907 if (cpu_arch >= CPU_ARCH_ARMv6) {
3908 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
3909 /* Non-cacheable Normal is XCB = 001 */
3910 - mem_types[MT_MEMORY_NONCACHED].prot_sect |=
3911 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
3912 + PMD_SECT_BUFFERED;
3913 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
3914 PMD_SECT_BUFFERED;
3915 } else {
3916 /* For both ARMv6 and non-TEX-remapping ARMv7 */
3917 - mem_types[MT_MEMORY_NONCACHED].prot_sect |=
3918 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
3919 + PMD_SECT_TEX(1);
3920 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
3921 PMD_SECT_TEX(1);
3922 }
3923 } else {
3924 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
3925 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
3926 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
3927 }
3928
3929 #ifdef CONFIG_ARM_LPAE
3930 @@ -487,6 +570,8 @@ static void __init build_mem_type_table(void)
3931 vecs_pgprot |= PTE_EXT_AF;
3932 #endif
3933
3934 + user_pgprot |= __supported_pte_mask;
3935 +
3936 for (i = 0; i < 16; i++) {
3937 pteval_t v = pgprot_val(protection_map[i]);
3938 protection_map[i] = __pgprot(v | user_pgprot);
3939 @@ -501,10 +586,15 @@ static void __init build_mem_type_table(void)
3940
3941 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
3942 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
3943 - mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
3944 - mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
3945 + mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
3946 + mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
3947 + mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
3948 + mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
3949 + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
3950 + mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
3951 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
3952 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
3953 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
3954 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
3955 mem_types[MT_ROM].prot_sect |= cp->pmd;
3956
3957 switch (cp->pmd) {
3958 @@ -1105,18 +1195,15 @@ void __init arm_mm_memblock_reserve(void)
3959 * called function. This means you can't use any function or debugging
3960 * method which may touch any device, otherwise the kernel _will_ crash.
3961 */
3962 +
3963 +static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
3964 +
3965 static void __init devicemaps_init(struct machine_desc *mdesc)
3966 {
3967 struct map_desc map;
3968 unsigned long addr;
3969 - void *vectors;
3970
3971 - /*
3972 - * Allocate the vector page early.
3973 - */
3974 - vectors = early_alloc(PAGE_SIZE);
3975 -
3976 - early_trap_init(vectors);
3977 + early_trap_init(&vectors);
3978
3979 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
3980 pmd_clear(pmd_off_k(addr));
3981 @@ -1156,7 +1243,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
3982 * location (0xffff0000). If we aren't using high-vectors, also
3983 * create a mapping at the low-vectors virtual address.
3984 */
3985 - map.pfn = __phys_to_pfn(virt_to_phys(vectors));
3986 + map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
3987 map.virtual = 0xffff0000;
3988 map.length = PAGE_SIZE;
3989 map.type = MT_HIGH_VECTORS;
3990 @@ -1214,8 +1301,39 @@ static void __init map_lowmem(void)
3991 map.pfn = __phys_to_pfn(start);
3992 map.virtual = __phys_to_virt(start);
3993 map.length = end - start;
3994 - map.type = MT_MEMORY;
3995
3996 +#ifdef CONFIG_PAX_KERNEXEC
3997 + if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
3998 + struct map_desc kernel;
3999 + struct map_desc initmap;
4000 +
4001 + /* when freeing initmem we will make this RW */
4002 + initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4003 + initmap.virtual = (unsigned long)__init_begin;
4004 + initmap.length = _sdata - __init_begin;
4005 + initmap.type = MT_MEMORY_RWX;
4006 + create_mapping(&initmap);
4007 +
4008 + /* when freeing initmem we will make this RX */
4009 + kernel.pfn = __phys_to_pfn(__pa(_stext));
4010 + kernel.virtual = (unsigned long)_stext;
4011 + kernel.length = __init_begin - _stext;
4012 + kernel.type = MT_MEMORY_RWX;
4013 + create_mapping(&kernel);
4014 +
4015 + if (map.virtual < (unsigned long)_stext) {
4016 + map.length = (unsigned long)_stext - map.virtual;
4017 + map.type = MT_MEMORY_RWX;
4018 + create_mapping(&map);
4019 + }
4020 +
4021 + map.pfn = __phys_to_pfn(__pa(_sdata));
4022 + map.virtual = (unsigned long)_sdata;
4023 + map.length = end - __pa(_sdata);
4024 + }
4025 +#endif
4026 +
4027 + map.type = MT_MEMORY_RW;
4028 create_mapping(&map);
4029 }
4030 }
4031 diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4032 index 6d98c13..3cfb174 100644
4033 --- a/arch/arm/mm/proc-v7-2level.S
4034 +++ b/arch/arm/mm/proc-v7-2level.S
4035 @@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4036 tst r1, #L_PTE_XN
4037 orrne r3, r3, #PTE_EXT_XN
4038
4039 + tst r1, #L_PTE_PXN
4040 + orrne r3, r3, #PTE_EXT_PXN
4041 +
4042 tst r1, #L_PTE_YOUNG
4043 tstne r1, #L_PTE_VALID
4044 #ifndef CONFIG_CPU_USE_DOMAINS
4045 diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4046 index a5bc92d..0bb4730 100644
4047 --- a/arch/arm/plat-omap/sram.c
4048 +++ b/arch/arm/plat-omap/sram.c
4049 @@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4050 * Looks like we need to preserve some bootloader code at the
4051 * beginning of SRAM for jumping to flash for reboot to work...
4052 */
4053 + pax_open_kernel();
4054 memset_io(omap_sram_base + omap_sram_skip, 0,
4055 omap_sram_size - omap_sram_skip);
4056 + pax_close_kernel();
4057 }
4058 diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
4059 index b76c065..b6e766b 100644
4060 --- a/arch/arm/plat-orion/include/plat/addr-map.h
4061 +++ b/arch/arm/plat-orion/include/plat/addr-map.h
4062 @@ -27,7 +27,7 @@ struct orion_addr_map_cfg {
4063 value in bridge_virt_base */
4064 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
4065 const int win);
4066 -};
4067 +} __no_const;
4068
4069 /*
4070 * Information needed to setup one address mapping.
4071 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4072 index f5144cd..71f6d1f 100644
4073 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4074 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4075 @@ -47,7 +47,7 @@ struct samsung_dma_ops {
4076 int (*started)(unsigned ch);
4077 int (*flush)(unsigned ch);
4078 int (*stop)(unsigned ch);
4079 -};
4080 +} __no_const;
4081
4082 extern void *samsung_dmadev_get_ops(void);
4083 extern void *s3c_dma_get_ops(void);
4084 diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4085 index 0c3ba9f..95722b3 100644
4086 --- a/arch/arm64/kernel/debug-monitors.c
4087 +++ b/arch/arm64/kernel/debug-monitors.c
4088 @@ -151,7 +151,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4089 return NOTIFY_OK;
4090 }
4091
4092 -static struct notifier_block __cpuinitdata os_lock_nb = {
4093 +static struct notifier_block os_lock_nb = {
4094 .notifier_call = os_lock_notify,
4095 };
4096
4097 diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4098 index 5ab825c..96aaec8 100644
4099 --- a/arch/arm64/kernel/hw_breakpoint.c
4100 +++ b/arch/arm64/kernel/hw_breakpoint.c
4101 @@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4102 return NOTIFY_OK;
4103 }
4104
4105 -static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4106 +static struct notifier_block hw_breakpoint_reset_nb = {
4107 .notifier_call = hw_breakpoint_reset_notify,
4108 };
4109
4110 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4111 index c3a58a1..78fbf54 100644
4112 --- a/arch/avr32/include/asm/cache.h
4113 +++ b/arch/avr32/include/asm/cache.h
4114 @@ -1,8 +1,10 @@
4115 #ifndef __ASM_AVR32_CACHE_H
4116 #define __ASM_AVR32_CACHE_H
4117
4118 +#include <linux/const.h>
4119 +
4120 #define L1_CACHE_SHIFT 5
4121 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4122 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4123
4124 /*
4125 * Memory returned by kmalloc() may be used for DMA, so we must make
4126 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4127 index e2c3287..6c4f98c 100644
4128 --- a/arch/avr32/include/asm/elf.h
4129 +++ b/arch/avr32/include/asm/elf.h
4130 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4131 the loader. We need to make sure that it is out of the way of the program
4132 that it will "exec", and that there is sufficient room for the brk. */
4133
4134 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4135 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4136
4137 +#ifdef CONFIG_PAX_ASLR
4138 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4139 +
4140 +#define PAX_DELTA_MMAP_LEN 15
4141 +#define PAX_DELTA_STACK_LEN 15
4142 +#endif
4143
4144 /* This yields a mask that user programs can use to figure out what
4145 instruction set this CPU supports. This could be done in user space,
4146 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4147 index 479330b..53717a8 100644
4148 --- a/arch/avr32/include/asm/kmap_types.h
4149 +++ b/arch/avr32/include/asm/kmap_types.h
4150 @@ -2,9 +2,9 @@
4151 #define __ASM_AVR32_KMAP_TYPES_H
4152
4153 #ifdef CONFIG_DEBUG_HIGHMEM
4154 -# define KM_TYPE_NR 29
4155 +# define KM_TYPE_NR 30
4156 #else
4157 -# define KM_TYPE_NR 14
4158 +# define KM_TYPE_NR 15
4159 #endif
4160
4161 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4162 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4163 index b2f2d2d..d1c85cb 100644
4164 --- a/arch/avr32/mm/fault.c
4165 +++ b/arch/avr32/mm/fault.c
4166 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4167
4168 int exception_trace = 1;
4169
4170 +#ifdef CONFIG_PAX_PAGEEXEC
4171 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4172 +{
4173 + unsigned long i;
4174 +
4175 + printk(KERN_ERR "PAX: bytes at PC: ");
4176 + for (i = 0; i < 20; i++) {
4177 + unsigned char c;
4178 + if (get_user(c, (unsigned char *)pc+i))
4179 + printk(KERN_CONT "???????? ");
4180 + else
4181 + printk(KERN_CONT "%02x ", c);
4182 + }
4183 + printk("\n");
4184 +}
4185 +#endif
4186 +
4187 /*
4188 * This routine handles page faults. It determines the address and the
4189 * problem, and then passes it off to one of the appropriate routines.
4190 @@ -174,6 +191,16 @@ bad_area:
4191 up_read(&mm->mmap_sem);
4192
4193 if (user_mode(regs)) {
4194 +
4195 +#ifdef CONFIG_PAX_PAGEEXEC
4196 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4197 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4198 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4199 + do_group_exit(SIGKILL);
4200 + }
4201 + }
4202 +#endif
4203 +
4204 if (exception_trace && printk_ratelimit())
4205 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4206 "sp %08lx ecr %lu\n",
4207 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4208 index 568885a..f8008df 100644
4209 --- a/arch/blackfin/include/asm/cache.h
4210 +++ b/arch/blackfin/include/asm/cache.h
4211 @@ -7,6 +7,7 @@
4212 #ifndef __ARCH_BLACKFIN_CACHE_H
4213 #define __ARCH_BLACKFIN_CACHE_H
4214
4215 +#include <linux/const.h>
4216 #include <linux/linkage.h> /* for asmlinkage */
4217
4218 /*
4219 @@ -14,7 +15,7 @@
4220 * Blackfin loads 32 bytes for cache
4221 */
4222 #define L1_CACHE_SHIFT 5
4223 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4224 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4225 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4226
4227 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4228 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4229 index aea2718..3639a60 100644
4230 --- a/arch/cris/include/arch-v10/arch/cache.h
4231 +++ b/arch/cris/include/arch-v10/arch/cache.h
4232 @@ -1,8 +1,9 @@
4233 #ifndef _ASM_ARCH_CACHE_H
4234 #define _ASM_ARCH_CACHE_H
4235
4236 +#include <linux/const.h>
4237 /* Etrax 100LX have 32-byte cache-lines. */
4238 -#define L1_CACHE_BYTES 32
4239 #define L1_CACHE_SHIFT 5
4240 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4241
4242 #endif /* _ASM_ARCH_CACHE_H */
4243 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4244 index 7caf25d..ee65ac5 100644
4245 --- a/arch/cris/include/arch-v32/arch/cache.h
4246 +++ b/arch/cris/include/arch-v32/arch/cache.h
4247 @@ -1,11 +1,12 @@
4248 #ifndef _ASM_CRIS_ARCH_CACHE_H
4249 #define _ASM_CRIS_ARCH_CACHE_H
4250
4251 +#include <linux/const.h>
4252 #include <arch/hwregs/dma.h>
4253
4254 /* A cache-line is 32 bytes. */
4255 -#define L1_CACHE_BYTES 32
4256 #define L1_CACHE_SHIFT 5
4257 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4258
4259 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4260
4261 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4262 index b86329d..6709906 100644
4263 --- a/arch/frv/include/asm/atomic.h
4264 +++ b/arch/frv/include/asm/atomic.h
4265 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4266 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4267 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4268
4269 +#define atomic64_read_unchecked(v) atomic64_read(v)
4270 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4271 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4272 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4273 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4274 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4275 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4276 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4277 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4278 +
4279 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4280 {
4281 int c, old;
4282 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4283 index 2797163..c2a401d 100644
4284 --- a/arch/frv/include/asm/cache.h
4285 +++ b/arch/frv/include/asm/cache.h
4286 @@ -12,10 +12,11 @@
4287 #ifndef __ASM_CACHE_H
4288 #define __ASM_CACHE_H
4289
4290 +#include <linux/const.h>
4291
4292 /* bytes per L1 cache line */
4293 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4294 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4295 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4296
4297 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4298 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4299 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4300 index 43901f2..0d8b865 100644
4301 --- a/arch/frv/include/asm/kmap_types.h
4302 +++ b/arch/frv/include/asm/kmap_types.h
4303 @@ -2,6 +2,6 @@
4304 #ifndef _ASM_KMAP_TYPES_H
4305 #define _ASM_KMAP_TYPES_H
4306
4307 -#define KM_TYPE_NR 17
4308 +#define KM_TYPE_NR 18
4309
4310 #endif
4311 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4312 index 385fd30..3aaf4fe 100644
4313 --- a/arch/frv/mm/elf-fdpic.c
4314 +++ b/arch/frv/mm/elf-fdpic.c
4315 @@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4316 {
4317 struct vm_area_struct *vma;
4318 unsigned long limit;
4319 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4320
4321 if (len > TASK_SIZE)
4322 return -ENOMEM;
4323 @@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4324 if (addr) {
4325 addr = PAGE_ALIGN(addr);
4326 vma = find_vma(current->mm, addr);
4327 - if (TASK_SIZE - len >= addr &&
4328 - (!vma || addr + len <= vma->vm_start))
4329 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4330 goto success;
4331 }
4332
4333 @@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4334 for (; vma; vma = vma->vm_next) {
4335 if (addr > limit)
4336 break;
4337 - if (addr + len <= vma->vm_start)
4338 + if (check_heap_stack_gap(vma, addr, len, offset))
4339 goto success;
4340 addr = vma->vm_end;
4341 }
4342 @@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4343 for (; vma; vma = vma->vm_next) {
4344 if (addr > limit)
4345 break;
4346 - if (addr + len <= vma->vm_start)
4347 + if (check_heap_stack_gap(vma, addr, len, offset))
4348 goto success;
4349 addr = vma->vm_end;
4350 }
4351 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4352 index f4ca594..adc72fd6 100644
4353 --- a/arch/hexagon/include/asm/cache.h
4354 +++ b/arch/hexagon/include/asm/cache.h
4355 @@ -21,9 +21,11 @@
4356 #ifndef __ASM_CACHE_H
4357 #define __ASM_CACHE_H
4358
4359 +#include <linux/const.h>
4360 +
4361 /* Bytes per L1 cache line */
4362 -#define L1_CACHE_SHIFT (5)
4363 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4364 +#define L1_CACHE_SHIFT 5
4365 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4366
4367 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4368 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4369 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4370 index 6e6fe18..a6ae668 100644
4371 --- a/arch/ia64/include/asm/atomic.h
4372 +++ b/arch/ia64/include/asm/atomic.h
4373 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4374 #define atomic64_inc(v) atomic64_add(1, (v))
4375 #define atomic64_dec(v) atomic64_sub(1, (v))
4376
4377 +#define atomic64_read_unchecked(v) atomic64_read(v)
4378 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4379 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4380 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4381 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4382 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4383 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4384 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4385 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4386 +
4387 /* Atomic operations are already serializing */
4388 #define smp_mb__before_atomic_dec() barrier()
4389 #define smp_mb__after_atomic_dec() barrier()
4390 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4391 index 988254a..e1ee885 100644
4392 --- a/arch/ia64/include/asm/cache.h
4393 +++ b/arch/ia64/include/asm/cache.h
4394 @@ -1,6 +1,7 @@
4395 #ifndef _ASM_IA64_CACHE_H
4396 #define _ASM_IA64_CACHE_H
4397
4398 +#include <linux/const.h>
4399
4400 /*
4401 * Copyright (C) 1998-2000 Hewlett-Packard Co
4402 @@ -9,7 +10,7 @@
4403
4404 /* Bytes per L1 (data) cache line. */
4405 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4406 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4407 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4408
4409 #ifdef CONFIG_SMP
4410 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4411 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4412 index b5298eb..67c6e62 100644
4413 --- a/arch/ia64/include/asm/elf.h
4414 +++ b/arch/ia64/include/asm/elf.h
4415 @@ -42,6 +42,13 @@
4416 */
4417 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4418
4419 +#ifdef CONFIG_PAX_ASLR
4420 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4421 +
4422 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4423 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4424 +#endif
4425 +
4426 #define PT_IA_64_UNWIND 0x70000001
4427
4428 /* IA-64 relocations: */
4429 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4430 index 96a8d92..617a1cf 100644
4431 --- a/arch/ia64/include/asm/pgalloc.h
4432 +++ b/arch/ia64/include/asm/pgalloc.h
4433 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4434 pgd_val(*pgd_entry) = __pa(pud);
4435 }
4436
4437 +static inline void
4438 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4439 +{
4440 + pgd_populate(mm, pgd_entry, pud);
4441 +}
4442 +
4443 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4444 {
4445 return quicklist_alloc(0, GFP_KERNEL, NULL);
4446 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4447 pud_val(*pud_entry) = __pa(pmd);
4448 }
4449
4450 +static inline void
4451 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4452 +{
4453 + pud_populate(mm, pud_entry, pmd);
4454 +}
4455 +
4456 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4457 {
4458 return quicklist_alloc(0, GFP_KERNEL, NULL);
4459 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4460 index 815810c..d60bd4c 100644
4461 --- a/arch/ia64/include/asm/pgtable.h
4462 +++ b/arch/ia64/include/asm/pgtable.h
4463 @@ -12,7 +12,7 @@
4464 * David Mosberger-Tang <davidm@hpl.hp.com>
4465 */
4466
4467 -
4468 +#include <linux/const.h>
4469 #include <asm/mman.h>
4470 #include <asm/page.h>
4471 #include <asm/processor.h>
4472 @@ -142,6 +142,17 @@
4473 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4474 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4475 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4476 +
4477 +#ifdef CONFIG_PAX_PAGEEXEC
4478 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4479 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4480 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4481 +#else
4482 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4483 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4484 +# define PAGE_COPY_NOEXEC PAGE_COPY
4485 +#endif
4486 +
4487 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4488 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4489 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4490 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4491 index 54ff557..70c88b7 100644
4492 --- a/arch/ia64/include/asm/spinlock.h
4493 +++ b/arch/ia64/include/asm/spinlock.h
4494 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4495 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4496
4497 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4498 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4499 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4500 }
4501
4502 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4503 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4504 index 449c8c0..50cdf87 100644
4505 --- a/arch/ia64/include/asm/uaccess.h
4506 +++ b/arch/ia64/include/asm/uaccess.h
4507 @@ -42,6 +42,8 @@
4508 #include <asm/pgtable.h>
4509 #include <asm/io.h>
4510
4511 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4512 +
4513 /*
4514 * For historical reasons, the following macros are grossly misnamed:
4515 */
4516 @@ -240,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4517 static inline unsigned long
4518 __copy_to_user (void __user *to, const void *from, unsigned long count)
4519 {
4520 + if (count > INT_MAX)
4521 + return count;
4522 +
4523 + if (!__builtin_constant_p(count))
4524 + check_object_size(from, count, true);
4525 +
4526 return __copy_user(to, (__force void __user *) from, count);
4527 }
4528
4529 static inline unsigned long
4530 __copy_from_user (void *to, const void __user *from, unsigned long count)
4531 {
4532 + if (count > INT_MAX)
4533 + return count;
4534 +
4535 + if (!__builtin_constant_p(count))
4536 + check_object_size(to, count, false);
4537 +
4538 return __copy_user((__force void __user *) to, from, count);
4539 }
4540
4541 @@ -255,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4542 ({ \
4543 void __user *__cu_to = (to); \
4544 const void *__cu_from = (from); \
4545 - long __cu_len = (n); \
4546 + unsigned long __cu_len = (n); \
4547 \
4548 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
4549 + if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
4550 + if (!__builtin_constant_p(n)) \
4551 + check_object_size(__cu_from, __cu_len, true); \
4552 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
4553 + } \
4554 __cu_len; \
4555 })
4556
4557 @@ -266,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4558 ({ \
4559 void *__cu_to = (to); \
4560 const void __user *__cu_from = (from); \
4561 - long __cu_len = (n); \
4562 + unsigned long __cu_len = (n); \
4563 \
4564 __chk_user_ptr(__cu_from); \
4565 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
4566 + if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
4567 + if (!__builtin_constant_p(n)) \
4568 + check_object_size(__cu_to, __cu_len, false); \
4569 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
4570 + } \
4571 __cu_len; \
4572 })
4573
4574 diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
4575 index 2d67317..07d8bfa 100644
4576 --- a/arch/ia64/kernel/err_inject.c
4577 +++ b/arch/ia64/kernel/err_inject.c
4578 @@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
4579 return NOTIFY_OK;
4580 }
4581
4582 -static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
4583 +static struct notifier_block err_inject_cpu_notifier =
4584 {
4585 .notifier_call = err_inject_cpu_callback,
4586 };
4587 diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
4588 index 65bf9cd..794f06b 100644
4589 --- a/arch/ia64/kernel/mca.c
4590 +++ b/arch/ia64/kernel/mca.c
4591 @@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
4592 return NOTIFY_OK;
4593 }
4594
4595 -static struct notifier_block mca_cpu_notifier __cpuinitdata = {
4596 +static struct notifier_block mca_cpu_notifier = {
4597 .notifier_call = mca_cpu_callback
4598 };
4599
4600 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
4601 index 24603be..948052d 100644
4602 --- a/arch/ia64/kernel/module.c
4603 +++ b/arch/ia64/kernel/module.c
4604 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
4605 void
4606 module_free (struct module *mod, void *module_region)
4607 {
4608 - if (mod && mod->arch.init_unw_table &&
4609 - module_region == mod->module_init) {
4610 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
4611 unw_remove_unwind_table(mod->arch.init_unw_table);
4612 mod->arch.init_unw_table = NULL;
4613 }
4614 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
4615 }
4616
4617 static inline int
4618 +in_init_rx (const struct module *mod, uint64_t addr)
4619 +{
4620 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
4621 +}
4622 +
4623 +static inline int
4624 +in_init_rw (const struct module *mod, uint64_t addr)
4625 +{
4626 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
4627 +}
4628 +
4629 +static inline int
4630 in_init (const struct module *mod, uint64_t addr)
4631 {
4632 - return addr - (uint64_t) mod->module_init < mod->init_size;
4633 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
4634 +}
4635 +
4636 +static inline int
4637 +in_core_rx (const struct module *mod, uint64_t addr)
4638 +{
4639 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
4640 +}
4641 +
4642 +static inline int
4643 +in_core_rw (const struct module *mod, uint64_t addr)
4644 +{
4645 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
4646 }
4647
4648 static inline int
4649 in_core (const struct module *mod, uint64_t addr)
4650 {
4651 - return addr - (uint64_t) mod->module_core < mod->core_size;
4652 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
4653 }
4654
4655 static inline int
4656 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
4657 break;
4658
4659 case RV_BDREL:
4660 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
4661 + if (in_init_rx(mod, val))
4662 + val -= (uint64_t) mod->module_init_rx;
4663 + else if (in_init_rw(mod, val))
4664 + val -= (uint64_t) mod->module_init_rw;
4665 + else if (in_core_rx(mod, val))
4666 + val -= (uint64_t) mod->module_core_rx;
4667 + else if (in_core_rw(mod, val))
4668 + val -= (uint64_t) mod->module_core_rw;
4669 break;
4670
4671 case RV_LTV:
4672 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
4673 * addresses have been selected...
4674 */
4675 uint64_t gp;
4676 - if (mod->core_size > MAX_LTOFF)
4677 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
4678 /*
4679 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
4680 * at the end of the module.
4681 */
4682 - gp = mod->core_size - MAX_LTOFF / 2;
4683 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
4684 else
4685 - gp = mod->core_size / 2;
4686 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
4687 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
4688 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
4689 mod->arch.gp = gp;
4690 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
4691 }
4692 diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
4693 index 77597e5..6f28f3f 100644
4694 --- a/arch/ia64/kernel/palinfo.c
4695 +++ b/arch/ia64/kernel/palinfo.c
4696 @@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
4697 return NOTIFY_OK;
4698 }
4699
4700 -static struct notifier_block __refdata palinfo_cpu_notifier =
4701 +static struct notifier_block palinfo_cpu_notifier =
4702 {
4703 .notifier_call = palinfo_cpu_callback,
4704 .priority = 0,
4705 diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
4706 index 79802e5..1a89ec5 100644
4707 --- a/arch/ia64/kernel/salinfo.c
4708 +++ b/arch/ia64/kernel/salinfo.c
4709 @@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
4710 return NOTIFY_OK;
4711 }
4712
4713 -static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
4714 +static struct notifier_block salinfo_cpu_notifier =
4715 {
4716 .notifier_call = salinfo_cpu_callback,
4717 .priority = 0,
4718 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
4719 index d9439ef..d0cac6b 100644
4720 --- a/arch/ia64/kernel/sys_ia64.c
4721 +++ b/arch/ia64/kernel/sys_ia64.c
4722 @@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4723 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
4724 struct mm_struct *mm = current->mm;
4725 struct vm_area_struct *vma;
4726 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4727
4728 if (len > RGN_MAP_LIMIT)
4729 return -ENOMEM;
4730 @@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4731 if (REGION_NUMBER(addr) == RGN_HPAGE)
4732 addr = 0;
4733 #endif
4734 +
4735 +#ifdef CONFIG_PAX_RANDMMAP
4736 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4737 + addr = mm->free_area_cache;
4738 + else
4739 +#endif
4740 +
4741 if (!addr)
4742 addr = mm->free_area_cache;
4743
4744 @@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4745 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
4746 /* At this point: (!vma || addr < vma->vm_end). */
4747 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
4748 - if (start_addr != TASK_UNMAPPED_BASE) {
4749 + if (start_addr != mm->mmap_base) {
4750 /* Start a new search --- just in case we missed some holes. */
4751 - addr = TASK_UNMAPPED_BASE;
4752 + addr = mm->mmap_base;
4753 goto full_search;
4754 }
4755 return -ENOMEM;
4756 }
4757 - if (!vma || addr + len <= vma->vm_start) {
4758 + if (check_heap_stack_gap(vma, addr, len, offset)) {
4759 /* Remember the address where we stopped this search: */
4760 mm->free_area_cache = addr + len;
4761 return addr;
4762 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
4763 index dc00b2c..cce53c2 100644
4764 --- a/arch/ia64/kernel/topology.c
4765 +++ b/arch/ia64/kernel/topology.c
4766 @@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
4767 return NOTIFY_OK;
4768 }
4769
4770 -static struct notifier_block __cpuinitdata cache_cpu_notifier =
4771 +static struct notifier_block cache_cpu_notifier =
4772 {
4773 .notifier_call = cache_cpu_callback
4774 };
4775 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
4776 index 0ccb28f..8992469 100644
4777 --- a/arch/ia64/kernel/vmlinux.lds.S
4778 +++ b/arch/ia64/kernel/vmlinux.lds.S
4779 @@ -198,7 +198,7 @@ SECTIONS {
4780 /* Per-cpu data: */
4781 . = ALIGN(PERCPU_PAGE_SIZE);
4782 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
4783 - __phys_per_cpu_start = __per_cpu_load;
4784 + __phys_per_cpu_start = per_cpu_load;
4785 /*
4786 * ensure percpu data fits
4787 * into percpu page size
4788 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
4789 index 6cf0341..d352594 100644
4790 --- a/arch/ia64/mm/fault.c
4791 +++ b/arch/ia64/mm/fault.c
4792 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
4793 return pte_present(pte);
4794 }
4795
4796 +#ifdef CONFIG_PAX_PAGEEXEC
4797 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4798 +{
4799 + unsigned long i;
4800 +
4801 + printk(KERN_ERR "PAX: bytes at PC: ");
4802 + for (i = 0; i < 8; i++) {
4803 + unsigned int c;
4804 + if (get_user(c, (unsigned int *)pc+i))
4805 + printk(KERN_CONT "???????? ");
4806 + else
4807 + printk(KERN_CONT "%08x ", c);
4808 + }
4809 + printk("\n");
4810 +}
4811 +#endif
4812 +
4813 # define VM_READ_BIT 0
4814 # define VM_WRITE_BIT 1
4815 # define VM_EXEC_BIT 2
4816 @@ -149,8 +166,21 @@ retry:
4817 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
4818 goto bad_area;
4819
4820 - if ((vma->vm_flags & mask) != mask)
4821 + if ((vma->vm_flags & mask) != mask) {
4822 +
4823 +#ifdef CONFIG_PAX_PAGEEXEC
4824 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
4825 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
4826 + goto bad_area;
4827 +
4828 + up_read(&mm->mmap_sem);
4829 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
4830 + do_group_exit(SIGKILL);
4831 + }
4832 +#endif
4833 +
4834 goto bad_area;
4835 + }
4836
4837 /*
4838 * If for any reason at all we couldn't handle the fault, make
4839 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
4840 index 5ca674b..127c3cb 100644
4841 --- a/arch/ia64/mm/hugetlbpage.c
4842 +++ b/arch/ia64/mm/hugetlbpage.c
4843 @@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
4844 unsigned long pgoff, unsigned long flags)
4845 {
4846 struct vm_area_struct *vmm;
4847 + unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
4848
4849 if (len > RGN_MAP_LIMIT)
4850 return -ENOMEM;
4851 @@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
4852 /* At this point: (!vmm || addr < vmm->vm_end). */
4853 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
4854 return -ENOMEM;
4855 - if (!vmm || (addr + len) <= vmm->vm_start)
4856 + if (check_heap_stack_gap(vmm, addr, len, offset))
4857 return addr;
4858 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
4859 }
4860 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
4861 index b755ea9..b9a969e 100644
4862 --- a/arch/ia64/mm/init.c
4863 +++ b/arch/ia64/mm/init.c
4864 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
4865 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
4866 vma->vm_end = vma->vm_start + PAGE_SIZE;
4867 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
4868 +
4869 +#ifdef CONFIG_PAX_PAGEEXEC
4870 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
4871 + vma->vm_flags &= ~VM_EXEC;
4872 +
4873 +#ifdef CONFIG_PAX_MPROTECT
4874 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
4875 + vma->vm_flags &= ~VM_MAYEXEC;
4876 +#endif
4877 +
4878 + }
4879 +#endif
4880 +
4881 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4882 down_write(&current->mm->mmap_sem);
4883 if (insert_vm_struct(current->mm, vma)) {
4884 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
4885 index 40b3ee9..8c2c112 100644
4886 --- a/arch/m32r/include/asm/cache.h
4887 +++ b/arch/m32r/include/asm/cache.h
4888 @@ -1,8 +1,10 @@
4889 #ifndef _ASM_M32R_CACHE_H
4890 #define _ASM_M32R_CACHE_H
4891
4892 +#include <linux/const.h>
4893 +
4894 /* L1 cache line size */
4895 #define L1_CACHE_SHIFT 4
4896 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4897 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4898
4899 #endif /* _ASM_M32R_CACHE_H */
4900 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
4901 index 82abd15..d95ae5d 100644
4902 --- a/arch/m32r/lib/usercopy.c
4903 +++ b/arch/m32r/lib/usercopy.c
4904 @@ -14,6 +14,9 @@
4905 unsigned long
4906 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
4907 {
4908 + if ((long)n < 0)
4909 + return n;
4910 +
4911 prefetch(from);
4912 if (access_ok(VERIFY_WRITE, to, n))
4913 __copy_user(to,from,n);
4914 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
4915 unsigned long
4916 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
4917 {
4918 + if ((long)n < 0)
4919 + return n;
4920 +
4921 prefetchw(to);
4922 if (access_ok(VERIFY_READ, from, n))
4923 __copy_user_zeroing(to,from,n);
4924 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
4925 index 0395c51..5f26031 100644
4926 --- a/arch/m68k/include/asm/cache.h
4927 +++ b/arch/m68k/include/asm/cache.h
4928 @@ -4,9 +4,11 @@
4929 #ifndef __ARCH_M68K_CACHE_H
4930 #define __ARCH_M68K_CACHE_H
4931
4932 +#include <linux/const.h>
4933 +
4934 /* bytes per L1 cache line */
4935 #define L1_CACHE_SHIFT 4
4936 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
4937 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4938
4939 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4940
4941 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
4942 index 4efe96a..60e8699 100644
4943 --- a/arch/microblaze/include/asm/cache.h
4944 +++ b/arch/microblaze/include/asm/cache.h
4945 @@ -13,11 +13,12 @@
4946 #ifndef _ASM_MICROBLAZE_CACHE_H
4947 #define _ASM_MICROBLAZE_CACHE_H
4948
4949 +#include <linux/const.h>
4950 #include <asm/registers.h>
4951
4952 #define L1_CACHE_SHIFT 5
4953 /* word-granular cache in microblaze */
4954 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4955 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4956
4957 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4958
4959 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
4960 index 01cc6ba..bcb7a5d 100644
4961 --- a/arch/mips/include/asm/atomic.h
4962 +++ b/arch/mips/include/asm/atomic.h
4963 @@ -21,6 +21,10 @@
4964 #include <asm/cmpxchg.h>
4965 #include <asm/war.h>
4966
4967 +#ifdef CONFIG_GENERIC_ATOMIC64
4968 +#include <asm-generic/atomic64.h>
4969 +#endif
4970 +
4971 #define ATOMIC_INIT(i) { (i) }
4972
4973 /*
4974 @@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
4975 */
4976 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
4977
4978 +#define atomic64_read_unchecked(v) atomic64_read(v)
4979 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4980 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4981 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4982 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4983 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4984 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4985 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4986 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4987 +
4988 #endif /* CONFIG_64BIT */
4989
4990 /*
4991 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
4992 index b4db69f..8f3b093 100644
4993 --- a/arch/mips/include/asm/cache.h
4994 +++ b/arch/mips/include/asm/cache.h
4995 @@ -9,10 +9,11 @@
4996 #ifndef _ASM_CACHE_H
4997 #define _ASM_CACHE_H
4998
4999 +#include <linux/const.h>
5000 #include <kmalloc.h>
5001
5002 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5003 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5004 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5005
5006 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5007 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5008 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5009 index 455c0ac..ad65fbe 100644
5010 --- a/arch/mips/include/asm/elf.h
5011 +++ b/arch/mips/include/asm/elf.h
5012 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
5013 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5014 #endif
5015
5016 +#ifdef CONFIG_PAX_ASLR
5017 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5018 +
5019 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5020 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5021 +#endif
5022 +
5023 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5024 struct linux_binprm;
5025 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5026 int uses_interp);
5027
5028 -struct mm_struct;
5029 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5030 -#define arch_randomize_brk arch_randomize_brk
5031 -
5032 #endif /* _ASM_ELF_H */
5033 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5034 index c1f6afa..38cc6e9 100644
5035 --- a/arch/mips/include/asm/exec.h
5036 +++ b/arch/mips/include/asm/exec.h
5037 @@ -12,6 +12,6 @@
5038 #ifndef _ASM_EXEC_H
5039 #define _ASM_EXEC_H
5040
5041 -extern unsigned long arch_align_stack(unsigned long sp);
5042 +#define arch_align_stack(x) ((x) & ~0xfUL)
5043
5044 #endif /* _ASM_EXEC_H */
5045 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5046 index dbaec94..6a14935 100644
5047 --- a/arch/mips/include/asm/page.h
5048 +++ b/arch/mips/include/asm/page.h
5049 @@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5050 #ifdef CONFIG_CPU_MIPS32
5051 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5052 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5053 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5054 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5055 #else
5056 typedef struct { unsigned long long pte; } pte_t;
5057 #define pte_val(x) ((x).pte)
5058 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5059 index 881d18b..cea38bc 100644
5060 --- a/arch/mips/include/asm/pgalloc.h
5061 +++ b/arch/mips/include/asm/pgalloc.h
5062 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5063 {
5064 set_pud(pud, __pud((unsigned long)pmd));
5065 }
5066 +
5067 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5068 +{
5069 + pud_populate(mm, pud, pmd);
5070 +}
5071 #endif
5072
5073 /*
5074 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5075 index b2050b9..d71bb1b 100644
5076 --- a/arch/mips/include/asm/thread_info.h
5077 +++ b/arch/mips/include/asm/thread_info.h
5078 @@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
5079 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5080 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5081 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5082 +/* li takes a 32bit immediate */
5083 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5084 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5085
5086 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5087 @@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
5088 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5089 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5090 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5091 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5092 +
5093 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5094
5095 /* work to do in syscall_trace_leave() */
5096 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5097 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5098
5099 /* work to do on interrupt/exception return */
5100 #define _TIF_WORK_MASK \
5101 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5102 /* work to do on any return to u-space */
5103 -#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5104 +#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5105
5106 #endif /* __KERNEL__ */
5107
5108 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5109 index 9fdd8bc..4bd7f1a 100644
5110 --- a/arch/mips/kernel/binfmt_elfn32.c
5111 +++ b/arch/mips/kernel/binfmt_elfn32.c
5112 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5113 #undef ELF_ET_DYN_BASE
5114 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5115
5116 +#ifdef CONFIG_PAX_ASLR
5117 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5118 +
5119 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5120 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5121 +#endif
5122 +
5123 #include <asm/processor.h>
5124 #include <linux/module.h>
5125 #include <linux/elfcore.h>
5126 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5127 index ff44823..97f8906 100644
5128 --- a/arch/mips/kernel/binfmt_elfo32.c
5129 +++ b/arch/mips/kernel/binfmt_elfo32.c
5130 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5131 #undef ELF_ET_DYN_BASE
5132 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5133
5134 +#ifdef CONFIG_PAX_ASLR
5135 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5136 +
5137 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5138 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5139 +#endif
5140 +
5141 #include <asm/processor.h>
5142
5143 /*
5144 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5145 index a11c6f9..be5e164 100644
5146 --- a/arch/mips/kernel/process.c
5147 +++ b/arch/mips/kernel/process.c
5148 @@ -460,15 +460,3 @@ unsigned long get_wchan(struct task_struct *task)
5149 out:
5150 return pc;
5151 }
5152 -
5153 -/*
5154 - * Don't forget that the stack pointer must be aligned on a 8 bytes
5155 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5156 - */
5157 -unsigned long arch_align_stack(unsigned long sp)
5158 -{
5159 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5160 - sp -= get_random_int() & ~PAGE_MASK;
5161 -
5162 - return sp & ALMASK;
5163 -}
5164 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5165 index 4812c6d..2069554 100644
5166 --- a/arch/mips/kernel/ptrace.c
5167 +++ b/arch/mips/kernel/ptrace.c
5168 @@ -528,6 +528,10 @@ static inline int audit_arch(void)
5169 return arch;
5170 }
5171
5172 +#ifdef CONFIG_GRKERNSEC_SETXID
5173 +extern void gr_delayed_cred_worker(void);
5174 +#endif
5175 +
5176 /*
5177 * Notification of system call entry/exit
5178 * - triggered by current->work.syscall_trace
5179 @@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5180 /* do the secure computing check first */
5181 secure_computing_strict(regs->regs[2]);
5182
5183 +#ifdef CONFIG_GRKERNSEC_SETXID
5184 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5185 + gr_delayed_cred_worker();
5186 +#endif
5187 +
5188 if (!(current->ptrace & PT_PTRACED))
5189 goto out;
5190
5191 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5192 index d20a4bc..7096ae5 100644
5193 --- a/arch/mips/kernel/scall32-o32.S
5194 +++ b/arch/mips/kernel/scall32-o32.S
5195 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5196
5197 stack_done:
5198 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5199 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5200 + li t1, _TIF_SYSCALL_WORK
5201 and t0, t1
5202 bnez t0, syscall_trace_entry # -> yes
5203
5204 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5205 index b64f642..0fe6eab 100644
5206 --- a/arch/mips/kernel/scall64-64.S
5207 +++ b/arch/mips/kernel/scall64-64.S
5208 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5209
5210 sd a3, PT_R26(sp) # save a3 for syscall restarting
5211
5212 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5213 + li t1, _TIF_SYSCALL_WORK
5214 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5215 and t0, t1, t0
5216 bnez t0, syscall_trace_entry
5217 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5218 index c29ac19..c592d05 100644
5219 --- a/arch/mips/kernel/scall64-n32.S
5220 +++ b/arch/mips/kernel/scall64-n32.S
5221 @@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5222
5223 sd a3, PT_R26(sp) # save a3 for syscall restarting
5224
5225 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5226 + li t1, _TIF_SYSCALL_WORK
5227 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5228 and t0, t1, t0
5229 bnez t0, n32_syscall_trace_entry
5230 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5231 index cf3e75e..72e93fe 100644
5232 --- a/arch/mips/kernel/scall64-o32.S
5233 +++ b/arch/mips/kernel/scall64-o32.S
5234 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5235 PTR 4b, bad_stack
5236 .previous
5237
5238 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5239 + li t1, _TIF_SYSCALL_WORK
5240 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5241 and t0, t1, t0
5242 bnez t0, trace_a_syscall
5243 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5244 index ddcec1e..c7f983e 100644
5245 --- a/arch/mips/mm/fault.c
5246 +++ b/arch/mips/mm/fault.c
5247 @@ -27,6 +27,23 @@
5248 #include <asm/highmem.h> /* For VMALLOC_END */
5249 #include <linux/kdebug.h>
5250
5251 +#ifdef CONFIG_PAX_PAGEEXEC
5252 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5253 +{
5254 + unsigned long i;
5255 +
5256 + printk(KERN_ERR "PAX: bytes at PC: ");
5257 + for (i = 0; i < 5; i++) {
5258 + unsigned int c;
5259 + if (get_user(c, (unsigned int *)pc+i))
5260 + printk(KERN_CONT "???????? ");
5261 + else
5262 + printk(KERN_CONT "%08x ", c);
5263 + }
5264 + printk("\n");
5265 +}
5266 +#endif
5267 +
5268 /*
5269 * This routine handles page faults. It determines the address,
5270 * and the problem, and then passes it off to one of the appropriate
5271 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5272 index 7e5fe27..479a219 100644
5273 --- a/arch/mips/mm/mmap.c
5274 +++ b/arch/mips/mm/mmap.c
5275 @@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5276 struct vm_area_struct *vma;
5277 unsigned long addr = addr0;
5278 int do_color_align;
5279 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5280 struct vm_unmapped_area_info info;
5281
5282 if (unlikely(len > TASK_SIZE))
5283 @@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5284 do_color_align = 1;
5285
5286 /* requesting a specific address */
5287 +
5288 +#ifdef CONFIG_PAX_RANDMMAP
5289 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5290 +#endif
5291 +
5292 if (addr) {
5293 if (do_color_align)
5294 addr = COLOUR_ALIGN(addr, pgoff);
5295 @@ -91,8 +97,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5296 addr = PAGE_ALIGN(addr);
5297
5298 vma = find_vma(mm, addr);
5299 - if (TASK_SIZE - len >= addr &&
5300 - (!vma || addr + len <= vma->vm_start))
5301 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5302 return addr;
5303 }
5304
5305 @@ -146,6 +151,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5306 {
5307 unsigned long random_factor = 0UL;
5308
5309 +#ifdef CONFIG_PAX_RANDMMAP
5310 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5311 +#endif
5312 +
5313 if (current->flags & PF_RANDOMIZE) {
5314 random_factor = get_random_int();
5315 random_factor = random_factor << PAGE_SHIFT;
5316 @@ -157,42 +166,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5317
5318 if (mmap_is_legacy()) {
5319 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5320 +
5321 +#ifdef CONFIG_PAX_RANDMMAP
5322 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5323 + mm->mmap_base += mm->delta_mmap;
5324 +#endif
5325 +
5326 mm->get_unmapped_area = arch_get_unmapped_area;
5327 mm->unmap_area = arch_unmap_area;
5328 } else {
5329 mm->mmap_base = mmap_base(random_factor);
5330 +
5331 +#ifdef CONFIG_PAX_RANDMMAP
5332 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5333 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5334 +#endif
5335 +
5336 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5337 mm->unmap_area = arch_unmap_area_topdown;
5338 }
5339 }
5340
5341 -static inline unsigned long brk_rnd(void)
5342 -{
5343 - unsigned long rnd = get_random_int();
5344 -
5345 - rnd = rnd << PAGE_SHIFT;
5346 - /* 8MB for 32bit, 256MB for 64bit */
5347 - if (TASK_IS_32BIT_ADDR)
5348 - rnd = rnd & 0x7ffffful;
5349 - else
5350 - rnd = rnd & 0xffffffful;
5351 -
5352 - return rnd;
5353 -}
5354 -
5355 -unsigned long arch_randomize_brk(struct mm_struct *mm)
5356 -{
5357 - unsigned long base = mm->brk;
5358 - unsigned long ret;
5359 -
5360 - ret = PAGE_ALIGN(base + brk_rnd());
5361 -
5362 - if (ret < mm->brk)
5363 - return mm->brk;
5364 -
5365 - return ret;
5366 -}
5367 -
5368 int __virt_addr_valid(const volatile void *kaddr)
5369 {
5370 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
5371 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5372 index 967d144..db12197 100644
5373 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5374 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5375 @@ -11,12 +11,14 @@
5376 #ifndef _ASM_PROC_CACHE_H
5377 #define _ASM_PROC_CACHE_H
5378
5379 +#include <linux/const.h>
5380 +
5381 /* L1 cache */
5382
5383 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5384 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5385 -#define L1_CACHE_BYTES 16 /* bytes per entry */
5386 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5387 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5388 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5389
5390 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5391 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5392 index bcb5df2..84fabd2 100644
5393 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5394 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5395 @@ -16,13 +16,15 @@
5396 #ifndef _ASM_PROC_CACHE_H
5397 #define _ASM_PROC_CACHE_H
5398
5399 +#include <linux/const.h>
5400 +
5401 /*
5402 * L1 cache
5403 */
5404 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5405 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5406 -#define L1_CACHE_BYTES 32 /* bytes per entry */
5407 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5408 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5409 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5410
5411 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5412 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5413 index 4ce7a01..449202a 100644
5414 --- a/arch/openrisc/include/asm/cache.h
5415 +++ b/arch/openrisc/include/asm/cache.h
5416 @@ -19,11 +19,13 @@
5417 #ifndef __ASM_OPENRISC_CACHE_H
5418 #define __ASM_OPENRISC_CACHE_H
5419
5420 +#include <linux/const.h>
5421 +
5422 /* FIXME: How can we replace these with values from the CPU...
5423 * they shouldn't be hard-coded!
5424 */
5425
5426 -#define L1_CACHE_BYTES 16
5427 #define L1_CACHE_SHIFT 4
5428 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5429
5430 #endif /* __ASM_OPENRISC_CACHE_H */
5431 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5432 index af9cf30..2aae9b2 100644
5433 --- a/arch/parisc/include/asm/atomic.h
5434 +++ b/arch/parisc/include/asm/atomic.h
5435 @@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5436
5437 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5438
5439 +#define atomic64_read_unchecked(v) atomic64_read(v)
5440 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5441 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5442 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5443 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5444 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5445 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5446 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5447 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5448 +
5449 #endif /* !CONFIG_64BIT */
5450
5451
5452 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5453 index 47f11c7..3420df2 100644
5454 --- a/arch/parisc/include/asm/cache.h
5455 +++ b/arch/parisc/include/asm/cache.h
5456 @@ -5,6 +5,7 @@
5457 #ifndef __ARCH_PARISC_CACHE_H
5458 #define __ARCH_PARISC_CACHE_H
5459
5460 +#include <linux/const.h>
5461
5462 /*
5463 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5464 @@ -15,13 +16,13 @@
5465 * just ruin performance.
5466 */
5467 #ifdef CONFIG_PA20
5468 -#define L1_CACHE_BYTES 64
5469 #define L1_CACHE_SHIFT 6
5470 #else
5471 -#define L1_CACHE_BYTES 32
5472 #define L1_CACHE_SHIFT 5
5473 #endif
5474
5475 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5476 +
5477 #ifndef __ASSEMBLY__
5478
5479 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5480 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5481 index 19f6cb1..6c78cf2 100644
5482 --- a/arch/parisc/include/asm/elf.h
5483 +++ b/arch/parisc/include/asm/elf.h
5484 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5485
5486 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5487
5488 +#ifdef CONFIG_PAX_ASLR
5489 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5490 +
5491 +#define PAX_DELTA_MMAP_LEN 16
5492 +#define PAX_DELTA_STACK_LEN 16
5493 +#endif
5494 +
5495 /* This yields a mask that user programs can use to figure out what
5496 instruction set this CPU supports. This could be done in user space,
5497 but it's not easy, and we've already done it here. */
5498 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
5499 index fc987a1..6e068ef 100644
5500 --- a/arch/parisc/include/asm/pgalloc.h
5501 +++ b/arch/parisc/include/asm/pgalloc.h
5502 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5503 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
5504 }
5505
5506 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5507 +{
5508 + pgd_populate(mm, pgd, pmd);
5509 +}
5510 +
5511 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
5512 {
5513 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
5514 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
5515 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
5516 #define pmd_free(mm, x) do { } while (0)
5517 #define pgd_populate(mm, pmd, pte) BUG()
5518 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
5519
5520 #endif
5521
5522 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
5523 index 7df49fa..38b62bf 100644
5524 --- a/arch/parisc/include/asm/pgtable.h
5525 +++ b/arch/parisc/include/asm/pgtable.h
5526 @@ -218,6 +218,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
5527 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
5528 #define PAGE_COPY PAGE_EXECREAD
5529 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
5530 +
5531 +#ifdef CONFIG_PAX_PAGEEXEC
5532 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
5533 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5534 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5535 +#else
5536 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5537 +# define PAGE_COPY_NOEXEC PAGE_COPY
5538 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5539 +#endif
5540 +
5541 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
5542 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
5543 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
5544 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
5545 index 4ba2c93..f5e3974 100644
5546 --- a/arch/parisc/include/asm/uaccess.h
5547 +++ b/arch/parisc/include/asm/uaccess.h
5548 @@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
5549 const void __user *from,
5550 unsigned long n)
5551 {
5552 - int sz = __compiletime_object_size(to);
5553 + size_t sz = __compiletime_object_size(to);
5554 int ret = -EFAULT;
5555
5556 - if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
5557 + if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
5558 ret = __copy_from_user(to, from, n);
5559 else
5560 copy_from_user_overflow();
5561 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
5562 index 2a625fb..9908930 100644
5563 --- a/arch/parisc/kernel/module.c
5564 +++ b/arch/parisc/kernel/module.c
5565 @@ -98,16 +98,38 @@
5566
5567 /* three functions to determine where in the module core
5568 * or init pieces the location is */
5569 +static inline int in_init_rx(struct module *me, void *loc)
5570 +{
5571 + return (loc >= me->module_init_rx &&
5572 + loc < (me->module_init_rx + me->init_size_rx));
5573 +}
5574 +
5575 +static inline int in_init_rw(struct module *me, void *loc)
5576 +{
5577 + return (loc >= me->module_init_rw &&
5578 + loc < (me->module_init_rw + me->init_size_rw));
5579 +}
5580 +
5581 static inline int in_init(struct module *me, void *loc)
5582 {
5583 - return (loc >= me->module_init &&
5584 - loc <= (me->module_init + me->init_size));
5585 + return in_init_rx(me, loc) || in_init_rw(me, loc);
5586 +}
5587 +
5588 +static inline int in_core_rx(struct module *me, void *loc)
5589 +{
5590 + return (loc >= me->module_core_rx &&
5591 + loc < (me->module_core_rx + me->core_size_rx));
5592 +}
5593 +
5594 +static inline int in_core_rw(struct module *me, void *loc)
5595 +{
5596 + return (loc >= me->module_core_rw &&
5597 + loc < (me->module_core_rw + me->core_size_rw));
5598 }
5599
5600 static inline int in_core(struct module *me, void *loc)
5601 {
5602 - return (loc >= me->module_core &&
5603 - loc <= (me->module_core + me->core_size));
5604 + return in_core_rx(me, loc) || in_core_rw(me, loc);
5605 }
5606
5607 static inline int in_local(struct module *me, void *loc)
5608 @@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
5609 }
5610
5611 /* align things a bit */
5612 - me->core_size = ALIGN(me->core_size, 16);
5613 - me->arch.got_offset = me->core_size;
5614 - me->core_size += gots * sizeof(struct got_entry);
5615 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
5616 + me->arch.got_offset = me->core_size_rw;
5617 + me->core_size_rw += gots * sizeof(struct got_entry);
5618
5619 - me->core_size = ALIGN(me->core_size, 16);
5620 - me->arch.fdesc_offset = me->core_size;
5621 - me->core_size += fdescs * sizeof(Elf_Fdesc);
5622 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
5623 + me->arch.fdesc_offset = me->core_size_rw;
5624 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
5625
5626 me->arch.got_max = gots;
5627 me->arch.fdesc_max = fdescs;
5628 @@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5629
5630 BUG_ON(value == 0);
5631
5632 - got = me->module_core + me->arch.got_offset;
5633 + got = me->module_core_rw + me->arch.got_offset;
5634 for (i = 0; got[i].addr; i++)
5635 if (got[i].addr == value)
5636 goto out;
5637 @@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5638 #ifdef CONFIG_64BIT
5639 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5640 {
5641 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
5642 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
5643
5644 if (!value) {
5645 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
5646 @@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5647
5648 /* Create new one */
5649 fdesc->addr = value;
5650 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5651 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5652 return (Elf_Addr)fdesc;
5653 }
5654 #endif /* CONFIG_64BIT */
5655 @@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
5656
5657 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
5658 end = table + sechdrs[me->arch.unwind_section].sh_size;
5659 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5660 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5661
5662 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
5663 me->arch.unwind_section, table, end, gp);
5664 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
5665 index f76c108..92bad82 100644
5666 --- a/arch/parisc/kernel/sys_parisc.c
5667 +++ b/arch/parisc/kernel/sys_parisc.c
5668 @@ -33,9 +33,11 @@
5669 #include <linux/utsname.h>
5670 #include <linux/personality.h>
5671
5672 -static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5673 +static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
5674 + unsigned long flags)
5675 {
5676 struct vm_area_struct *vma;
5677 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5678
5679 addr = PAGE_ALIGN(addr);
5680
5681 @@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5682 /* At this point: (!vma || addr < vma->vm_end). */
5683 if (TASK_SIZE - len < addr)
5684 return -ENOMEM;
5685 - if (!vma || addr + len <= vma->vm_start)
5686 + if (check_heap_stack_gap(vma, addr, len, offset))
5687 return addr;
5688 addr = vma->vm_end;
5689 }
5690 @@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
5691 return offset & 0x3FF000;
5692 }
5693
5694 -static unsigned long get_shared_area(struct address_space *mapping,
5695 - unsigned long addr, unsigned long len, unsigned long pgoff)
5696 +static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
5697 + unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
5698 {
5699 struct vm_area_struct *vma;
5700 int offset = mapping ? get_offset(mapping) : 0;
5701 + unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5702
5703 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
5704
5705 @@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
5706 /* At this point: (!vma || addr < vma->vm_end). */
5707 if (TASK_SIZE - len < addr)
5708 return -ENOMEM;
5709 - if (!vma || addr + len <= vma->vm_start)
5710 + if (check_heap_stack_gap(vma, addr, len, rand_offset))
5711 return addr;
5712 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
5713 if (addr < vma->vm_end) /* handle wraparound */
5714 @@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5715 if (flags & MAP_FIXED)
5716 return addr;
5717 if (!addr)
5718 - addr = TASK_UNMAPPED_BASE;
5719 + addr = current->mm->mmap_base;
5720
5721 if (filp) {
5722 - addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
5723 + addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
5724 } else if(flags & MAP_SHARED) {
5725 - addr = get_shared_area(NULL, addr, len, pgoff);
5726 + addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
5727 } else {
5728 - addr = get_unshared_area(addr, len);
5729 + addr = get_unshared_area(filp, addr, len, flags);
5730 }
5731 return addr;
5732 }
5733 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
5734 index 45ba99f..8e22c33 100644
5735 --- a/arch/parisc/kernel/traps.c
5736 +++ b/arch/parisc/kernel/traps.c
5737 @@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
5738
5739 down_read(&current->mm->mmap_sem);
5740 vma = find_vma(current->mm,regs->iaoq[0]);
5741 - if (vma && (regs->iaoq[0] >= vma->vm_start)
5742 - && (vma->vm_flags & VM_EXEC)) {
5743 -
5744 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
5745 fault_address = regs->iaoq[0];
5746 fault_space = regs->iasq[0];
5747
5748 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
5749 index 18162ce..94de376 100644
5750 --- a/arch/parisc/mm/fault.c
5751 +++ b/arch/parisc/mm/fault.c
5752 @@ -15,6 +15,7 @@
5753 #include <linux/sched.h>
5754 #include <linux/interrupt.h>
5755 #include <linux/module.h>
5756 +#include <linux/unistd.h>
5757
5758 #include <asm/uaccess.h>
5759 #include <asm/traps.h>
5760 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
5761 static unsigned long
5762 parisc_acctyp(unsigned long code, unsigned int inst)
5763 {
5764 - if (code == 6 || code == 16)
5765 + if (code == 6 || code == 7 || code == 16)
5766 return VM_EXEC;
5767
5768 switch (inst & 0xf0000000) {
5769 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
5770 }
5771 #endif
5772
5773 +#ifdef CONFIG_PAX_PAGEEXEC
5774 +/*
5775 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
5776 + *
5777 + * returns 1 when task should be killed
5778 + * 2 when rt_sigreturn trampoline was detected
5779 + * 3 when unpatched PLT trampoline was detected
5780 + */
5781 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5782 +{
5783 +
5784 +#ifdef CONFIG_PAX_EMUPLT
5785 + int err;
5786 +
5787 + do { /* PaX: unpatched PLT emulation */
5788 + unsigned int bl, depwi;
5789 +
5790 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
5791 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
5792 +
5793 + if (err)
5794 + break;
5795 +
5796 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
5797 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
5798 +
5799 + err = get_user(ldw, (unsigned int *)addr);
5800 + err |= get_user(bv, (unsigned int *)(addr+4));
5801 + err |= get_user(ldw2, (unsigned int *)(addr+8));
5802 +
5803 + if (err)
5804 + break;
5805 +
5806 + if (ldw == 0x0E801096U &&
5807 + bv == 0xEAC0C000U &&
5808 + ldw2 == 0x0E881095U)
5809 + {
5810 + unsigned int resolver, map;
5811 +
5812 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
5813 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
5814 + if (err)
5815 + break;
5816 +
5817 + regs->gr[20] = instruction_pointer(regs)+8;
5818 + regs->gr[21] = map;
5819 + regs->gr[22] = resolver;
5820 + regs->iaoq[0] = resolver | 3UL;
5821 + regs->iaoq[1] = regs->iaoq[0] + 4;
5822 + return 3;
5823 + }
5824 + }
5825 + } while (0);
5826 +#endif
5827 +
5828 +#ifdef CONFIG_PAX_EMUTRAMP
5829 +
5830 +#ifndef CONFIG_PAX_EMUSIGRT
5831 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
5832 + return 1;
5833 +#endif
5834 +
5835 + do { /* PaX: rt_sigreturn emulation */
5836 + unsigned int ldi1, ldi2, bel, nop;
5837 +
5838 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
5839 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
5840 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
5841 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
5842 +
5843 + if (err)
5844 + break;
5845 +
5846 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
5847 + ldi2 == 0x3414015AU &&
5848 + bel == 0xE4008200U &&
5849 + nop == 0x08000240U)
5850 + {
5851 + regs->gr[25] = (ldi1 & 2) >> 1;
5852 + regs->gr[20] = __NR_rt_sigreturn;
5853 + regs->gr[31] = regs->iaoq[1] + 16;
5854 + regs->sr[0] = regs->iasq[1];
5855 + regs->iaoq[0] = 0x100UL;
5856 + regs->iaoq[1] = regs->iaoq[0] + 4;
5857 + regs->iasq[0] = regs->sr[2];
5858 + regs->iasq[1] = regs->sr[2];
5859 + return 2;
5860 + }
5861 + } while (0);
5862 +#endif
5863 +
5864 + return 1;
5865 +}
5866 +
5867 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5868 +{
5869 + unsigned long i;
5870 +
5871 + printk(KERN_ERR "PAX: bytes at PC: ");
5872 + for (i = 0; i < 5; i++) {
5873 + unsigned int c;
5874 + if (get_user(c, (unsigned int *)pc+i))
5875 + printk(KERN_CONT "???????? ");
5876 + else
5877 + printk(KERN_CONT "%08x ", c);
5878 + }
5879 + printk("\n");
5880 +}
5881 +#endif
5882 +
5883 int fixup_exception(struct pt_regs *regs)
5884 {
5885 const struct exception_table_entry *fix;
5886 @@ -192,8 +303,33 @@ good_area:
5887
5888 acc_type = parisc_acctyp(code,regs->iir);
5889
5890 - if ((vma->vm_flags & acc_type) != acc_type)
5891 + if ((vma->vm_flags & acc_type) != acc_type) {
5892 +
5893 +#ifdef CONFIG_PAX_PAGEEXEC
5894 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
5895 + (address & ~3UL) == instruction_pointer(regs))
5896 + {
5897 + up_read(&mm->mmap_sem);
5898 + switch (pax_handle_fetch_fault(regs)) {
5899 +
5900 +#ifdef CONFIG_PAX_EMUPLT
5901 + case 3:
5902 + return;
5903 +#endif
5904 +
5905 +#ifdef CONFIG_PAX_EMUTRAMP
5906 + case 2:
5907 + return;
5908 +#endif
5909 +
5910 + }
5911 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
5912 + do_group_exit(SIGKILL);
5913 + }
5914 +#endif
5915 +
5916 goto bad_area;
5917 + }
5918
5919 /*
5920 * If for any reason at all we couldn't handle the fault, make
5921 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
5922 index e3b1d41..8e81edf 100644
5923 --- a/arch/powerpc/include/asm/atomic.h
5924 +++ b/arch/powerpc/include/asm/atomic.h
5925 @@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
5926 return t1;
5927 }
5928
5929 +#define atomic64_read_unchecked(v) atomic64_read(v)
5930 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5931 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5932 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5933 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5934 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5935 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5936 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5937 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5938 +
5939 #endif /* __powerpc64__ */
5940
5941 #endif /* __KERNEL__ */
5942 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
5943 index 9e495c9..b6878e5 100644
5944 --- a/arch/powerpc/include/asm/cache.h
5945 +++ b/arch/powerpc/include/asm/cache.h
5946 @@ -3,6 +3,7 @@
5947
5948 #ifdef __KERNEL__
5949
5950 +#include <linux/const.h>
5951
5952 /* bytes per L1 cache line */
5953 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
5954 @@ -22,7 +23,7 @@
5955 #define L1_CACHE_SHIFT 7
5956 #endif
5957
5958 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5959 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5960
5961 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5962
5963 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
5964 index 6abf0a1..459d0f1 100644
5965 --- a/arch/powerpc/include/asm/elf.h
5966 +++ b/arch/powerpc/include/asm/elf.h
5967 @@ -28,8 +28,19 @@
5968 the loader. We need to make sure that it is out of the way of the program
5969 that it will "exec", and that there is sufficient room for the brk. */
5970
5971 -extern unsigned long randomize_et_dyn(unsigned long base);
5972 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
5973 +#define ELF_ET_DYN_BASE (0x20000000)
5974 +
5975 +#ifdef CONFIG_PAX_ASLR
5976 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
5977 +
5978 +#ifdef __powerpc64__
5979 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
5980 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
5981 +#else
5982 +#define PAX_DELTA_MMAP_LEN 15
5983 +#define PAX_DELTA_STACK_LEN 15
5984 +#endif
5985 +#endif
5986
5987 /*
5988 * Our registers are always unsigned longs, whether we're a 32 bit
5989 @@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5990 (0x7ff >> (PAGE_SHIFT - 12)) : \
5991 (0x3ffff >> (PAGE_SHIFT - 12)))
5992
5993 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5994 -#define arch_randomize_brk arch_randomize_brk
5995 -
5996 -
5997 #ifdef CONFIG_SPU_BASE
5998 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
5999 #define NT_SPU 1
6000 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6001 index 8196e9c..d83a9f3 100644
6002 --- a/arch/powerpc/include/asm/exec.h
6003 +++ b/arch/powerpc/include/asm/exec.h
6004 @@ -4,6 +4,6 @@
6005 #ifndef _ASM_POWERPC_EXEC_H
6006 #define _ASM_POWERPC_EXEC_H
6007
6008 -extern unsigned long arch_align_stack(unsigned long sp);
6009 +#define arch_align_stack(x) ((x) & ~0xfUL)
6010
6011 #endif /* _ASM_POWERPC_EXEC_H */
6012 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6013 index 5acabbd..7ea14fa 100644
6014 --- a/arch/powerpc/include/asm/kmap_types.h
6015 +++ b/arch/powerpc/include/asm/kmap_types.h
6016 @@ -10,7 +10,7 @@
6017 * 2 of the License, or (at your option) any later version.
6018 */
6019
6020 -#define KM_TYPE_NR 16
6021 +#define KM_TYPE_NR 17
6022
6023 #endif /* __KERNEL__ */
6024 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6025 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6026 index 8565c25..2865190 100644
6027 --- a/arch/powerpc/include/asm/mman.h
6028 +++ b/arch/powerpc/include/asm/mman.h
6029 @@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6030 }
6031 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6032
6033 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6034 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6035 {
6036 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6037 }
6038 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6039 index f072e97..b436dee 100644
6040 --- a/arch/powerpc/include/asm/page.h
6041 +++ b/arch/powerpc/include/asm/page.h
6042 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6043 * and needs to be executable. This means the whole heap ends
6044 * up being executable.
6045 */
6046 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6047 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6048 +#define VM_DATA_DEFAULT_FLAGS32 \
6049 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6050 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6051
6052 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6053 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6054 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6055 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6056 #endif
6057
6058 +#define ktla_ktva(addr) (addr)
6059 +#define ktva_ktla(addr) (addr)
6060 +
6061 /*
6062 * Use the top bit of the higher-level page table entries to indicate whether
6063 * the entries we point to contain hugepages. This works because we know that
6064 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6065 index cd915d6..c10cee8 100644
6066 --- a/arch/powerpc/include/asm/page_64.h
6067 +++ b/arch/powerpc/include/asm/page_64.h
6068 @@ -154,15 +154,18 @@ do { \
6069 * stack by default, so in the absence of a PT_GNU_STACK program header
6070 * we turn execute permission off.
6071 */
6072 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6073 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6074 +#define VM_STACK_DEFAULT_FLAGS32 \
6075 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6076 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6077
6078 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6079 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6080
6081 +#ifndef CONFIG_PAX_PAGEEXEC
6082 #define VM_STACK_DEFAULT_FLAGS \
6083 (is_32bit_task() ? \
6084 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6085 +#endif
6086
6087 #include <asm-generic/getorder.h>
6088
6089 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6090 index 292725c..f87ae14 100644
6091 --- a/arch/powerpc/include/asm/pgalloc-64.h
6092 +++ b/arch/powerpc/include/asm/pgalloc-64.h
6093 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6094 #ifndef CONFIG_PPC_64K_PAGES
6095
6096 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6097 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6098
6099 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6100 {
6101 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6102 pud_set(pud, (unsigned long)pmd);
6103 }
6104
6105 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6106 +{
6107 + pud_populate(mm, pud, pmd);
6108 +}
6109 +
6110 #define pmd_populate(mm, pmd, pte_page) \
6111 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6112 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6113 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6114 #else /* CONFIG_PPC_64K_PAGES */
6115
6116 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6117 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6118
6119 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6120 pte_t *pte)
6121 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6122 index a9cbd3b..3b67efa 100644
6123 --- a/arch/powerpc/include/asm/pgtable.h
6124 +++ b/arch/powerpc/include/asm/pgtable.h
6125 @@ -2,6 +2,7 @@
6126 #define _ASM_POWERPC_PGTABLE_H
6127 #ifdef __KERNEL__
6128
6129 +#include <linux/const.h>
6130 #ifndef __ASSEMBLY__
6131 #include <asm/processor.h> /* For TASK_SIZE */
6132 #include <asm/mmu.h>
6133 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6134 index 4aad413..85d86bf 100644
6135 --- a/arch/powerpc/include/asm/pte-hash32.h
6136 +++ b/arch/powerpc/include/asm/pte-hash32.h
6137 @@ -21,6 +21,7 @@
6138 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6139 #define _PAGE_USER 0x004 /* usermode access allowed */
6140 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6141 +#define _PAGE_EXEC _PAGE_GUARDED
6142 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6143 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6144 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6145 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6146 index 3d5c9dc..62f8414 100644
6147 --- a/arch/powerpc/include/asm/reg.h
6148 +++ b/arch/powerpc/include/asm/reg.h
6149 @@ -215,6 +215,7 @@
6150 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6151 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6152 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6153 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6154 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6155 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6156 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6157 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6158 index 406b7b9..af63426 100644
6159 --- a/arch/powerpc/include/asm/thread_info.h
6160 +++ b/arch/powerpc/include/asm/thread_info.h
6161 @@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
6162 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6163 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6164 #define TIF_SINGLESTEP 8 /* singlestepping active */
6165 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
6166 #define TIF_SECCOMP 10 /* secure computing */
6167 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
6168 #define TIF_NOERROR 12 /* Force successful syscall return */
6169 @@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6170 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
6171 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6172 for stack store? */
6173 +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6174 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
6175 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
6176
6177 /* as above, but as bit values */
6178 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6179 @@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
6180 #define _TIF_UPROBE (1<<TIF_UPROBE)
6181 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6182 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6183 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6184 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6185 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
6186 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6187 + _TIF_GRSEC_SETXID)
6188
6189 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6190 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6191 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6192 index 4db4959..335e00c 100644
6193 --- a/arch/powerpc/include/asm/uaccess.h
6194 +++ b/arch/powerpc/include/asm/uaccess.h
6195 @@ -13,6 +13,8 @@
6196 #define VERIFY_READ 0
6197 #define VERIFY_WRITE 1
6198
6199 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
6200 +
6201 /*
6202 * The fs value determines whether argument validity checking should be
6203 * performed or not. If get_fs() == USER_DS, checking is performed, with
6204 @@ -318,52 +320,6 @@ do { \
6205 extern unsigned long __copy_tofrom_user(void __user *to,
6206 const void __user *from, unsigned long size);
6207
6208 -#ifndef __powerpc64__
6209 -
6210 -static inline unsigned long copy_from_user(void *to,
6211 - const void __user *from, unsigned long n)
6212 -{
6213 - unsigned long over;
6214 -
6215 - if (access_ok(VERIFY_READ, from, n))
6216 - return __copy_tofrom_user((__force void __user *)to, from, n);
6217 - if ((unsigned long)from < TASK_SIZE) {
6218 - over = (unsigned long)from + n - TASK_SIZE;
6219 - return __copy_tofrom_user((__force void __user *)to, from,
6220 - n - over) + over;
6221 - }
6222 - return n;
6223 -}
6224 -
6225 -static inline unsigned long copy_to_user(void __user *to,
6226 - const void *from, unsigned long n)
6227 -{
6228 - unsigned long over;
6229 -
6230 - if (access_ok(VERIFY_WRITE, to, n))
6231 - return __copy_tofrom_user(to, (__force void __user *)from, n);
6232 - if ((unsigned long)to < TASK_SIZE) {
6233 - over = (unsigned long)to + n - TASK_SIZE;
6234 - return __copy_tofrom_user(to, (__force void __user *)from,
6235 - n - over) + over;
6236 - }
6237 - return n;
6238 -}
6239 -
6240 -#else /* __powerpc64__ */
6241 -
6242 -#define __copy_in_user(to, from, size) \
6243 - __copy_tofrom_user((to), (from), (size))
6244 -
6245 -extern unsigned long copy_from_user(void *to, const void __user *from,
6246 - unsigned long n);
6247 -extern unsigned long copy_to_user(void __user *to, const void *from,
6248 - unsigned long n);
6249 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
6250 - unsigned long n);
6251 -
6252 -#endif /* __powerpc64__ */
6253 -
6254 static inline unsigned long __copy_from_user_inatomic(void *to,
6255 const void __user *from, unsigned long n)
6256 {
6257 @@ -387,6 +343,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6258 if (ret == 0)
6259 return 0;
6260 }
6261 +
6262 + if (!__builtin_constant_p(n))
6263 + check_object_size(to, n, false);
6264 +
6265 return __copy_tofrom_user((__force void __user *)to, from, n);
6266 }
6267
6268 @@ -413,6 +373,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6269 if (ret == 0)
6270 return 0;
6271 }
6272 +
6273 + if (!__builtin_constant_p(n))
6274 + check_object_size(from, n, true);
6275 +
6276 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6277 }
6278
6279 @@ -430,6 +394,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6280 return __copy_to_user_inatomic(to, from, size);
6281 }
6282
6283 +#ifndef __powerpc64__
6284 +
6285 +static inline unsigned long __must_check copy_from_user(void *to,
6286 + const void __user *from, unsigned long n)
6287 +{
6288 + unsigned long over;
6289 +
6290 + if ((long)n < 0)
6291 + return n;
6292 +
6293 + if (access_ok(VERIFY_READ, from, n)) {
6294 + if (!__builtin_constant_p(n))
6295 + check_object_size(to, n, false);
6296 + return __copy_tofrom_user((__force void __user *)to, from, n);
6297 + }
6298 + if ((unsigned long)from < TASK_SIZE) {
6299 + over = (unsigned long)from + n - TASK_SIZE;
6300 + if (!__builtin_constant_p(n - over))
6301 + check_object_size(to, n - over, false);
6302 + return __copy_tofrom_user((__force void __user *)to, from,
6303 + n - over) + over;
6304 + }
6305 + return n;
6306 +}
6307 +
6308 +static inline unsigned long __must_check copy_to_user(void __user *to,
6309 + const void *from, unsigned long n)
6310 +{
6311 + unsigned long over;
6312 +
6313 + if ((long)n < 0)
6314 + return n;
6315 +
6316 + if (access_ok(VERIFY_WRITE, to, n)) {
6317 + if (!__builtin_constant_p(n))
6318 + check_object_size(from, n, true);
6319 + return __copy_tofrom_user(to, (__force void __user *)from, n);
6320 + }
6321 + if ((unsigned long)to < TASK_SIZE) {
6322 + over = (unsigned long)to + n - TASK_SIZE;
6323 + if (!__builtin_constant_p(n))
6324 + check_object_size(from, n - over, true);
6325 + return __copy_tofrom_user(to, (__force void __user *)from,
6326 + n - over) + over;
6327 + }
6328 + return n;
6329 +}
6330 +
6331 +#else /* __powerpc64__ */
6332 +
6333 +#define __copy_in_user(to, from, size) \
6334 + __copy_tofrom_user((to), (from), (size))
6335 +
6336 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6337 +{
6338 + if ((long)n < 0 || n > INT_MAX)
6339 + return n;
6340 +
6341 + if (!__builtin_constant_p(n))
6342 + check_object_size(to, n, false);
6343 +
6344 + if (likely(access_ok(VERIFY_READ, from, n)))
6345 + n = __copy_from_user(to, from, n);
6346 + else
6347 + memset(to, 0, n);
6348 + return n;
6349 +}
6350 +
6351 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6352 +{
6353 + if ((long)n < 0 || n > INT_MAX)
6354 + return n;
6355 +
6356 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
6357 + if (!__builtin_constant_p(n))
6358 + check_object_size(from, n, true);
6359 + n = __copy_to_user(to, from, n);
6360 + }
6361 + return n;
6362 +}
6363 +
6364 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
6365 + unsigned long n);
6366 +
6367 +#endif /* __powerpc64__ */
6368 +
6369 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6370
6371 static inline unsigned long clear_user(void __user *addr, unsigned long size)
6372 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6373 index 4684e33..acc4d19e 100644
6374 --- a/arch/powerpc/kernel/exceptions-64e.S
6375 +++ b/arch/powerpc/kernel/exceptions-64e.S
6376 @@ -715,6 +715,7 @@ storage_fault_common:
6377 std r14,_DAR(r1)
6378 std r15,_DSISR(r1)
6379 addi r3,r1,STACK_FRAME_OVERHEAD
6380 + bl .save_nvgprs
6381 mr r4,r14
6382 mr r5,r15
6383 ld r14,PACA_EXGEN+EX_R14(r13)
6384 @@ -723,8 +724,7 @@ storage_fault_common:
6385 cmpdi r3,0
6386 bne- 1f
6387 b .ret_from_except_lite
6388 -1: bl .save_nvgprs
6389 - mr r5,r3
6390 +1: mr r5,r3
6391 addi r3,r1,STACK_FRAME_OVERHEAD
6392 ld r4,_DAR(r1)
6393 bl .bad_page_fault
6394 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6395 index 4665e82..080ea99 100644
6396 --- a/arch/powerpc/kernel/exceptions-64s.S
6397 +++ b/arch/powerpc/kernel/exceptions-64s.S
6398 @@ -1206,10 +1206,10 @@ handle_page_fault:
6399 11: ld r4,_DAR(r1)
6400 ld r5,_DSISR(r1)
6401 addi r3,r1,STACK_FRAME_OVERHEAD
6402 + bl .save_nvgprs
6403 bl .do_page_fault
6404 cmpdi r3,0
6405 beq+ 12f
6406 - bl .save_nvgprs
6407 mr r5,r3
6408 addi r3,r1,STACK_FRAME_OVERHEAD
6409 lwz r4,_DAR(r1)
6410 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6411 index 2e3200c..72095ce 100644
6412 --- a/arch/powerpc/kernel/module_32.c
6413 +++ b/arch/powerpc/kernel/module_32.c
6414 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6415 me->arch.core_plt_section = i;
6416 }
6417 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6418 - printk("Module doesn't contain .plt or .init.plt sections.\n");
6419 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6420 return -ENOEXEC;
6421 }
6422
6423 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6424
6425 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6426 /* Init, or core PLT? */
6427 - if (location >= mod->module_core
6428 - && location < mod->module_core + mod->core_size)
6429 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6430 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6431 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6432 - else
6433 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6434 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6435 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6436 + else {
6437 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6438 + return ~0UL;
6439 + }
6440
6441 /* Find this entry, or if that fails, the next avail. entry */
6442 while (entry->jump[0]) {
6443 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6444 index 8143067..21ae55b 100644
6445 --- a/arch/powerpc/kernel/process.c
6446 +++ b/arch/powerpc/kernel/process.c
6447 @@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
6448 * Lookup NIP late so we have the best change of getting the
6449 * above info out without failing
6450 */
6451 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6452 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6453 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6454 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6455 #endif
6456 show_stack(current, (unsigned long *) regs->gpr[1]);
6457 if (!user_mode(regs))
6458 @@ -1129,10 +1129,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6459 newsp = stack[0];
6460 ip = stack[STACK_FRAME_LR_SAVE];
6461 if (!firstframe || ip != lr) {
6462 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6463 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6464 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6465 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6466 - printk(" (%pS)",
6467 + printk(" (%pA)",
6468 (void *)current->ret_stack[curr_frame].ret);
6469 curr_frame--;
6470 }
6471 @@ -1152,7 +1152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6472 struct pt_regs *regs = (struct pt_regs *)
6473 (sp + STACK_FRAME_OVERHEAD);
6474 lr = regs->link;
6475 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
6476 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
6477 regs->trap, (void *)regs->nip, (void *)lr);
6478 firstframe = 1;
6479 }
6480 @@ -1194,58 +1194,3 @@ void __ppc64_runlatch_off(void)
6481 mtspr(SPRN_CTRLT, ctrl);
6482 }
6483 #endif /* CONFIG_PPC64 */
6484 -
6485 -unsigned long arch_align_stack(unsigned long sp)
6486 -{
6487 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6488 - sp -= get_random_int() & ~PAGE_MASK;
6489 - return sp & ~0xf;
6490 -}
6491 -
6492 -static inline unsigned long brk_rnd(void)
6493 -{
6494 - unsigned long rnd = 0;
6495 -
6496 - /* 8MB for 32bit, 1GB for 64bit */
6497 - if (is_32bit_task())
6498 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
6499 - else
6500 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
6501 -
6502 - return rnd << PAGE_SHIFT;
6503 -}
6504 -
6505 -unsigned long arch_randomize_brk(struct mm_struct *mm)
6506 -{
6507 - unsigned long base = mm->brk;
6508 - unsigned long ret;
6509 -
6510 -#ifdef CONFIG_PPC_STD_MMU_64
6511 - /*
6512 - * If we are using 1TB segments and we are allowed to randomise
6513 - * the heap, we can put it above 1TB so it is backed by a 1TB
6514 - * segment. Otherwise the heap will be in the bottom 1TB
6515 - * which always uses 256MB segments and this may result in a
6516 - * performance penalty.
6517 - */
6518 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
6519 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
6520 -#endif
6521 -
6522 - ret = PAGE_ALIGN(base + brk_rnd());
6523 -
6524 - if (ret < mm->brk)
6525 - return mm->brk;
6526 -
6527 - return ret;
6528 -}
6529 -
6530 -unsigned long randomize_et_dyn(unsigned long base)
6531 -{
6532 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
6533 -
6534 - if (ret < base)
6535 - return base;
6536 -
6537 - return ret;
6538 -}
6539 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
6540 index c497000..8fde506 100644
6541 --- a/arch/powerpc/kernel/ptrace.c
6542 +++ b/arch/powerpc/kernel/ptrace.c
6543 @@ -1737,6 +1737,10 @@ long arch_ptrace(struct task_struct *child, long request,
6544 return ret;
6545 }
6546
6547 +#ifdef CONFIG_GRKERNSEC_SETXID
6548 +extern void gr_delayed_cred_worker(void);
6549 +#endif
6550 +
6551 /*
6552 * We must return the syscall number to actually look up in the table.
6553 * This can be -1L to skip running any syscall at all.
6554 @@ -1747,6 +1751,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
6555
6556 secure_computing_strict(regs->gpr[0]);
6557
6558 +#ifdef CONFIG_GRKERNSEC_SETXID
6559 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6560 + gr_delayed_cred_worker();
6561 +#endif
6562 +
6563 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
6564 tracehook_report_syscall_entry(regs))
6565 /*
6566 @@ -1781,6 +1790,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
6567 {
6568 int step;
6569
6570 +#ifdef CONFIG_GRKERNSEC_SETXID
6571 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6572 + gr_delayed_cred_worker();
6573 +#endif
6574 +
6575 audit_syscall_exit(regs);
6576
6577 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6578 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
6579 index 804e323..79181c1 100644
6580 --- a/arch/powerpc/kernel/signal_32.c
6581 +++ b/arch/powerpc/kernel/signal_32.c
6582 @@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
6583 /* Save user registers on the stack */
6584 frame = &rt_sf->uc.uc_mcontext;
6585 addr = frame;
6586 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
6587 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6588 if (save_user_regs(regs, frame, 0, 1))
6589 goto badframe;
6590 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
6591 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
6592 index 1ca045d..139c3f7 100644
6593 --- a/arch/powerpc/kernel/signal_64.c
6594 +++ b/arch/powerpc/kernel/signal_64.c
6595 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
6596 current->thread.fpscr.val = 0;
6597
6598 /* Set up to return from userspace. */
6599 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
6600 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6601 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
6602 } else {
6603 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
6604 diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
6605 index 3ce1f86..c30e629 100644
6606 --- a/arch/powerpc/kernel/sysfs.c
6607 +++ b/arch/powerpc/kernel/sysfs.c
6608 @@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
6609 return NOTIFY_OK;
6610 }
6611
6612 -static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
6613 +static struct notifier_block sysfs_cpu_nb = {
6614 .notifier_call = sysfs_cpu_notify,
6615 };
6616
6617 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
6618 index 3251840..3f7c77a 100644
6619 --- a/arch/powerpc/kernel/traps.c
6620 +++ b/arch/powerpc/kernel/traps.c
6621 @@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
6622 return flags;
6623 }
6624
6625 +extern void gr_handle_kernel_exploit(void);
6626 +
6627 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6628 int signr)
6629 {
6630 @@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6631 panic("Fatal exception in interrupt");
6632 if (panic_on_oops)
6633 panic("Fatal exception");
6634 +
6635 + gr_handle_kernel_exploit();
6636 +
6637 do_exit(signr);
6638 }
6639
6640 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
6641 index 1b2076f..835e4be 100644
6642 --- a/arch/powerpc/kernel/vdso.c
6643 +++ b/arch/powerpc/kernel/vdso.c
6644 @@ -34,6 +34,7 @@
6645 #include <asm/firmware.h>
6646 #include <asm/vdso.h>
6647 #include <asm/vdso_datapage.h>
6648 +#include <asm/mman.h>
6649
6650 #include "setup.h"
6651
6652 @@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6653 vdso_base = VDSO32_MBASE;
6654 #endif
6655
6656 - current->mm->context.vdso_base = 0;
6657 + current->mm->context.vdso_base = ~0UL;
6658
6659 /* vDSO has a problem and was disabled, just don't "enable" it for the
6660 * process
6661 @@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6662 vdso_base = get_unmapped_area(NULL, vdso_base,
6663 (vdso_pages << PAGE_SHIFT) +
6664 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
6665 - 0, 0);
6666 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
6667 if (IS_ERR_VALUE(vdso_base)) {
6668 rc = vdso_base;
6669 goto fail_mmapsem;
6670 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
6671 index 5eea6f3..5d10396 100644
6672 --- a/arch/powerpc/lib/usercopy_64.c
6673 +++ b/arch/powerpc/lib/usercopy_64.c
6674 @@ -9,22 +9,6 @@
6675 #include <linux/module.h>
6676 #include <asm/uaccess.h>
6677
6678 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6679 -{
6680 - if (likely(access_ok(VERIFY_READ, from, n)))
6681 - n = __copy_from_user(to, from, n);
6682 - else
6683 - memset(to, 0, n);
6684 - return n;
6685 -}
6686 -
6687 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6688 -{
6689 - if (likely(access_ok(VERIFY_WRITE, to, n)))
6690 - n = __copy_to_user(to, from, n);
6691 - return n;
6692 -}
6693 -
6694 unsigned long copy_in_user(void __user *to, const void __user *from,
6695 unsigned long n)
6696 {
6697 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
6698 return n;
6699 }
6700
6701 -EXPORT_SYMBOL(copy_from_user);
6702 -EXPORT_SYMBOL(copy_to_user);
6703 EXPORT_SYMBOL(copy_in_user);
6704
6705 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
6706 index 3a8489a..6a63b3b 100644
6707 --- a/arch/powerpc/mm/fault.c
6708 +++ b/arch/powerpc/mm/fault.c
6709 @@ -32,6 +32,10 @@
6710 #include <linux/perf_event.h>
6711 #include <linux/magic.h>
6712 #include <linux/ratelimit.h>
6713 +#include <linux/slab.h>
6714 +#include <linux/pagemap.h>
6715 +#include <linux/compiler.h>
6716 +#include <linux/unistd.h>
6717
6718 #include <asm/firmware.h>
6719 #include <asm/page.h>
6720 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
6721 }
6722 #endif
6723
6724 +#ifdef CONFIG_PAX_PAGEEXEC
6725 +/*
6726 + * PaX: decide what to do with offenders (regs->nip = fault address)
6727 + *
6728 + * returns 1 when task should be killed
6729 + */
6730 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6731 +{
6732 + return 1;
6733 +}
6734 +
6735 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6736 +{
6737 + unsigned long i;
6738 +
6739 + printk(KERN_ERR "PAX: bytes at PC: ");
6740 + for (i = 0; i < 5; i++) {
6741 + unsigned int c;
6742 + if (get_user(c, (unsigned int __user *)pc+i))
6743 + printk(KERN_CONT "???????? ");
6744 + else
6745 + printk(KERN_CONT "%08x ", c);
6746 + }
6747 + printk("\n");
6748 +}
6749 +#endif
6750 +
6751 /*
6752 * Check whether the instruction at regs->nip is a store using
6753 * an update addressing form which will update r1.
6754 @@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
6755 * indicate errors in DSISR but can validly be set in SRR1.
6756 */
6757 if (trap == 0x400)
6758 - error_code &= 0x48200000;
6759 + error_code &= 0x58200000;
6760 else
6761 is_write = error_code & DSISR_ISSTORE;
6762 #else
6763 @@ -364,7 +395,7 @@ good_area:
6764 * "undefined". Of those that can be set, this is the only
6765 * one which seems bad.
6766 */
6767 - if (error_code & 0x10000000)
6768 + if (error_code & DSISR_GUARDED)
6769 /* Guarded storage error. */
6770 goto bad_area;
6771 #endif /* CONFIG_8xx */
6772 @@ -379,7 +410,7 @@ good_area:
6773 * processors use the same I/D cache coherency mechanism
6774 * as embedded.
6775 */
6776 - if (error_code & DSISR_PROTFAULT)
6777 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
6778 goto bad_area;
6779 #endif /* CONFIG_PPC_STD_MMU */
6780
6781 @@ -462,6 +493,23 @@ bad_area:
6782 bad_area_nosemaphore:
6783 /* User mode accesses cause a SIGSEGV */
6784 if (user_mode(regs)) {
6785 +
6786 +#ifdef CONFIG_PAX_PAGEEXEC
6787 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
6788 +#ifdef CONFIG_PPC_STD_MMU
6789 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
6790 +#else
6791 + if (is_exec && regs->nip == address) {
6792 +#endif
6793 + switch (pax_handle_fetch_fault(regs)) {
6794 + }
6795 +
6796 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
6797 + do_group_exit(SIGKILL);
6798 + }
6799 + }
6800 +#endif
6801 +
6802 _exception(SIGSEGV, regs, code, address);
6803 return 0;
6804 }
6805 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
6806 index 67a42ed..cd463e0 100644
6807 --- a/arch/powerpc/mm/mmap_64.c
6808 +++ b/arch/powerpc/mm/mmap_64.c
6809 @@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
6810 {
6811 unsigned long rnd = 0;
6812
6813 +#ifdef CONFIG_PAX_RANDMMAP
6814 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6815 +#endif
6816 +
6817 if (current->flags & PF_RANDOMIZE) {
6818 /* 8MB for 32bit, 1GB for 64bit */
6819 if (is_32bit_task())
6820 @@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6821 */
6822 if (mmap_is_legacy()) {
6823 mm->mmap_base = TASK_UNMAPPED_BASE;
6824 +
6825 +#ifdef CONFIG_PAX_RANDMMAP
6826 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6827 + mm->mmap_base += mm->delta_mmap;
6828 +#endif
6829 +
6830 mm->get_unmapped_area = arch_get_unmapped_area;
6831 mm->unmap_area = arch_unmap_area;
6832 } else {
6833 mm->mmap_base = mmap_base();
6834 +
6835 +#ifdef CONFIG_PAX_RANDMMAP
6836 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6837 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6838 +#endif
6839 +
6840 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6841 mm->unmap_area = arch_unmap_area_topdown;
6842 }
6843 diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
6844 index e779642..e5bb889 100644
6845 --- a/arch/powerpc/mm/mmu_context_nohash.c
6846 +++ b/arch/powerpc/mm/mmu_context_nohash.c
6847 @@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
6848 return NOTIFY_OK;
6849 }
6850
6851 -static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
6852 +static struct notifier_block mmu_context_cpu_nb = {
6853 .notifier_call = mmu_context_cpu_notify,
6854 };
6855
6856 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
6857 index bba87ca..c346a33 100644
6858 --- a/arch/powerpc/mm/numa.c
6859 +++ b/arch/powerpc/mm/numa.c
6860 @@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
6861 return ret;
6862 }
6863
6864 -static struct notifier_block __cpuinitdata ppc64_numa_nb = {
6865 +static struct notifier_block ppc64_numa_nb = {
6866 .notifier_call = cpu_numa_callback,
6867 .priority = 1 /* Must run before sched domains notifier. */
6868 };
6869 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
6870 index cf9dada..241529f 100644
6871 --- a/arch/powerpc/mm/slice.c
6872 +++ b/arch/powerpc/mm/slice.c
6873 @@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
6874 if ((mm->task_size - len) < addr)
6875 return 0;
6876 vma = find_vma(mm, addr);
6877 - return (!vma || (addr + len) <= vma->vm_start);
6878 + return check_heap_stack_gap(vma, addr, len, 0);
6879 }
6880
6881 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
6882 @@ -272,7 +272,7 @@ full_search:
6883 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
6884 continue;
6885 }
6886 - if (!vma || addr + len <= vma->vm_start) {
6887 + if (check_heap_stack_gap(vma, addr, len, 0)) {
6888 /*
6889 * Remember the place where we stopped the search:
6890 */
6891 @@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
6892 }
6893 }
6894
6895 - addr = mm->mmap_base;
6896 - while (addr > len) {
6897 + if (mm->mmap_base < len)
6898 + addr = -ENOMEM;
6899 + else
6900 + addr = mm->mmap_base - len;
6901 +
6902 + while (!IS_ERR_VALUE(addr)) {
6903 /* Go down by chunk size */
6904 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
6905 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
6906
6907 /* Check for hit with different page size */
6908 mask = slice_range_to_mask(addr, len);
6909 @@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
6910 * return with success:
6911 */
6912 vma = find_vma(mm, addr);
6913 - if (!vma || (addr + len) <= vma->vm_start) {
6914 + if (check_heap_stack_gap(vma, addr, len, 0)) {
6915 /* remember the address as a hint for next time */
6916 if (use_cache)
6917 mm->free_area_cache = addr;
6918 @@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
6919 mm->cached_hole_size = vma->vm_start - addr;
6920
6921 /* try just below the current vma->vm_start */
6922 - addr = vma->vm_start;
6923 + addr = skip_heap_stack_gap(vma, len, 0);
6924 }
6925
6926 /*
6927 @@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
6928 if (fixed && addr > (mm->task_size - len))
6929 return -EINVAL;
6930
6931 +#ifdef CONFIG_PAX_RANDMMAP
6932 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
6933 + addr = 0;
6934 +#endif
6935 +
6936 /* If hint, make sure it matches our alignment restrictions */
6937 if (!fixed && addr) {
6938 addr = _ALIGN_UP(addr, 1ul << pshift);
6939 diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
6940 index bdb738a..49c9f95 100644
6941 --- a/arch/powerpc/platforms/powermac/smp.c
6942 +++ b/arch/powerpc/platforms/powermac/smp.c
6943 @@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
6944 return NOTIFY_OK;
6945 }
6946
6947 -static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
6948 +static struct notifier_block smp_core99_cpu_nb = {
6949 .notifier_call = smp_core99_cpu_notify,
6950 };
6951 #endif /* CONFIG_HOTPLUG_CPU */
6952 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
6953 index c797832..ce575c8 100644
6954 --- a/arch/s390/include/asm/atomic.h
6955 +++ b/arch/s390/include/asm/atomic.h
6956 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
6957 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
6958 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6959
6960 +#define atomic64_read_unchecked(v) atomic64_read(v)
6961 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6962 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6963 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6964 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6965 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
6966 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6967 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
6968 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6969 +
6970 #define smp_mb__before_atomic_dec() smp_mb()
6971 #define smp_mb__after_atomic_dec() smp_mb()
6972 #define smp_mb__before_atomic_inc() smp_mb()
6973 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
6974 index 4d7ccac..d03d0ad 100644
6975 --- a/arch/s390/include/asm/cache.h
6976 +++ b/arch/s390/include/asm/cache.h
6977 @@ -9,8 +9,10 @@
6978 #ifndef __ARCH_S390_CACHE_H
6979 #define __ARCH_S390_CACHE_H
6980
6981 -#define L1_CACHE_BYTES 256
6982 +#include <linux/const.h>
6983 +
6984 #define L1_CACHE_SHIFT 8
6985 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6986 #define NET_SKB_PAD 32
6987
6988 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6989 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
6990 index 178ff96..8c93bd1 100644
6991 --- a/arch/s390/include/asm/elf.h
6992 +++ b/arch/s390/include/asm/elf.h
6993 @@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
6994 the loader. We need to make sure that it is out of the way of the program
6995 that it will "exec", and that there is sufficient room for the brk. */
6996
6997 -extern unsigned long randomize_et_dyn(unsigned long base);
6998 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
6999 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7000 +
7001 +#ifdef CONFIG_PAX_ASLR
7002 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7003 +
7004 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7005 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7006 +#endif
7007
7008 /* This yields a mask that user programs can use to figure out what
7009 instruction set this CPU supports. */
7010 @@ -210,9 +216,6 @@ struct linux_binprm;
7011 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7012 int arch_setup_additional_pages(struct linux_binprm *, int);
7013
7014 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7015 -#define arch_randomize_brk arch_randomize_brk
7016 -
7017 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7018
7019 #endif
7020 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7021 index c4a93d6..4d2a9b4 100644
7022 --- a/arch/s390/include/asm/exec.h
7023 +++ b/arch/s390/include/asm/exec.h
7024 @@ -7,6 +7,6 @@
7025 #ifndef __ASM_EXEC_H
7026 #define __ASM_EXEC_H
7027
7028 -extern unsigned long arch_align_stack(unsigned long sp);
7029 +#define arch_align_stack(x) ((x) & ~0xfUL)
7030
7031 #endif /* __ASM_EXEC_H */
7032 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7033 index 34268df..ea97318 100644
7034 --- a/arch/s390/include/asm/uaccess.h
7035 +++ b/arch/s390/include/asm/uaccess.h
7036 @@ -252,6 +252,10 @@ static inline unsigned long __must_check
7037 copy_to_user(void __user *to, const void *from, unsigned long n)
7038 {
7039 might_fault();
7040 +
7041 + if ((long)n < 0)
7042 + return n;
7043 +
7044 if (access_ok(VERIFY_WRITE, to, n))
7045 n = __copy_to_user(to, from, n);
7046 return n;
7047 @@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7048 static inline unsigned long __must_check
7049 __copy_from_user(void *to, const void __user *from, unsigned long n)
7050 {
7051 + if ((long)n < 0)
7052 + return n;
7053 +
7054 if (__builtin_constant_p(n) && (n <= 256))
7055 return uaccess.copy_from_user_small(n, from, to);
7056 else
7057 @@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7058 static inline unsigned long __must_check
7059 copy_from_user(void *to, const void __user *from, unsigned long n)
7060 {
7061 - unsigned int sz = __compiletime_object_size(to);
7062 + size_t sz = __compiletime_object_size(to);
7063
7064 might_fault();
7065 - if (unlikely(sz != -1 && sz < n)) {
7066 +
7067 + if ((long)n < 0)
7068 + return n;
7069 +
7070 + if (unlikely(sz != (size_t)-1 && sz < n)) {
7071 copy_from_user_overflow();
7072 return n;
7073 }
7074 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7075 index 4610dea..cf0af21 100644
7076 --- a/arch/s390/kernel/module.c
7077 +++ b/arch/s390/kernel/module.c
7078 @@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7079
7080 /* Increase core size by size of got & plt and set start
7081 offsets for got and plt. */
7082 - me->core_size = ALIGN(me->core_size, 4);
7083 - me->arch.got_offset = me->core_size;
7084 - me->core_size += me->arch.got_size;
7085 - me->arch.plt_offset = me->core_size;
7086 - me->core_size += me->arch.plt_size;
7087 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
7088 + me->arch.got_offset = me->core_size_rw;
7089 + me->core_size_rw += me->arch.got_size;
7090 + me->arch.plt_offset = me->core_size_rx;
7091 + me->core_size_rx += me->arch.plt_size;
7092 return 0;
7093 }
7094
7095 @@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7096 if (info->got_initialized == 0) {
7097 Elf_Addr *gotent;
7098
7099 - gotent = me->module_core + me->arch.got_offset +
7100 + gotent = me->module_core_rw + me->arch.got_offset +
7101 info->got_offset;
7102 *gotent = val;
7103 info->got_initialized = 1;
7104 @@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7105 else if (r_type == R_390_GOTENT ||
7106 r_type == R_390_GOTPLTENT)
7107 *(unsigned int *) loc =
7108 - (val + (Elf_Addr) me->module_core - loc) >> 1;
7109 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
7110 else if (r_type == R_390_GOT64 ||
7111 r_type == R_390_GOTPLT64)
7112 *(unsigned long *) loc = val;
7113 @@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7114 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7115 if (info->plt_initialized == 0) {
7116 unsigned int *ip;
7117 - ip = me->module_core + me->arch.plt_offset +
7118 + ip = me->module_core_rx + me->arch.plt_offset +
7119 info->plt_offset;
7120 #ifndef CONFIG_64BIT
7121 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7122 @@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7123 val - loc + 0xffffUL < 0x1ffffeUL) ||
7124 (r_type == R_390_PLT32DBL &&
7125 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7126 - val = (Elf_Addr) me->module_core +
7127 + val = (Elf_Addr) me->module_core_rx +
7128 me->arch.plt_offset +
7129 info->plt_offset;
7130 val += rela->r_addend - loc;
7131 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7132 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7133 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7134 val = val + rela->r_addend -
7135 - ((Elf_Addr) me->module_core + me->arch.got_offset);
7136 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7137 if (r_type == R_390_GOTOFF16)
7138 *(unsigned short *) loc = val;
7139 else if (r_type == R_390_GOTOFF32)
7140 @@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7141 break;
7142 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7143 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7144 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
7145 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7146 rela->r_addend - loc;
7147 if (r_type == R_390_GOTPC)
7148 *(unsigned int *) loc = val;
7149 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7150 index 536d645..4a5bd9e 100644
7151 --- a/arch/s390/kernel/process.c
7152 +++ b/arch/s390/kernel/process.c
7153 @@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
7154 }
7155 return 0;
7156 }
7157 -
7158 -unsigned long arch_align_stack(unsigned long sp)
7159 -{
7160 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7161 - sp -= get_random_int() & ~PAGE_MASK;
7162 - return sp & ~0xf;
7163 -}
7164 -
7165 -static inline unsigned long brk_rnd(void)
7166 -{
7167 - /* 8MB for 32bit, 1GB for 64bit */
7168 - if (is_32bit_task())
7169 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7170 - else
7171 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7172 -}
7173 -
7174 -unsigned long arch_randomize_brk(struct mm_struct *mm)
7175 -{
7176 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7177 -
7178 - if (ret < mm->brk)
7179 - return mm->brk;
7180 - return ret;
7181 -}
7182 -
7183 -unsigned long randomize_et_dyn(unsigned long base)
7184 -{
7185 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7186 -
7187 - if (!(current->flags & PF_RANDOMIZE))
7188 - return base;
7189 - if (ret < base)
7190 - return base;
7191 - return ret;
7192 -}
7193 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7194 index c59a5ef..3fae59c 100644
7195 --- a/arch/s390/mm/mmap.c
7196 +++ b/arch/s390/mm/mmap.c
7197 @@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7198 */
7199 if (mmap_is_legacy()) {
7200 mm->mmap_base = TASK_UNMAPPED_BASE;
7201 +
7202 +#ifdef CONFIG_PAX_RANDMMAP
7203 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7204 + mm->mmap_base += mm->delta_mmap;
7205 +#endif
7206 +
7207 mm->get_unmapped_area = arch_get_unmapped_area;
7208 mm->unmap_area = arch_unmap_area;
7209 } else {
7210 mm->mmap_base = mmap_base();
7211 +
7212 +#ifdef CONFIG_PAX_RANDMMAP
7213 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7214 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7215 +#endif
7216 +
7217 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7218 mm->unmap_area = arch_unmap_area_topdown;
7219 }
7220 @@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7221 */
7222 if (mmap_is_legacy()) {
7223 mm->mmap_base = TASK_UNMAPPED_BASE;
7224 +
7225 +#ifdef CONFIG_PAX_RANDMMAP
7226 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7227 + mm->mmap_base += mm->delta_mmap;
7228 +#endif
7229 +
7230 mm->get_unmapped_area = s390_get_unmapped_area;
7231 mm->unmap_area = arch_unmap_area;
7232 } else {
7233 mm->mmap_base = mmap_base();
7234 +
7235 +#ifdef CONFIG_PAX_RANDMMAP
7236 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7237 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7238 +#endif
7239 +
7240 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7241 mm->unmap_area = arch_unmap_area_topdown;
7242 }
7243 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7244 index ae3d59f..f65f075 100644
7245 --- a/arch/score/include/asm/cache.h
7246 +++ b/arch/score/include/asm/cache.h
7247 @@ -1,7 +1,9 @@
7248 #ifndef _ASM_SCORE_CACHE_H
7249 #define _ASM_SCORE_CACHE_H
7250
7251 +#include <linux/const.h>
7252 +
7253 #define L1_CACHE_SHIFT 4
7254 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7255 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7256
7257 #endif /* _ASM_SCORE_CACHE_H */
7258 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7259 index f9f3cd5..58ff438 100644
7260 --- a/arch/score/include/asm/exec.h
7261 +++ b/arch/score/include/asm/exec.h
7262 @@ -1,6 +1,6 @@
7263 #ifndef _ASM_SCORE_EXEC_H
7264 #define _ASM_SCORE_EXEC_H
7265
7266 -extern unsigned long arch_align_stack(unsigned long sp);
7267 +#define arch_align_stack(x) (x)
7268
7269 #endif /* _ASM_SCORE_EXEC_H */
7270 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7271 index 7956846..5f37677 100644
7272 --- a/arch/score/kernel/process.c
7273 +++ b/arch/score/kernel/process.c
7274 @@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
7275
7276 return task_pt_regs(task)->cp0_epc;
7277 }
7278 -
7279 -unsigned long arch_align_stack(unsigned long sp)
7280 -{
7281 - return sp;
7282 -}
7283 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7284 index ef9e555..331bd29 100644
7285 --- a/arch/sh/include/asm/cache.h
7286 +++ b/arch/sh/include/asm/cache.h
7287 @@ -9,10 +9,11 @@
7288 #define __ASM_SH_CACHE_H
7289 #ifdef __KERNEL__
7290
7291 +#include <linux/const.h>
7292 #include <linux/init.h>
7293 #include <cpu/cache.h>
7294
7295 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7296 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7297
7298 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7299
7300 diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7301 index 03f2b55..b027032 100644
7302 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7303 +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7304 @@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7305 return NOTIFY_OK;
7306 }
7307
7308 -static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7309 +static struct notifier_block shx3_cpu_notifier = {
7310 .notifier_call = shx3_cpu_callback,
7311 };
7312
7313 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7314 index 6777177..cb5e44f 100644
7315 --- a/arch/sh/mm/mmap.c
7316 +++ b/arch/sh/mm/mmap.c
7317 @@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7318 struct mm_struct *mm = current->mm;
7319 struct vm_area_struct *vma;
7320 int do_colour_align;
7321 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7322 struct vm_unmapped_area_info info;
7323
7324 if (flags & MAP_FIXED) {
7325 @@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7326 if (filp || (flags & MAP_SHARED))
7327 do_colour_align = 1;
7328
7329 +#ifdef CONFIG_PAX_RANDMMAP
7330 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7331 +#endif
7332 +
7333 if (addr) {
7334 if (do_colour_align)
7335 addr = COLOUR_ALIGN(addr, pgoff);
7336 @@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7337 addr = PAGE_ALIGN(addr);
7338
7339 vma = find_vma(mm, addr);
7340 - if (TASK_SIZE - len >= addr &&
7341 - (!vma || addr + len <= vma->vm_start))
7342 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7343 return addr;
7344 }
7345
7346 info.flags = 0;
7347 info.length = len;
7348 - info.low_limit = TASK_UNMAPPED_BASE;
7349 + info.low_limit = mm->mmap_base;
7350 info.high_limit = TASK_SIZE;
7351 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7352 info.align_offset = pgoff << PAGE_SHIFT;
7353 @@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7354 struct mm_struct *mm = current->mm;
7355 unsigned long addr = addr0;
7356 int do_colour_align;
7357 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7358 struct vm_unmapped_area_info info;
7359
7360 if (flags & MAP_FIXED) {
7361 @@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7362 if (filp || (flags & MAP_SHARED))
7363 do_colour_align = 1;
7364
7365 +#ifdef CONFIG_PAX_RANDMMAP
7366 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7367 +#endif
7368 +
7369 /* requesting a specific address */
7370 if (addr) {
7371 if (do_colour_align)
7372 @@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7373 addr = PAGE_ALIGN(addr);
7374
7375 vma = find_vma(mm, addr);
7376 - if (TASK_SIZE - len >= addr &&
7377 - (!vma || addr + len <= vma->vm_start))
7378 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7379 return addr;
7380 }
7381
7382 @@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7383 VM_BUG_ON(addr != -ENOMEM);
7384 info.flags = 0;
7385 info.low_limit = TASK_UNMAPPED_BASE;
7386 +
7387 +#ifdef CONFIG_PAX_RANDMMAP
7388 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7389 + info.low_limit += mm->delta_mmap;
7390 +#endif
7391 +
7392 info.high_limit = TASK_SIZE;
7393 addr = vm_unmapped_area(&info);
7394 }
7395 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7396 index be56a24..443328f 100644
7397 --- a/arch/sparc/include/asm/atomic_64.h
7398 +++ b/arch/sparc/include/asm/atomic_64.h
7399 @@ -14,18 +14,40 @@
7400 #define ATOMIC64_INIT(i) { (i) }
7401
7402 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7403 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7404 +{
7405 + return v->counter;
7406 +}
7407 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7408 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7409 +{
7410 + return v->counter;
7411 +}
7412
7413 #define atomic_set(v, i) (((v)->counter) = i)
7414 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7415 +{
7416 + v->counter = i;
7417 +}
7418 #define atomic64_set(v, i) (((v)->counter) = i)
7419 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7420 +{
7421 + v->counter = i;
7422 +}
7423
7424 extern void atomic_add(int, atomic_t *);
7425 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7426 extern void atomic64_add(long, atomic64_t *);
7427 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7428 extern void atomic_sub(int, atomic_t *);
7429 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7430 extern void atomic64_sub(long, atomic64_t *);
7431 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7432
7433 extern int atomic_add_ret(int, atomic_t *);
7434 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7435 extern long atomic64_add_ret(long, atomic64_t *);
7436 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7437 extern int atomic_sub_ret(int, atomic_t *);
7438 extern long atomic64_sub_ret(long, atomic64_t *);
7439
7440 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7441 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7442
7443 #define atomic_inc_return(v) atomic_add_ret(1, v)
7444 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7445 +{
7446 + return atomic_add_ret_unchecked(1, v);
7447 +}
7448 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7449 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7450 +{
7451 + return atomic64_add_ret_unchecked(1, v);
7452 +}
7453
7454 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7455 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7456
7457 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7458 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7459 +{
7460 + return atomic_add_ret_unchecked(i, v);
7461 +}
7462 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7463 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7464 +{
7465 + return atomic64_add_ret_unchecked(i, v);
7466 +}
7467
7468 /*
7469 * atomic_inc_and_test - increment and test
7470 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7471 * other cases.
7472 */
7473 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7474 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7475 +{
7476 + return atomic_inc_return_unchecked(v) == 0;
7477 +}
7478 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7479
7480 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
7481 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7482 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
7483
7484 #define atomic_inc(v) atomic_add(1, v)
7485 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7486 +{
7487 + atomic_add_unchecked(1, v);
7488 +}
7489 #define atomic64_inc(v) atomic64_add(1, v)
7490 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7491 +{
7492 + atomic64_add_unchecked(1, v);
7493 +}
7494
7495 #define atomic_dec(v) atomic_sub(1, v)
7496 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7497 +{
7498 + atomic_sub_unchecked(1, v);
7499 +}
7500 #define atomic64_dec(v) atomic64_sub(1, v)
7501 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7502 +{
7503 + atomic64_sub_unchecked(1, v);
7504 +}
7505
7506 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
7507 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
7508
7509 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7510 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7511 +{
7512 + return cmpxchg(&v->counter, old, new);
7513 +}
7514 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7515 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7516 +{
7517 + return xchg(&v->counter, new);
7518 +}
7519
7520 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7521 {
7522 - int c, old;
7523 + int c, old, new;
7524 c = atomic_read(v);
7525 for (;;) {
7526 - if (unlikely(c == (u)))
7527 + if (unlikely(c == u))
7528 break;
7529 - old = atomic_cmpxchg((v), c, c + (a));
7530 +
7531 + asm volatile("addcc %2, %0, %0\n"
7532 +
7533 +#ifdef CONFIG_PAX_REFCOUNT
7534 + "tvs %%icc, 6\n"
7535 +#endif
7536 +
7537 + : "=r" (new)
7538 + : "0" (c), "ir" (a)
7539 + : "cc");
7540 +
7541 + old = atomic_cmpxchg(v, c, new);
7542 if (likely(old == c))
7543 break;
7544 c = old;
7545 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7546 #define atomic64_cmpxchg(v, o, n) \
7547 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7548 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
7549 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7550 +{
7551 + return xchg(&v->counter, new);
7552 +}
7553
7554 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7555 {
7556 - long c, old;
7557 + long c, old, new;
7558 c = atomic64_read(v);
7559 for (;;) {
7560 - if (unlikely(c == (u)))
7561 + if (unlikely(c == u))
7562 break;
7563 - old = atomic64_cmpxchg((v), c, c + (a));
7564 +
7565 + asm volatile("addcc %2, %0, %0\n"
7566 +
7567 +#ifdef CONFIG_PAX_REFCOUNT
7568 + "tvs %%xcc, 6\n"
7569 +#endif
7570 +
7571 + : "=r" (new)
7572 + : "0" (c), "ir" (a)
7573 + : "cc");
7574 +
7575 + old = atomic64_cmpxchg(v, c, new);
7576 if (likely(old == c))
7577 break;
7578 c = old;
7579 }
7580 - return c != (u);
7581 + return c != u;
7582 }
7583
7584 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7585 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
7586 index 5bb6991..5c2132e 100644
7587 --- a/arch/sparc/include/asm/cache.h
7588 +++ b/arch/sparc/include/asm/cache.h
7589 @@ -7,10 +7,12 @@
7590 #ifndef _SPARC_CACHE_H
7591 #define _SPARC_CACHE_H
7592
7593 +#include <linux/const.h>
7594 +
7595 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
7596
7597 #define L1_CACHE_SHIFT 5
7598 -#define L1_CACHE_BYTES 32
7599 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7600
7601 #ifdef CONFIG_SPARC32
7602 #define SMP_CACHE_BYTES_SHIFT 5
7603 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
7604 index ac74a2c..a9e58af 100644
7605 --- a/arch/sparc/include/asm/elf_32.h
7606 +++ b/arch/sparc/include/asm/elf_32.h
7607 @@ -114,6 +114,13 @@ typedef struct {
7608
7609 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
7610
7611 +#ifdef CONFIG_PAX_ASLR
7612 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
7613 +
7614 +#define PAX_DELTA_MMAP_LEN 16
7615 +#define PAX_DELTA_STACK_LEN 16
7616 +#endif
7617 +
7618 /* This yields a mask that user programs can use to figure out what
7619 instruction set this cpu supports. This can NOT be done in userspace
7620 on Sparc. */
7621 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
7622 index 370ca1e..d4f4a98 100644
7623 --- a/arch/sparc/include/asm/elf_64.h
7624 +++ b/arch/sparc/include/asm/elf_64.h
7625 @@ -189,6 +189,13 @@ typedef struct {
7626 #define ELF_ET_DYN_BASE 0x0000010000000000UL
7627 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
7628
7629 +#ifdef CONFIG_PAX_ASLR
7630 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
7631 +
7632 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
7633 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
7634 +#endif
7635 +
7636 extern unsigned long sparc64_elf_hwcap;
7637 #define ELF_HWCAP sparc64_elf_hwcap
7638
7639 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
7640 index 9b1c36d..209298b 100644
7641 --- a/arch/sparc/include/asm/pgalloc_32.h
7642 +++ b/arch/sparc/include/asm/pgalloc_32.h
7643 @@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
7644 }
7645
7646 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
7647 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
7648
7649 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
7650 unsigned long address)
7651 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
7652 index bcfe063..b333142 100644
7653 --- a/arch/sparc/include/asm/pgalloc_64.h
7654 +++ b/arch/sparc/include/asm/pgalloc_64.h
7655 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7656 }
7657
7658 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
7659 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
7660
7661 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
7662 {
7663 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
7664 index 6fc1348..390c50a 100644
7665 --- a/arch/sparc/include/asm/pgtable_32.h
7666 +++ b/arch/sparc/include/asm/pgtable_32.h
7667 @@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
7668 #define PAGE_SHARED SRMMU_PAGE_SHARED
7669 #define PAGE_COPY SRMMU_PAGE_COPY
7670 #define PAGE_READONLY SRMMU_PAGE_RDONLY
7671 +#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
7672 +#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
7673 +#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
7674 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
7675
7676 /* Top-level page directory - dummy used by init-mm.
7677 @@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
7678
7679 /* xwr */
7680 #define __P000 PAGE_NONE
7681 -#define __P001 PAGE_READONLY
7682 -#define __P010 PAGE_COPY
7683 -#define __P011 PAGE_COPY
7684 +#define __P001 PAGE_READONLY_NOEXEC
7685 +#define __P010 PAGE_COPY_NOEXEC
7686 +#define __P011 PAGE_COPY_NOEXEC
7687 #define __P100 PAGE_READONLY
7688 #define __P101 PAGE_READONLY
7689 #define __P110 PAGE_COPY
7690 #define __P111 PAGE_COPY
7691
7692 #define __S000 PAGE_NONE
7693 -#define __S001 PAGE_READONLY
7694 -#define __S010 PAGE_SHARED
7695 -#define __S011 PAGE_SHARED
7696 +#define __S001 PAGE_READONLY_NOEXEC
7697 +#define __S010 PAGE_SHARED_NOEXEC
7698 +#define __S011 PAGE_SHARED_NOEXEC
7699 #define __S100 PAGE_READONLY
7700 #define __S101 PAGE_READONLY
7701 #define __S110 PAGE_SHARED
7702 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
7703 index 79da178..c2eede8 100644
7704 --- a/arch/sparc/include/asm/pgtsrmmu.h
7705 +++ b/arch/sparc/include/asm/pgtsrmmu.h
7706 @@ -115,6 +115,11 @@
7707 SRMMU_EXEC | SRMMU_REF)
7708 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
7709 SRMMU_EXEC | SRMMU_REF)
7710 +
7711 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
7712 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7713 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7714 +
7715 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
7716 SRMMU_DIRTY | SRMMU_REF)
7717
7718 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
7719 index 9689176..63c18ea 100644
7720 --- a/arch/sparc/include/asm/spinlock_64.h
7721 +++ b/arch/sparc/include/asm/spinlock_64.h
7722 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
7723
7724 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
7725
7726 -static void inline arch_read_lock(arch_rwlock_t *lock)
7727 +static inline void arch_read_lock(arch_rwlock_t *lock)
7728 {
7729 unsigned long tmp1, tmp2;
7730
7731 __asm__ __volatile__ (
7732 "1: ldsw [%2], %0\n"
7733 " brlz,pn %0, 2f\n"
7734 -"4: add %0, 1, %1\n"
7735 +"4: addcc %0, 1, %1\n"
7736 +
7737 +#ifdef CONFIG_PAX_REFCOUNT
7738 +" tvs %%icc, 6\n"
7739 +#endif
7740 +
7741 " cas [%2], %0, %1\n"
7742 " cmp %0, %1\n"
7743 " bne,pn %%icc, 1b\n"
7744 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
7745 " .previous"
7746 : "=&r" (tmp1), "=&r" (tmp2)
7747 : "r" (lock)
7748 - : "memory");
7749 + : "memory", "cc");
7750 }
7751
7752 -static int inline arch_read_trylock(arch_rwlock_t *lock)
7753 +static inline int arch_read_trylock(arch_rwlock_t *lock)
7754 {
7755 int tmp1, tmp2;
7756
7757 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
7758 "1: ldsw [%2], %0\n"
7759 " brlz,a,pn %0, 2f\n"
7760 " mov 0, %0\n"
7761 -" add %0, 1, %1\n"
7762 +" addcc %0, 1, %1\n"
7763 +
7764 +#ifdef CONFIG_PAX_REFCOUNT
7765 +" tvs %%icc, 6\n"
7766 +#endif
7767 +
7768 " cas [%2], %0, %1\n"
7769 " cmp %0, %1\n"
7770 " bne,pn %%icc, 1b\n"
7771 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
7772 return tmp1;
7773 }
7774
7775 -static void inline arch_read_unlock(arch_rwlock_t *lock)
7776 +static inline void arch_read_unlock(arch_rwlock_t *lock)
7777 {
7778 unsigned long tmp1, tmp2;
7779
7780 __asm__ __volatile__(
7781 "1: lduw [%2], %0\n"
7782 -" sub %0, 1, %1\n"
7783 +" subcc %0, 1, %1\n"
7784 +
7785 +#ifdef CONFIG_PAX_REFCOUNT
7786 +" tvs %%icc, 6\n"
7787 +#endif
7788 +
7789 " cas [%2], %0, %1\n"
7790 " cmp %0, %1\n"
7791 " bne,pn %%xcc, 1b\n"
7792 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
7793 : "memory");
7794 }
7795
7796 -static void inline arch_write_lock(arch_rwlock_t *lock)
7797 +static inline void arch_write_lock(arch_rwlock_t *lock)
7798 {
7799 unsigned long mask, tmp1, tmp2;
7800
7801 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
7802 : "memory");
7803 }
7804
7805 -static void inline arch_write_unlock(arch_rwlock_t *lock)
7806 +static inline void arch_write_unlock(arch_rwlock_t *lock)
7807 {
7808 __asm__ __volatile__(
7809 " stw %%g0, [%0]"
7810 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
7811 : "memory");
7812 }
7813
7814 -static int inline arch_write_trylock(arch_rwlock_t *lock)
7815 +static inline int arch_write_trylock(arch_rwlock_t *lock)
7816 {
7817 unsigned long mask, tmp1, tmp2, result;
7818
7819 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
7820 index 25849ae..924c54b 100644
7821 --- a/arch/sparc/include/asm/thread_info_32.h
7822 +++ b/arch/sparc/include/asm/thread_info_32.h
7823 @@ -49,6 +49,8 @@ struct thread_info {
7824 unsigned long w_saved;
7825
7826 struct restart_block restart_block;
7827 +
7828 + unsigned long lowest_stack;
7829 };
7830
7831 /*
7832 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
7833 index 269bd92..e46a9b8 100644
7834 --- a/arch/sparc/include/asm/thread_info_64.h
7835 +++ b/arch/sparc/include/asm/thread_info_64.h
7836 @@ -63,6 +63,8 @@ struct thread_info {
7837 struct pt_regs *kern_una_regs;
7838 unsigned int kern_una_insn;
7839
7840 + unsigned long lowest_stack;
7841 +
7842 unsigned long fpregs[0] __attribute__ ((aligned(64)));
7843 };
7844
7845 @@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
7846 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
7847 /* flag bit 6 is available */
7848 #define TIF_32BIT 7 /* 32-bit binary */
7849 -/* flag bit 8 is available */
7850 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
7851 #define TIF_SECCOMP 9 /* secure computing */
7852 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
7853 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
7854 +
7855 /* NOTE: Thread flags >= 12 should be ones we have no interest
7856 * in using in assembly, else we can't use the mask as
7857 * an immediate value in instructions such as andcc.
7858 @@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
7859 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
7860 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7861 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
7862 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7863
7864 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
7865 _TIF_DO_NOTIFY_RESUME_MASK | \
7866 _TIF_NEED_RESCHED)
7867 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
7868
7869 +#define _TIF_WORK_SYSCALL \
7870 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
7871 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7872 +
7873 +
7874 /*
7875 * Thread-synchronous status.
7876 *
7877 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
7878 index 0167d26..9acd8ed 100644
7879 --- a/arch/sparc/include/asm/uaccess.h
7880 +++ b/arch/sparc/include/asm/uaccess.h
7881 @@ -1,5 +1,13 @@
7882 #ifndef ___ASM_SPARC_UACCESS_H
7883 #define ___ASM_SPARC_UACCESS_H
7884 +
7885 +#ifdef __KERNEL__
7886 +#ifndef __ASSEMBLY__
7887 +#include <linux/types.h>
7888 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
7889 +#endif
7890 +#endif
7891 +
7892 #if defined(__sparc__) && defined(__arch64__)
7893 #include <asm/uaccess_64.h>
7894 #else
7895 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
7896 index 53a28dd..50c38c3 100644
7897 --- a/arch/sparc/include/asm/uaccess_32.h
7898 +++ b/arch/sparc/include/asm/uaccess_32.h
7899 @@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
7900
7901 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
7902 {
7903 - if (n && __access_ok((unsigned long) to, n))
7904 + if ((long)n < 0)
7905 + return n;
7906 +
7907 + if (n && __access_ok((unsigned long) to, n)) {
7908 + if (!__builtin_constant_p(n))
7909 + check_object_size(from, n, true);
7910 return __copy_user(to, (__force void __user *) from, n);
7911 - else
7912 + } else
7913 return n;
7914 }
7915
7916 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
7917 {
7918 + if ((long)n < 0)
7919 + return n;
7920 +
7921 + if (!__builtin_constant_p(n))
7922 + check_object_size(from, n, true);
7923 +
7924 return __copy_user(to, (__force void __user *) from, n);
7925 }
7926
7927 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
7928 {
7929 - if (n && __access_ok((unsigned long) from, n))
7930 + if ((long)n < 0)
7931 + return n;
7932 +
7933 + if (n && __access_ok((unsigned long) from, n)) {
7934 + if (!__builtin_constant_p(n))
7935 + check_object_size(to, n, false);
7936 return __copy_user((__force void __user *) to, from, n);
7937 - else
7938 + } else
7939 return n;
7940 }
7941
7942 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
7943 {
7944 + if ((long)n < 0)
7945 + return n;
7946 +
7947 return __copy_user((__force void __user *) to, from, n);
7948 }
7949
7950 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
7951 index e562d3c..191f176 100644
7952 --- a/arch/sparc/include/asm/uaccess_64.h
7953 +++ b/arch/sparc/include/asm/uaccess_64.h
7954 @@ -10,6 +10,7 @@
7955 #include <linux/compiler.h>
7956 #include <linux/string.h>
7957 #include <linux/thread_info.h>
7958 +#include <linux/kernel.h>
7959 #include <asm/asi.h>
7960 #include <asm/spitfire.h>
7961 #include <asm-generic/uaccess-unaligned.h>
7962 @@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
7963 static inline unsigned long __must_check
7964 copy_from_user(void *to, const void __user *from, unsigned long size)
7965 {
7966 - unsigned long ret = ___copy_from_user(to, from, size);
7967 + unsigned long ret;
7968
7969 + if ((long)size < 0 || size > INT_MAX)
7970 + return size;
7971 +
7972 + if (!__builtin_constant_p(size))
7973 + check_object_size(to, size, false);
7974 +
7975 + ret = ___copy_from_user(to, from, size);
7976 if (unlikely(ret))
7977 ret = copy_from_user_fixup(to, from, size);
7978
7979 @@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
7980 static inline unsigned long __must_check
7981 copy_to_user(void __user *to, const void *from, unsigned long size)
7982 {
7983 - unsigned long ret = ___copy_to_user(to, from, size);
7984 + unsigned long ret;
7985
7986 + if ((long)size < 0 || size > INT_MAX)
7987 + return size;
7988 +
7989 + if (!__builtin_constant_p(size))
7990 + check_object_size(from, size, true);
7991 +
7992 + ret = ___copy_to_user(to, from, size);
7993 if (unlikely(ret))
7994 ret = copy_to_user_fixup(to, from, size);
7995 return ret;
7996 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
7997 index 6cf591b..b49e65a 100644
7998 --- a/arch/sparc/kernel/Makefile
7999 +++ b/arch/sparc/kernel/Makefile
8000 @@ -3,7 +3,7 @@
8001 #
8002
8003 asflags-y := -ansi
8004 -ccflags-y := -Werror
8005 +#ccflags-y := -Werror
8006
8007 extra-y := head_$(BITS).o
8008
8009 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8010 index be8e862..5b50b12 100644
8011 --- a/arch/sparc/kernel/process_32.c
8012 +++ b/arch/sparc/kernel/process_32.c
8013 @@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
8014
8015 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8016 r->psr, r->pc, r->npc, r->y, print_tainted());
8017 - printk("PC: <%pS>\n", (void *) r->pc);
8018 + printk("PC: <%pA>\n", (void *) r->pc);
8019 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8020 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8021 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8022 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8023 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8024 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8025 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8026 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8027
8028 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8029 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8030 @@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8031 rw = (struct reg_window32 *) fp;
8032 pc = rw->ins[7];
8033 printk("[%08lx : ", pc);
8034 - printk("%pS ] ", (void *) pc);
8035 + printk("%pA ] ", (void *) pc);
8036 fp = rw->ins[6];
8037 } while (++count < 16);
8038 printk("\n");
8039 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8040 index cdb80b2..5ca141d 100644
8041 --- a/arch/sparc/kernel/process_64.c
8042 +++ b/arch/sparc/kernel/process_64.c
8043 @@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8044 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8045 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8046 if (regs->tstate & TSTATE_PRIV)
8047 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8048 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8049 }
8050
8051 void show_regs(struct pt_regs *regs)
8052 {
8053 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8054 regs->tpc, regs->tnpc, regs->y, print_tainted());
8055 - printk("TPC: <%pS>\n", (void *) regs->tpc);
8056 + printk("TPC: <%pA>\n", (void *) regs->tpc);
8057 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8058 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8059 regs->u_regs[3]);
8060 @@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
8061 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8062 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8063 regs->u_regs[15]);
8064 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8065 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8066 show_regwindow(regs);
8067 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8068 }
8069 @@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
8070 ((tp && tp->task) ? tp->task->pid : -1));
8071
8072 if (gp->tstate & TSTATE_PRIV) {
8073 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8074 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8075 (void *) gp->tpc,
8076 (void *) gp->o7,
8077 (void *) gp->i7,
8078 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8079 index 7ff45e4..a58f271 100644
8080 --- a/arch/sparc/kernel/ptrace_64.c
8081 +++ b/arch/sparc/kernel/ptrace_64.c
8082 @@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8083 return ret;
8084 }
8085
8086 +#ifdef CONFIG_GRKERNSEC_SETXID
8087 +extern void gr_delayed_cred_worker(void);
8088 +#endif
8089 +
8090 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8091 {
8092 int ret = 0;
8093 @@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8094 /* do the secure computing check first */
8095 secure_computing_strict(regs->u_regs[UREG_G1]);
8096
8097 +#ifdef CONFIG_GRKERNSEC_SETXID
8098 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8099 + gr_delayed_cred_worker();
8100 +#endif
8101 +
8102 if (test_thread_flag(TIF_SYSCALL_TRACE))
8103 ret = tracehook_report_syscall_entry(regs);
8104
8105 @@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8106
8107 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8108 {
8109 +#ifdef CONFIG_GRKERNSEC_SETXID
8110 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8111 + gr_delayed_cred_worker();
8112 +#endif
8113 +
8114 audit_syscall_exit(regs);
8115
8116 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8117 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8118 index 2da0bdc..79128d2 100644
8119 --- a/arch/sparc/kernel/sys_sparc_32.c
8120 +++ b/arch/sparc/kernel/sys_sparc_32.c
8121 @@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8122 if (len > TASK_SIZE - PAGE_SIZE)
8123 return -ENOMEM;
8124 if (!addr)
8125 - addr = TASK_UNMAPPED_BASE;
8126 + addr = current->mm->mmap_base;
8127
8128 info.flags = 0;
8129 info.length = len;
8130 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8131 index 708bc29..f0129cb 100644
8132 --- a/arch/sparc/kernel/sys_sparc_64.c
8133 +++ b/arch/sparc/kernel/sys_sparc_64.c
8134 @@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8135 struct vm_area_struct * vma;
8136 unsigned long task_size = TASK_SIZE;
8137 int do_color_align;
8138 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8139 struct vm_unmapped_area_info info;
8140
8141 if (flags & MAP_FIXED) {
8142 /* We do not accept a shared mapping if it would violate
8143 * cache aliasing constraints.
8144 */
8145 - if ((flags & MAP_SHARED) &&
8146 + if ((filp || (flags & MAP_SHARED)) &&
8147 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8148 return -EINVAL;
8149 return addr;
8150 @@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8151 if (filp || (flags & MAP_SHARED))
8152 do_color_align = 1;
8153
8154 +#ifdef CONFIG_PAX_RANDMMAP
8155 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8156 +#endif
8157 +
8158 if (addr) {
8159 if (do_color_align)
8160 addr = COLOR_ALIGN(addr, pgoff);
8161 @@ -118,14 +123,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8162 addr = PAGE_ALIGN(addr);
8163
8164 vma = find_vma(mm, addr);
8165 - if (task_size - len >= addr &&
8166 - (!vma || addr + len <= vma->vm_start))
8167 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8168 return addr;
8169 }
8170
8171 info.flags = 0;
8172 info.length = len;
8173 - info.low_limit = TASK_UNMAPPED_BASE;
8174 + info.low_limit = mm->mmap_base;
8175 info.high_limit = min(task_size, VA_EXCLUDE_START);
8176 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8177 info.align_offset = pgoff << PAGE_SHIFT;
8178 @@ -134,6 +138,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8179 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8180 VM_BUG_ON(addr != -ENOMEM);
8181 info.low_limit = VA_EXCLUDE_END;
8182 +
8183 +#ifdef CONFIG_PAX_RANDMMAP
8184 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8185 + info.low_limit += mm->delta_mmap;
8186 +#endif
8187 +
8188 info.high_limit = task_size;
8189 addr = vm_unmapped_area(&info);
8190 }
8191 @@ -151,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8192 unsigned long task_size = STACK_TOP32;
8193 unsigned long addr = addr0;
8194 int do_color_align;
8195 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8196 struct vm_unmapped_area_info info;
8197
8198 /* This should only ever run for 32-bit processes. */
8199 @@ -160,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8200 /* We do not accept a shared mapping if it would violate
8201 * cache aliasing constraints.
8202 */
8203 - if ((flags & MAP_SHARED) &&
8204 + if ((filp || (flags & MAP_SHARED)) &&
8205 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8206 return -EINVAL;
8207 return addr;
8208 @@ -173,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8209 if (filp || (flags & MAP_SHARED))
8210 do_color_align = 1;
8211
8212 +#ifdef CONFIG_PAX_RANDMMAP
8213 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8214 +#endif
8215 +
8216 /* requesting a specific address */
8217 if (addr) {
8218 if (do_color_align)
8219 @@ -181,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8220 addr = PAGE_ALIGN(addr);
8221
8222 vma = find_vma(mm, addr);
8223 - if (task_size - len >= addr &&
8224 - (!vma || addr + len <= vma->vm_start))
8225 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8226 return addr;
8227 }
8228
8229 @@ -204,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8230 VM_BUG_ON(addr != -ENOMEM);
8231 info.flags = 0;
8232 info.low_limit = TASK_UNMAPPED_BASE;
8233 +
8234 +#ifdef CONFIG_PAX_RANDMMAP
8235 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8236 + info.low_limit += mm->delta_mmap;
8237 +#endif
8238 +
8239 info.high_limit = STACK_TOP32;
8240 addr = vm_unmapped_area(&info);
8241 }
8242 @@ -264,6 +284,10 @@ static unsigned long mmap_rnd(void)
8243 {
8244 unsigned long rnd = 0UL;
8245
8246 +#ifdef CONFIG_PAX_RANDMMAP
8247 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8248 +#endif
8249 +
8250 if (current->flags & PF_RANDOMIZE) {
8251 unsigned long val = get_random_int();
8252 if (test_thread_flag(TIF_32BIT))
8253 @@ -289,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8254 gap == RLIM_INFINITY ||
8255 sysctl_legacy_va_layout) {
8256 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8257 +
8258 +#ifdef CONFIG_PAX_RANDMMAP
8259 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8260 + mm->mmap_base += mm->delta_mmap;
8261 +#endif
8262 +
8263 mm->get_unmapped_area = arch_get_unmapped_area;
8264 mm->unmap_area = arch_unmap_area;
8265 } else {
8266 @@ -301,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8267 gap = (task_size / 6 * 5);
8268
8269 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8270 +
8271 +#ifdef CONFIG_PAX_RANDMMAP
8272 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8273 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8274 +#endif
8275 +
8276 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8277 mm->unmap_area = arch_unmap_area_topdown;
8278 }
8279 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8280 index e0fed77..604a7e5 100644
8281 --- a/arch/sparc/kernel/syscalls.S
8282 +++ b/arch/sparc/kernel/syscalls.S
8283 @@ -58,7 +58,7 @@ sys32_rt_sigreturn:
8284 #endif
8285 .align 32
8286 1: ldx [%g6 + TI_FLAGS], %l5
8287 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8288 + andcc %l5, _TIF_WORK_SYSCALL, %g0
8289 be,pt %icc, rtrap
8290 nop
8291 call syscall_trace_leave
8292 @@ -190,7 +190,7 @@ linux_sparc_syscall32:
8293
8294 srl %i5, 0, %o5 ! IEU1
8295 srl %i2, 0, %o2 ! IEU0 Group
8296 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8297 + andcc %l0, _TIF_WORK_SYSCALL, %g0
8298 bne,pn %icc, linux_syscall_trace32 ! CTI
8299 mov %i0, %l5 ! IEU1
8300 call %l7 ! CTI Group brk forced
8301 @@ -213,7 +213,7 @@ linux_sparc_syscall:
8302
8303 mov %i3, %o3 ! IEU1
8304 mov %i4, %o4 ! IEU0 Group
8305 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8306 + andcc %l0, _TIF_WORK_SYSCALL, %g0
8307 bne,pn %icc, linux_syscall_trace ! CTI Group
8308 mov %i0, %l5 ! IEU0
8309 2: call %l7 ! CTI Group brk forced
8310 @@ -229,7 +229,7 @@ ret_sys_call:
8311
8312 cmp %o0, -ERESTART_RESTARTBLOCK
8313 bgeu,pn %xcc, 1f
8314 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8315 + andcc %l0, _TIF_WORK_SYSCALL, %g0
8316 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8317
8318 2:
8319 diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8320 index 654e8aa..45f431b 100644
8321 --- a/arch/sparc/kernel/sysfs.c
8322 +++ b/arch/sparc/kernel/sysfs.c
8323 @@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8324 return NOTIFY_OK;
8325 }
8326
8327 -static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8328 +static struct notifier_block sysfs_cpu_nb = {
8329 .notifier_call = sysfs_cpu_notify,
8330 };
8331
8332 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8333 index a5785ea..405c5f7 100644
8334 --- a/arch/sparc/kernel/traps_32.c
8335 +++ b/arch/sparc/kernel/traps_32.c
8336 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8337 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8338 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8339
8340 +extern void gr_handle_kernel_exploit(void);
8341 +
8342 void die_if_kernel(char *str, struct pt_regs *regs)
8343 {
8344 static int die_counter;
8345 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8346 count++ < 30 &&
8347 (((unsigned long) rw) >= PAGE_OFFSET) &&
8348 !(((unsigned long) rw) & 0x7)) {
8349 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
8350 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
8351 (void *) rw->ins[7]);
8352 rw = (struct reg_window32 *)rw->ins[6];
8353 }
8354 }
8355 printk("Instruction DUMP:");
8356 instruction_dump ((unsigned long *) regs->pc);
8357 - if(regs->psr & PSR_PS)
8358 + if(regs->psr & PSR_PS) {
8359 + gr_handle_kernel_exploit();
8360 do_exit(SIGKILL);
8361 + }
8362 do_exit(SIGSEGV);
8363 }
8364
8365 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8366 index e7ecf15..6520e65 100644
8367 --- a/arch/sparc/kernel/traps_64.c
8368 +++ b/arch/sparc/kernel/traps_64.c
8369 @@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8370 i + 1,
8371 p->trapstack[i].tstate, p->trapstack[i].tpc,
8372 p->trapstack[i].tnpc, p->trapstack[i].tt);
8373 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8374 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8375 }
8376 }
8377
8378 @@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8379
8380 lvl -= 0x100;
8381 if (regs->tstate & TSTATE_PRIV) {
8382 +
8383 +#ifdef CONFIG_PAX_REFCOUNT
8384 + if (lvl == 6)
8385 + pax_report_refcount_overflow(regs);
8386 +#endif
8387 +
8388 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8389 die_if_kernel(buffer, regs);
8390 }
8391 @@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8392 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8393 {
8394 char buffer[32];
8395 -
8396 +
8397 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8398 0, lvl, SIGTRAP) == NOTIFY_STOP)
8399 return;
8400
8401 +#ifdef CONFIG_PAX_REFCOUNT
8402 + if (lvl == 6)
8403 + pax_report_refcount_overflow(regs);
8404 +#endif
8405 +
8406 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8407
8408 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8409 @@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8410 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8411 printk("%s" "ERROR(%d): ",
8412 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8413 - printk("TPC<%pS>\n", (void *) regs->tpc);
8414 + printk("TPC<%pA>\n", (void *) regs->tpc);
8415 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8416 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8417 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8418 @@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8419 smp_processor_id(),
8420 (type & 0x1) ? 'I' : 'D',
8421 regs->tpc);
8422 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8423 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8424 panic("Irrecoverable Cheetah+ parity error.");
8425 }
8426
8427 @@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8428 smp_processor_id(),
8429 (type & 0x1) ? 'I' : 'D',
8430 regs->tpc);
8431 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8432 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8433 }
8434
8435 struct sun4v_error_entry {
8436 @@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8437
8438 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8439 regs->tpc, tl);
8440 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8441 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8442 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8443 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8444 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8445 (void *) regs->u_regs[UREG_I7]);
8446 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8447 "pte[%lx] error[%lx]\n",
8448 @@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8449
8450 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8451 regs->tpc, tl);
8452 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8453 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8454 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8455 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8456 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8457 (void *) regs->u_regs[UREG_I7]);
8458 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8459 "pte[%lx] error[%lx]\n",
8460 @@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8461 fp = (unsigned long)sf->fp + STACK_BIAS;
8462 }
8463
8464 - printk(" [%016lx] %pS\n", pc, (void *) pc);
8465 + printk(" [%016lx] %pA\n", pc, (void *) pc);
8466 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8467 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8468 int index = tsk->curr_ret_stack;
8469 if (tsk->ret_stack && index >= graph) {
8470 pc = tsk->ret_stack[index - graph].ret;
8471 - printk(" [%016lx] %pS\n", pc, (void *) pc);
8472 + printk(" [%016lx] %pA\n", pc, (void *) pc);
8473 graph++;
8474 }
8475 }
8476 @@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
8477 return (struct reg_window *) (fp + STACK_BIAS);
8478 }
8479
8480 +extern void gr_handle_kernel_exploit(void);
8481 +
8482 void die_if_kernel(char *str, struct pt_regs *regs)
8483 {
8484 static int die_counter;
8485 @@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8486 while (rw &&
8487 count++ < 30 &&
8488 kstack_valid(tp, (unsigned long) rw)) {
8489 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
8490 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
8491 (void *) rw->ins[7]);
8492
8493 rw = kernel_stack_up(rw);
8494 @@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8495 }
8496 user_instruction_dump ((unsigned int __user *) regs->tpc);
8497 }
8498 - if (regs->tstate & TSTATE_PRIV)
8499 + if (regs->tstate & TSTATE_PRIV) {
8500 + gr_handle_kernel_exploit();
8501 do_exit(SIGKILL);
8502 + }
8503 do_exit(SIGSEGV);
8504 }
8505 EXPORT_SYMBOL(die_if_kernel);
8506 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
8507 index 8201c25e..072a2a7 100644
8508 --- a/arch/sparc/kernel/unaligned_64.c
8509 +++ b/arch/sparc/kernel/unaligned_64.c
8510 @@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
8511 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
8512
8513 if (__ratelimit(&ratelimit)) {
8514 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
8515 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
8516 regs->tpc, (void *) regs->tpc);
8517 }
8518 }
8519 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
8520 index 8410065f2..4fd4ca22 100644
8521 --- a/arch/sparc/lib/Makefile
8522 +++ b/arch/sparc/lib/Makefile
8523 @@ -2,7 +2,7 @@
8524 #
8525
8526 asflags-y := -ansi -DST_DIV0=0x02
8527 -ccflags-y := -Werror
8528 +#ccflags-y := -Werror
8529
8530 lib-$(CONFIG_SPARC32) += ashrdi3.o
8531 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
8532 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
8533 index 85c233d..68500e0 100644
8534 --- a/arch/sparc/lib/atomic_64.S
8535 +++ b/arch/sparc/lib/atomic_64.S
8536 @@ -17,7 +17,12 @@
8537 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8538 BACKOFF_SETUP(%o2)
8539 1: lduw [%o1], %g1
8540 - add %g1, %o0, %g7
8541 + addcc %g1, %o0, %g7
8542 +
8543 +#ifdef CONFIG_PAX_REFCOUNT
8544 + tvs %icc, 6
8545 +#endif
8546 +
8547 cas [%o1], %g1, %g7
8548 cmp %g1, %g7
8549 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8550 @@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8551 2: BACKOFF_SPIN(%o2, %o3, 1b)
8552 ENDPROC(atomic_add)
8553
8554 +ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8555 + BACKOFF_SETUP(%o2)
8556 +1: lduw [%o1], %g1
8557 + add %g1, %o0, %g7
8558 + cas [%o1], %g1, %g7
8559 + cmp %g1, %g7
8560 + bne,pn %icc, 2f
8561 + nop
8562 + retl
8563 + nop
8564 +2: BACKOFF_SPIN(%o2, %o3, 1b)
8565 +ENDPROC(atomic_add_unchecked)
8566 +
8567 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8568 BACKOFF_SETUP(%o2)
8569 1: lduw [%o1], %g1
8570 - sub %g1, %o0, %g7
8571 + subcc %g1, %o0, %g7
8572 +
8573 +#ifdef CONFIG_PAX_REFCOUNT
8574 + tvs %icc, 6
8575 +#endif
8576 +
8577 cas [%o1], %g1, %g7
8578 cmp %g1, %g7
8579 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8580 @@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8581 2: BACKOFF_SPIN(%o2, %o3, 1b)
8582 ENDPROC(atomic_sub)
8583
8584 +ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8585 + BACKOFF_SETUP(%o2)
8586 +1: lduw [%o1], %g1
8587 + sub %g1, %o0, %g7
8588 + cas [%o1], %g1, %g7
8589 + cmp %g1, %g7
8590 + bne,pn %icc, 2f
8591 + nop
8592 + retl
8593 + nop
8594 +2: BACKOFF_SPIN(%o2, %o3, 1b)
8595 +ENDPROC(atomic_sub_unchecked)
8596 +
8597 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8598 BACKOFF_SETUP(%o2)
8599 1: lduw [%o1], %g1
8600 - add %g1, %o0, %g7
8601 + addcc %g1, %o0, %g7
8602 +
8603 +#ifdef CONFIG_PAX_REFCOUNT
8604 + tvs %icc, 6
8605 +#endif
8606 +
8607 cas [%o1], %g1, %g7
8608 cmp %g1, %g7
8609 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8610 @@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8611 2: BACKOFF_SPIN(%o2, %o3, 1b)
8612 ENDPROC(atomic_add_ret)
8613
8614 +ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8615 + BACKOFF_SETUP(%o2)
8616 +1: lduw [%o1], %g1
8617 + addcc %g1, %o0, %g7
8618 + cas [%o1], %g1, %g7
8619 + cmp %g1, %g7
8620 + bne,pn %icc, 2f
8621 + add %g7, %o0, %g7
8622 + sra %g7, 0, %o0
8623 + retl
8624 + nop
8625 +2: BACKOFF_SPIN(%o2, %o3, 1b)
8626 +ENDPROC(atomic_add_ret_unchecked)
8627 +
8628 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
8629 BACKOFF_SETUP(%o2)
8630 1: lduw [%o1], %g1
8631 - sub %g1, %o0, %g7
8632 + subcc %g1, %o0, %g7
8633 +
8634 +#ifdef CONFIG_PAX_REFCOUNT
8635 + tvs %icc, 6
8636 +#endif
8637 +
8638 cas [%o1], %g1, %g7
8639 cmp %g1, %g7
8640 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8641 @@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
8642 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8643 BACKOFF_SETUP(%o2)
8644 1: ldx [%o1], %g1
8645 - add %g1, %o0, %g7
8646 + addcc %g1, %o0, %g7
8647 +
8648 +#ifdef CONFIG_PAX_REFCOUNT
8649 + tvs %xcc, 6
8650 +#endif
8651 +
8652 casx [%o1], %g1, %g7
8653 cmp %g1, %g7
8654 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8655 @@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8656 2: BACKOFF_SPIN(%o2, %o3, 1b)
8657 ENDPROC(atomic64_add)
8658
8659 +ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8660 + BACKOFF_SETUP(%o2)
8661 +1: ldx [%o1], %g1
8662 + addcc %g1, %o0, %g7
8663 + casx [%o1], %g1, %g7
8664 + cmp %g1, %g7
8665 + bne,pn %xcc, 2f
8666 + nop
8667 + retl
8668 + nop
8669 +2: BACKOFF_SPIN(%o2, %o3, 1b)
8670 +ENDPROC(atomic64_add_unchecked)
8671 +
8672 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8673 BACKOFF_SETUP(%o2)
8674 1: ldx [%o1], %g1
8675 - sub %g1, %o0, %g7
8676 + subcc %g1, %o0, %g7
8677 +
8678 +#ifdef CONFIG_PAX_REFCOUNT
8679 + tvs %xcc, 6
8680 +#endif
8681 +
8682 casx [%o1], %g1, %g7
8683 cmp %g1, %g7
8684 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8685 @@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8686 2: BACKOFF_SPIN(%o2, %o3, 1b)
8687 ENDPROC(atomic64_sub)
8688
8689 +ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8690 + BACKOFF_SETUP(%o2)
8691 +1: ldx [%o1], %g1
8692 + subcc %g1, %o0, %g7
8693 + casx [%o1], %g1, %g7
8694 + cmp %g1, %g7
8695 + bne,pn %xcc, 2f
8696 + nop
8697 + retl
8698 + nop
8699 +2: BACKOFF_SPIN(%o2, %o3, 1b)
8700 +ENDPROC(atomic64_sub_unchecked)
8701 +
8702 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8703 BACKOFF_SETUP(%o2)
8704 1: ldx [%o1], %g1
8705 - add %g1, %o0, %g7
8706 + addcc %g1, %o0, %g7
8707 +
8708 +#ifdef CONFIG_PAX_REFCOUNT
8709 + tvs %xcc, 6
8710 +#endif
8711 +
8712 casx [%o1], %g1, %g7
8713 cmp %g1, %g7
8714 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8715 @@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8716 2: BACKOFF_SPIN(%o2, %o3, 1b)
8717 ENDPROC(atomic64_add_ret)
8718
8719 +ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8720 + BACKOFF_SETUP(%o2)
8721 +1: ldx [%o1], %g1
8722 + addcc %g1, %o0, %g7
8723 + casx [%o1], %g1, %g7
8724 + cmp %g1, %g7
8725 + bne,pn %xcc, 2f
8726 + add %g7, %o0, %g7
8727 + mov %g7, %o0
8728 + retl
8729 + nop
8730 +2: BACKOFF_SPIN(%o2, %o3, 1b)
8731 +ENDPROC(atomic64_add_ret_unchecked)
8732 +
8733 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
8734 BACKOFF_SETUP(%o2)
8735 1: ldx [%o1], %g1
8736 - sub %g1, %o0, %g7
8737 + subcc %g1, %o0, %g7
8738 +
8739 +#ifdef CONFIG_PAX_REFCOUNT
8740 + tvs %xcc, 6
8741 +#endif
8742 +
8743 casx [%o1], %g1, %g7
8744 cmp %g1, %g7
8745 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8746 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
8747 index 0c4e35e..745d3e4 100644
8748 --- a/arch/sparc/lib/ksyms.c
8749 +++ b/arch/sparc/lib/ksyms.c
8750 @@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
8751
8752 /* Atomic counter implementation. */
8753 EXPORT_SYMBOL(atomic_add);
8754 +EXPORT_SYMBOL(atomic_add_unchecked);
8755 EXPORT_SYMBOL(atomic_add_ret);
8756 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
8757 EXPORT_SYMBOL(atomic_sub);
8758 +EXPORT_SYMBOL(atomic_sub_unchecked);
8759 EXPORT_SYMBOL(atomic_sub_ret);
8760 EXPORT_SYMBOL(atomic64_add);
8761 +EXPORT_SYMBOL(atomic64_add_unchecked);
8762 EXPORT_SYMBOL(atomic64_add_ret);
8763 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
8764 EXPORT_SYMBOL(atomic64_sub);
8765 +EXPORT_SYMBOL(atomic64_sub_unchecked);
8766 EXPORT_SYMBOL(atomic64_sub_ret);
8767 EXPORT_SYMBOL(atomic64_dec_if_positive);
8768
8769 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
8770 index 30c3ecc..736f015 100644
8771 --- a/arch/sparc/mm/Makefile
8772 +++ b/arch/sparc/mm/Makefile
8773 @@ -2,7 +2,7 @@
8774 #
8775
8776 asflags-y := -ansi
8777 -ccflags-y := -Werror
8778 +#ccflags-y := -Werror
8779
8780 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
8781 obj-y += fault_$(BITS).o
8782 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
8783 index e98bfda..ea8d221 100644
8784 --- a/arch/sparc/mm/fault_32.c
8785 +++ b/arch/sparc/mm/fault_32.c
8786 @@ -21,6 +21,9 @@
8787 #include <linux/perf_event.h>
8788 #include <linux/interrupt.h>
8789 #include <linux/kdebug.h>
8790 +#include <linux/slab.h>
8791 +#include <linux/pagemap.h>
8792 +#include <linux/compiler.h>
8793
8794 #include <asm/page.h>
8795 #include <asm/pgtable.h>
8796 @@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
8797 return safe_compute_effective_address(regs, insn);
8798 }
8799
8800 +#ifdef CONFIG_PAX_PAGEEXEC
8801 +#ifdef CONFIG_PAX_DLRESOLVE
8802 +static void pax_emuplt_close(struct vm_area_struct *vma)
8803 +{
8804 + vma->vm_mm->call_dl_resolve = 0UL;
8805 +}
8806 +
8807 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8808 +{
8809 + unsigned int *kaddr;
8810 +
8811 + vmf->page = alloc_page(GFP_HIGHUSER);
8812 + if (!vmf->page)
8813 + return VM_FAULT_OOM;
8814 +
8815 + kaddr = kmap(vmf->page);
8816 + memset(kaddr, 0, PAGE_SIZE);
8817 + kaddr[0] = 0x9DE3BFA8U; /* save */
8818 + flush_dcache_page(vmf->page);
8819 + kunmap(vmf->page);
8820 + return VM_FAULT_MAJOR;
8821 +}
8822 +
8823 +static const struct vm_operations_struct pax_vm_ops = {
8824 + .close = pax_emuplt_close,
8825 + .fault = pax_emuplt_fault
8826 +};
8827 +
8828 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
8829 +{
8830 + int ret;
8831 +
8832 + INIT_LIST_HEAD(&vma->anon_vma_chain);
8833 + vma->vm_mm = current->mm;
8834 + vma->vm_start = addr;
8835 + vma->vm_end = addr + PAGE_SIZE;
8836 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
8837 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
8838 + vma->vm_ops = &pax_vm_ops;
8839 +
8840 + ret = insert_vm_struct(current->mm, vma);
8841 + if (ret)
8842 + return ret;
8843 +
8844 + ++current->mm->total_vm;
8845 + return 0;
8846 +}
8847 +#endif
8848 +
8849 +/*
8850 + * PaX: decide what to do with offenders (regs->pc = fault address)
8851 + *
8852 + * returns 1 when task should be killed
8853 + * 2 when patched PLT trampoline was detected
8854 + * 3 when unpatched PLT trampoline was detected
8855 + */
8856 +static int pax_handle_fetch_fault(struct pt_regs *regs)
8857 +{
8858 +
8859 +#ifdef CONFIG_PAX_EMUPLT
8860 + int err;
8861 +
8862 + do { /* PaX: patched PLT emulation #1 */
8863 + unsigned int sethi1, sethi2, jmpl;
8864 +
8865 + err = get_user(sethi1, (unsigned int *)regs->pc);
8866 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
8867 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
8868 +
8869 + if (err)
8870 + break;
8871 +
8872 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
8873 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
8874 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
8875 + {
8876 + unsigned int addr;
8877 +
8878 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
8879 + addr = regs->u_regs[UREG_G1];
8880 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
8881 + regs->pc = addr;
8882 + regs->npc = addr+4;
8883 + return 2;
8884 + }
8885 + } while (0);
8886 +
8887 + do { /* PaX: patched PLT emulation #2 */
8888 + unsigned int ba;
8889 +
8890 + err = get_user(ba, (unsigned int *)regs->pc);
8891 +
8892 + if (err)
8893 + break;
8894 +
8895 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
8896 + unsigned int addr;
8897 +
8898 + if ((ba & 0xFFC00000U) == 0x30800000U)
8899 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
8900 + else
8901 + addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
8902 + regs->pc = addr;
8903 + regs->npc = addr+4;
8904 + return 2;
8905 + }
8906 + } while (0);
8907 +
8908 + do { /* PaX: patched PLT emulation #3 */
8909 + unsigned int sethi, bajmpl, nop;
8910 +
8911 + err = get_user(sethi, (unsigned int *)regs->pc);
8912 + err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
8913 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
8914 +
8915 + if (err)
8916 + break;
8917 +
8918 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
8919 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
8920 + nop == 0x01000000U)
8921 + {
8922 + unsigned int addr;
8923 +
8924 + addr = (sethi & 0x003FFFFFU) << 10;
8925 + regs->u_regs[UREG_G1] = addr;
8926 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
8927 + addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
8928 + else
8929 + addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
8930 + regs->pc = addr;
8931 + regs->npc = addr+4;
8932 + return 2;
8933 + }
8934 + } while (0);
8935 +
8936 + do { /* PaX: unpatched PLT emulation step 1 */
8937 + unsigned int sethi, ba, nop;
8938 +
8939 + err = get_user(sethi, (unsigned int *)regs->pc);
8940 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
8941 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
8942 +
8943 + if (err)
8944 + break;
8945 +
8946 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
8947 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
8948 + nop == 0x01000000U)
8949 + {
8950 + unsigned int addr, save, call;
8951 +
8952 + if ((ba & 0xFFC00000U) == 0x30800000U)
8953 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
8954 + else
8955 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
8956 +
8957 + err = get_user(save, (unsigned int *)addr);
8958 + err |= get_user(call, (unsigned int *)(addr+4));
8959 + err |= get_user(nop, (unsigned int *)(addr+8));
8960 + if (err)
8961 + break;
8962 +
8963 +#ifdef CONFIG_PAX_DLRESOLVE
8964 + if (save == 0x9DE3BFA8U &&
8965 + (call & 0xC0000000U) == 0x40000000U &&
8966 + nop == 0x01000000U)
8967 + {
8968 + struct vm_area_struct *vma;
8969 + unsigned long call_dl_resolve;
8970 +
8971 + down_read(&current->mm->mmap_sem);
8972 + call_dl_resolve = current->mm->call_dl_resolve;
8973 + up_read(&current->mm->mmap_sem);
8974 + if (likely(call_dl_resolve))
8975 + goto emulate;
8976 +
8977 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
8978 +
8979 + down_write(&current->mm->mmap_sem);
8980 + if (current->mm->call_dl_resolve) {
8981 + call_dl_resolve = current->mm->call_dl_resolve;
8982 + up_write(&current->mm->mmap_sem);
8983 + if (vma)
8984 + kmem_cache_free(vm_area_cachep, vma);
8985 + goto emulate;
8986 + }
8987 +
8988 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
8989 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
8990 + up_write(&current->mm->mmap_sem);
8991 + if (vma)
8992 + kmem_cache_free(vm_area_cachep, vma);
8993 + return 1;
8994 + }
8995 +
8996 + if (pax_insert_vma(vma, call_dl_resolve)) {
8997 + up_write(&current->mm->mmap_sem);
8998 + kmem_cache_free(vm_area_cachep, vma);
8999 + return 1;
9000 + }
9001 +
9002 + current->mm->call_dl_resolve = call_dl_resolve;
9003 + up_write(&current->mm->mmap_sem);
9004 +
9005 +emulate:
9006 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9007 + regs->pc = call_dl_resolve;
9008 + regs->npc = addr+4;
9009 + return 3;
9010 + }
9011 +#endif
9012 +
9013 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9014 + if ((save & 0xFFC00000U) == 0x05000000U &&
9015 + (call & 0xFFFFE000U) == 0x85C0A000U &&
9016 + nop == 0x01000000U)
9017 + {
9018 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9019 + regs->u_regs[UREG_G2] = addr + 4;
9020 + addr = (save & 0x003FFFFFU) << 10;
9021 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9022 + regs->pc = addr;
9023 + regs->npc = addr+4;
9024 + return 3;
9025 + }
9026 + }
9027 + } while (0);
9028 +
9029 + do { /* PaX: unpatched PLT emulation step 2 */
9030 + unsigned int save, call, nop;
9031 +
9032 + err = get_user(save, (unsigned int *)(regs->pc-4));
9033 + err |= get_user(call, (unsigned int *)regs->pc);
9034 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
9035 + if (err)
9036 + break;
9037 +
9038 + if (save == 0x9DE3BFA8U &&
9039 + (call & 0xC0000000U) == 0x40000000U &&
9040 + nop == 0x01000000U)
9041 + {
9042 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9043 +
9044 + regs->u_regs[UREG_RETPC] = regs->pc;
9045 + regs->pc = dl_resolve;
9046 + regs->npc = dl_resolve+4;
9047 + return 3;
9048 + }
9049 + } while (0);
9050 +#endif
9051 +
9052 + return 1;
9053 +}
9054 +
9055 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9056 +{
9057 + unsigned long i;
9058 +
9059 + printk(KERN_ERR "PAX: bytes at PC: ");
9060 + for (i = 0; i < 8; i++) {
9061 + unsigned int c;
9062 + if (get_user(c, (unsigned int *)pc+i))
9063 + printk(KERN_CONT "???????? ");
9064 + else
9065 + printk(KERN_CONT "%08x ", c);
9066 + }
9067 + printk("\n");
9068 +}
9069 +#endif
9070 +
9071 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9072 int text_fault)
9073 {
9074 @@ -230,6 +504,24 @@ good_area:
9075 if (!(vma->vm_flags & VM_WRITE))
9076 goto bad_area;
9077 } else {
9078 +
9079 +#ifdef CONFIG_PAX_PAGEEXEC
9080 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9081 + up_read(&mm->mmap_sem);
9082 + switch (pax_handle_fetch_fault(regs)) {
9083 +
9084 +#ifdef CONFIG_PAX_EMUPLT
9085 + case 2:
9086 + case 3:
9087 + return;
9088 +#endif
9089 +
9090 + }
9091 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9092 + do_group_exit(SIGKILL);
9093 + }
9094 +#endif
9095 +
9096 /* Allow reads even for write-only mappings */
9097 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9098 goto bad_area;
9099 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9100 index 5062ff3..e0b75f3 100644
9101 --- a/arch/sparc/mm/fault_64.c
9102 +++ b/arch/sparc/mm/fault_64.c
9103 @@ -21,6 +21,9 @@
9104 #include <linux/kprobes.h>
9105 #include <linux/kdebug.h>
9106 #include <linux/percpu.h>
9107 +#include <linux/slab.h>
9108 +#include <linux/pagemap.h>
9109 +#include <linux/compiler.h>
9110
9111 #include <asm/page.h>
9112 #include <asm/pgtable.h>
9113 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9114 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9115 regs->tpc);
9116 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9117 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9118 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9119 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9120 dump_stack();
9121 unhandled_fault(regs->tpc, current, regs);
9122 @@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9123 show_regs(regs);
9124 }
9125
9126 +#ifdef CONFIG_PAX_PAGEEXEC
9127 +#ifdef CONFIG_PAX_DLRESOLVE
9128 +static void pax_emuplt_close(struct vm_area_struct *vma)
9129 +{
9130 + vma->vm_mm->call_dl_resolve = 0UL;
9131 +}
9132 +
9133 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9134 +{
9135 + unsigned int *kaddr;
9136 +
9137 + vmf->page = alloc_page(GFP_HIGHUSER);
9138 + if (!vmf->page)
9139 + return VM_FAULT_OOM;
9140 +
9141 + kaddr = kmap(vmf->page);
9142 + memset(kaddr, 0, PAGE_SIZE);
9143 + kaddr[0] = 0x9DE3BFA8U; /* save */
9144 + flush_dcache_page(vmf->page);
9145 + kunmap(vmf->page);
9146 + return VM_FAULT_MAJOR;
9147 +}
9148 +
9149 +static const struct vm_operations_struct pax_vm_ops = {
9150 + .close = pax_emuplt_close,
9151 + .fault = pax_emuplt_fault
9152 +};
9153 +
9154 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9155 +{
9156 + int ret;
9157 +
9158 + INIT_LIST_HEAD(&vma->anon_vma_chain);
9159 + vma->vm_mm = current->mm;
9160 + vma->vm_start = addr;
9161 + vma->vm_end = addr + PAGE_SIZE;
9162 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9163 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9164 + vma->vm_ops = &pax_vm_ops;
9165 +
9166 + ret = insert_vm_struct(current->mm, vma);
9167 + if (ret)
9168 + return ret;
9169 +
9170 + ++current->mm->total_vm;
9171 + return 0;
9172 +}
9173 +#endif
9174 +
9175 +/*
9176 + * PaX: decide what to do with offenders (regs->tpc = fault address)
9177 + *
9178 + * returns 1 when task should be killed
9179 + * 2 when patched PLT trampoline was detected
9180 + * 3 when unpatched PLT trampoline was detected
9181 + */
9182 +static int pax_handle_fetch_fault(struct pt_regs *regs)
9183 +{
9184 +
9185 +#ifdef CONFIG_PAX_EMUPLT
9186 + int err;
9187 +
9188 + do { /* PaX: patched PLT emulation #1 */
9189 + unsigned int sethi1, sethi2, jmpl;
9190 +
9191 + err = get_user(sethi1, (unsigned int *)regs->tpc);
9192 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9193 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9194 +
9195 + if (err)
9196 + break;
9197 +
9198 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9199 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
9200 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
9201 + {
9202 + unsigned long addr;
9203 +
9204 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9205 + addr = regs->u_regs[UREG_G1];
9206 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9207 +
9208 + if (test_thread_flag(TIF_32BIT))
9209 + addr &= 0xFFFFFFFFUL;
9210 +
9211 + regs->tpc = addr;
9212 + regs->tnpc = addr+4;
9213 + return 2;
9214 + }
9215 + } while (0);
9216 +
9217 + do { /* PaX: patched PLT emulation #2 */
9218 + unsigned int ba;
9219 +
9220 + err = get_user(ba, (unsigned int *)regs->tpc);
9221 +
9222 + if (err)
9223 + break;
9224 +
9225 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9226 + unsigned long addr;
9227 +
9228 + if ((ba & 0xFFC00000U) == 0x30800000U)
9229 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9230 + else
9231 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9232 +
9233 + if (test_thread_flag(TIF_32BIT))
9234 + addr &= 0xFFFFFFFFUL;
9235 +
9236 + regs->tpc = addr;
9237 + regs->tnpc = addr+4;
9238 + return 2;
9239 + }
9240 + } while (0);
9241 +
9242 + do { /* PaX: patched PLT emulation #3 */
9243 + unsigned int sethi, bajmpl, nop;
9244 +
9245 + err = get_user(sethi, (unsigned int *)regs->tpc);
9246 + err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9247 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9248 +
9249 + if (err)
9250 + break;
9251 +
9252 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9253 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9254 + nop == 0x01000000U)
9255 + {
9256 + unsigned long addr;
9257 +
9258 + addr = (sethi & 0x003FFFFFU) << 10;
9259 + regs->u_regs[UREG_G1] = addr;
9260 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9261 + addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9262 + else
9263 + addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9264 +
9265 + if (test_thread_flag(TIF_32BIT))
9266 + addr &= 0xFFFFFFFFUL;
9267 +
9268 + regs->tpc = addr;
9269 + regs->tnpc = addr+4;
9270 + return 2;
9271 + }
9272 + } while (0);
9273 +
9274 + do { /* PaX: patched PLT emulation #4 */
9275 + unsigned int sethi, mov1, call, mov2;
9276 +
9277 + err = get_user(sethi, (unsigned int *)regs->tpc);
9278 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9279 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
9280 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9281 +
9282 + if (err)
9283 + break;
9284 +
9285 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9286 + mov1 == 0x8210000FU &&
9287 + (call & 0xC0000000U) == 0x40000000U &&
9288 + mov2 == 0x9E100001U)
9289 + {
9290 + unsigned long addr;
9291 +
9292 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9293 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9294 +
9295 + if (test_thread_flag(TIF_32BIT))
9296 + addr &= 0xFFFFFFFFUL;
9297 +
9298 + regs->tpc = addr;
9299 + regs->tnpc = addr+4;
9300 + return 2;
9301 + }
9302 + } while (0);
9303 +
9304 + do { /* PaX: patched PLT emulation #5 */
9305 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9306 +
9307 + err = get_user(sethi, (unsigned int *)regs->tpc);
9308 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9309 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9310 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9311 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9312 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9313 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9314 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9315 +
9316 + if (err)
9317 + break;
9318 +
9319 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9320 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
9321 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9322 + (or1 & 0xFFFFE000U) == 0x82106000U &&
9323 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
9324 + sllx == 0x83287020U &&
9325 + jmpl == 0x81C04005U &&
9326 + nop == 0x01000000U)
9327 + {
9328 + unsigned long addr;
9329 +
9330 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9331 + regs->u_regs[UREG_G1] <<= 32;
9332 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9333 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9334 + regs->tpc = addr;
9335 + regs->tnpc = addr+4;
9336 + return 2;
9337 + }
9338 + } while (0);
9339 +
9340 + do { /* PaX: patched PLT emulation #6 */
9341 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9342 +
9343 + err = get_user(sethi, (unsigned int *)regs->tpc);
9344 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9345 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9346 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9347 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
9348 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9349 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9350 +
9351 + if (err)
9352 + break;
9353 +
9354 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9355 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
9356 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9357 + sllx == 0x83287020U &&
9358 + (or & 0xFFFFE000U) == 0x8A116000U &&
9359 + jmpl == 0x81C04005U &&
9360 + nop == 0x01000000U)
9361 + {
9362 + unsigned long addr;
9363 +
9364 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
9365 + regs->u_regs[UREG_G1] <<= 32;
9366 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
9367 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9368 + regs->tpc = addr;
9369 + regs->tnpc = addr+4;
9370 + return 2;
9371 + }
9372 + } while (0);
9373 +
9374 + do { /* PaX: unpatched PLT emulation step 1 */
9375 + unsigned int sethi, ba, nop;
9376 +
9377 + err = get_user(sethi, (unsigned int *)regs->tpc);
9378 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9379 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9380 +
9381 + if (err)
9382 + break;
9383 +
9384 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9385 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9386 + nop == 0x01000000U)
9387 + {
9388 + unsigned long addr;
9389 + unsigned int save, call;
9390 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
9391 +
9392 + if ((ba & 0xFFC00000U) == 0x30800000U)
9393 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9394 + else
9395 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9396 +
9397 + if (test_thread_flag(TIF_32BIT))
9398 + addr &= 0xFFFFFFFFUL;
9399 +
9400 + err = get_user(save, (unsigned int *)addr);
9401 + err |= get_user(call, (unsigned int *)(addr+4));
9402 + err |= get_user(nop, (unsigned int *)(addr+8));
9403 + if (err)
9404 + break;
9405 +
9406 +#ifdef CONFIG_PAX_DLRESOLVE
9407 + if (save == 0x9DE3BFA8U &&
9408 + (call & 0xC0000000U) == 0x40000000U &&
9409 + nop == 0x01000000U)
9410 + {
9411 + struct vm_area_struct *vma;
9412 + unsigned long call_dl_resolve;
9413 +
9414 + down_read(&current->mm->mmap_sem);
9415 + call_dl_resolve = current->mm->call_dl_resolve;
9416 + up_read(&current->mm->mmap_sem);
9417 + if (likely(call_dl_resolve))
9418 + goto emulate;
9419 +
9420 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9421 +
9422 + down_write(&current->mm->mmap_sem);
9423 + if (current->mm->call_dl_resolve) {
9424 + call_dl_resolve = current->mm->call_dl_resolve;
9425 + up_write(&current->mm->mmap_sem);
9426 + if (vma)
9427 + kmem_cache_free(vm_area_cachep, vma);
9428 + goto emulate;
9429 + }
9430 +
9431 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9432 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9433 + up_write(&current->mm->mmap_sem);
9434 + if (vma)
9435 + kmem_cache_free(vm_area_cachep, vma);
9436 + return 1;
9437 + }
9438 +
9439 + if (pax_insert_vma(vma, call_dl_resolve)) {
9440 + up_write(&current->mm->mmap_sem);
9441 + kmem_cache_free(vm_area_cachep, vma);
9442 + return 1;
9443 + }
9444 +
9445 + current->mm->call_dl_resolve = call_dl_resolve;
9446 + up_write(&current->mm->mmap_sem);
9447 +
9448 +emulate:
9449 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9450 + regs->tpc = call_dl_resolve;
9451 + regs->tnpc = addr+4;
9452 + return 3;
9453 + }
9454 +#endif
9455 +
9456 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9457 + if ((save & 0xFFC00000U) == 0x05000000U &&
9458 + (call & 0xFFFFE000U) == 0x85C0A000U &&
9459 + nop == 0x01000000U)
9460 + {
9461 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9462 + regs->u_regs[UREG_G2] = addr + 4;
9463 + addr = (save & 0x003FFFFFU) << 10;
9464 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9465 +
9466 + if (test_thread_flag(TIF_32BIT))
9467 + addr &= 0xFFFFFFFFUL;
9468 +
9469 + regs->tpc = addr;
9470 + regs->tnpc = addr+4;
9471 + return 3;
9472 + }
9473 +
9474 + /* PaX: 64-bit PLT stub */
9475 + err = get_user(sethi1, (unsigned int *)addr);
9476 + err |= get_user(sethi2, (unsigned int *)(addr+4));
9477 + err |= get_user(or1, (unsigned int *)(addr+8));
9478 + err |= get_user(or2, (unsigned int *)(addr+12));
9479 + err |= get_user(sllx, (unsigned int *)(addr+16));
9480 + err |= get_user(add, (unsigned int *)(addr+20));
9481 + err |= get_user(jmpl, (unsigned int *)(addr+24));
9482 + err |= get_user(nop, (unsigned int *)(addr+28));
9483 + if (err)
9484 + break;
9485 +
9486 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
9487 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9488 + (or1 & 0xFFFFE000U) == 0x88112000U &&
9489 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
9490 + sllx == 0x89293020U &&
9491 + add == 0x8A010005U &&
9492 + jmpl == 0x89C14000U &&
9493 + nop == 0x01000000U)
9494 + {
9495 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9496 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9497 + regs->u_regs[UREG_G4] <<= 32;
9498 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9499 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
9500 + regs->u_regs[UREG_G4] = addr + 24;
9501 + addr = regs->u_regs[UREG_G5];
9502 + regs->tpc = addr;
9503 + regs->tnpc = addr+4;
9504 + return 3;
9505 + }
9506 + }
9507 + } while (0);
9508 +
9509 +#ifdef CONFIG_PAX_DLRESOLVE
9510 + do { /* PaX: unpatched PLT emulation step 2 */
9511 + unsigned int save, call, nop;
9512 +
9513 + err = get_user(save, (unsigned int *)(regs->tpc-4));
9514 + err |= get_user(call, (unsigned int *)regs->tpc);
9515 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
9516 + if (err)
9517 + break;
9518 +
9519 + if (save == 0x9DE3BFA8U &&
9520 + (call & 0xC0000000U) == 0x40000000U &&
9521 + nop == 0x01000000U)
9522 + {
9523 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9524 +
9525 + if (test_thread_flag(TIF_32BIT))
9526 + dl_resolve &= 0xFFFFFFFFUL;
9527 +
9528 + regs->u_regs[UREG_RETPC] = regs->tpc;
9529 + regs->tpc = dl_resolve;
9530 + regs->tnpc = dl_resolve+4;
9531 + return 3;
9532 + }
9533 + } while (0);
9534 +#endif
9535 +
9536 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
9537 + unsigned int sethi, ba, nop;
9538 +
9539 + err = get_user(sethi, (unsigned int *)regs->tpc);
9540 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9541 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9542 +
9543 + if (err)
9544 + break;
9545 +
9546 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9547 + (ba & 0xFFF00000U) == 0x30600000U &&
9548 + nop == 0x01000000U)
9549 + {
9550 + unsigned long addr;
9551 +
9552 + addr = (sethi & 0x003FFFFFU) << 10;
9553 + regs->u_regs[UREG_G1] = addr;
9554 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9555 +
9556 + if (test_thread_flag(TIF_32BIT))
9557 + addr &= 0xFFFFFFFFUL;
9558 +
9559 + regs->tpc = addr;
9560 + regs->tnpc = addr+4;
9561 + return 2;
9562 + }
9563 + } while (0);
9564 +
9565 +#endif
9566 +
9567 + return 1;
9568 +}
9569 +
9570 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9571 +{
9572 + unsigned long i;
9573 +
9574 + printk(KERN_ERR "PAX: bytes at PC: ");
9575 + for (i = 0; i < 8; i++) {
9576 + unsigned int c;
9577 + if (get_user(c, (unsigned int *)pc+i))
9578 + printk(KERN_CONT "???????? ");
9579 + else
9580 + printk(KERN_CONT "%08x ", c);
9581 + }
9582 + printk("\n");
9583 +}
9584 +#endif
9585 +
9586 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
9587 {
9588 struct mm_struct *mm = current->mm;
9589 @@ -341,6 +804,29 @@ retry:
9590 if (!vma)
9591 goto bad_area;
9592
9593 +#ifdef CONFIG_PAX_PAGEEXEC
9594 + /* PaX: detect ITLB misses on non-exec pages */
9595 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
9596 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
9597 + {
9598 + if (address != regs->tpc)
9599 + goto good_area;
9600 +
9601 + up_read(&mm->mmap_sem);
9602 + switch (pax_handle_fetch_fault(regs)) {
9603 +
9604 +#ifdef CONFIG_PAX_EMUPLT
9605 + case 2:
9606 + case 3:
9607 + return;
9608 +#endif
9609 +
9610 + }
9611 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
9612 + do_group_exit(SIGKILL);
9613 + }
9614 +#endif
9615 +
9616 /* Pure DTLB misses do not tell us whether the fault causing
9617 * load/store/atomic was a write or not, it only says that there
9618 * was no match. So in such a case we (carefully) read the
9619 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
9620 index d2b5944..bd813f2 100644
9621 --- a/arch/sparc/mm/hugetlbpage.c
9622 +++ b/arch/sparc/mm/hugetlbpage.c
9623 @@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9624
9625 info.flags = 0;
9626 info.length = len;
9627 - info.low_limit = TASK_UNMAPPED_BASE;
9628 + info.low_limit = mm->mmap_base;
9629 info.high_limit = min(task_size, VA_EXCLUDE_START);
9630 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
9631 info.align_offset = 0;
9632 @@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9633 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9634 VM_BUG_ON(addr != -ENOMEM);
9635 info.low_limit = VA_EXCLUDE_END;
9636 +
9637 +#ifdef CONFIG_PAX_RANDMMAP
9638 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9639 + info.low_limit += mm->delta_mmap;
9640 +#endif
9641 +
9642 info.high_limit = task_size;
9643 addr = vm_unmapped_area(&info);
9644 }
9645 @@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9646 VM_BUG_ON(addr != -ENOMEM);
9647 info.flags = 0;
9648 info.low_limit = TASK_UNMAPPED_BASE;
9649 +
9650 +#ifdef CONFIG_PAX_RANDMMAP
9651 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9652 + info.low_limit += mm->delta_mmap;
9653 +#endif
9654 +
9655 info.high_limit = STACK_TOP32;
9656 addr = vm_unmapped_area(&info);
9657 }
9658 @@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9659 struct mm_struct *mm = current->mm;
9660 struct vm_area_struct *vma;
9661 unsigned long task_size = TASK_SIZE;
9662 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
9663
9664 if (test_thread_flag(TIF_32BIT))
9665 task_size = STACK_TOP32;
9666 @@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9667 return addr;
9668 }
9669
9670 +#ifdef CONFIG_PAX_RANDMMAP
9671 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9672 +#endif
9673 +
9674 if (addr) {
9675 addr = ALIGN(addr, HPAGE_SIZE);
9676 vma = find_vma(mm, addr);
9677 - if (task_size - len >= addr &&
9678 - (!vma || addr + len <= vma->vm_start))
9679 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9680 return addr;
9681 }
9682 if (mm->get_unmapped_area == arch_get_unmapped_area)
9683 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
9684 index f4500c6..889656c 100644
9685 --- a/arch/tile/include/asm/atomic_64.h
9686 +++ b/arch/tile/include/asm/atomic_64.h
9687 @@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9688
9689 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9690
9691 +#define atomic64_read_unchecked(v) atomic64_read(v)
9692 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9693 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9694 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9695 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9696 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
9697 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9698 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
9699 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9700 +
9701 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
9702 #define smp_mb__before_atomic_dec() smp_mb()
9703 #define smp_mb__after_atomic_dec() smp_mb()
9704 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
9705 index a9a5299..0fce79e 100644
9706 --- a/arch/tile/include/asm/cache.h
9707 +++ b/arch/tile/include/asm/cache.h
9708 @@ -15,11 +15,12 @@
9709 #ifndef _ASM_TILE_CACHE_H
9710 #define _ASM_TILE_CACHE_H
9711
9712 +#include <linux/const.h>
9713 #include <arch/chip.h>
9714
9715 /* bytes per L1 data cache line */
9716 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
9717 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9718 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9719
9720 /* bytes per L2 cache line */
9721 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
9722 diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
9723 index 9ab078a..d6635c2 100644
9724 --- a/arch/tile/include/asm/uaccess.h
9725 +++ b/arch/tile/include/asm/uaccess.h
9726 @@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
9727 const void __user *from,
9728 unsigned long n)
9729 {
9730 - int sz = __compiletime_object_size(to);
9731 + size_t sz = __compiletime_object_size(to);
9732
9733 - if (likely(sz == -1 || sz >= n))
9734 + if (likely(sz == (size_t)-1 || sz >= n))
9735 n = _copy_from_user(to, from, n);
9736 else
9737 copy_from_user_overflow();
9738 diff --git a/arch/um/Makefile b/arch/um/Makefile
9739 index 133f7de..1d6f2f1 100644
9740 --- a/arch/um/Makefile
9741 +++ b/arch/um/Makefile
9742 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
9743 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
9744 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
9745
9746 +ifdef CONSTIFY_PLUGIN
9747 +USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
9748 +endif
9749 +
9750 #This will adjust *FLAGS accordingly to the platform.
9751 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
9752
9753 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
9754 index 19e1bdd..3665b77 100644
9755 --- a/arch/um/include/asm/cache.h
9756 +++ b/arch/um/include/asm/cache.h
9757 @@ -1,6 +1,7 @@
9758 #ifndef __UM_CACHE_H
9759 #define __UM_CACHE_H
9760
9761 +#include <linux/const.h>
9762
9763 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
9764 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9765 @@ -12,6 +13,6 @@
9766 # define L1_CACHE_SHIFT 5
9767 #endif
9768
9769 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9770 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9771
9772 #endif
9773 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
9774 index 2e0a6b1..a64d0f5 100644
9775 --- a/arch/um/include/asm/kmap_types.h
9776 +++ b/arch/um/include/asm/kmap_types.h
9777 @@ -8,6 +8,6 @@
9778
9779 /* No more #include "asm/arch/kmap_types.h" ! */
9780
9781 -#define KM_TYPE_NR 14
9782 +#define KM_TYPE_NR 15
9783
9784 #endif
9785 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
9786 index 5ff53d9..5850cdf 100644
9787 --- a/arch/um/include/asm/page.h
9788 +++ b/arch/um/include/asm/page.h
9789 @@ -14,6 +14,9 @@
9790 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
9791 #define PAGE_MASK (~(PAGE_SIZE-1))
9792
9793 +#define ktla_ktva(addr) (addr)
9794 +#define ktva_ktla(addr) (addr)
9795 +
9796 #ifndef __ASSEMBLY__
9797
9798 struct page;
9799 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
9800 index 0032f92..cd151e0 100644
9801 --- a/arch/um/include/asm/pgtable-3level.h
9802 +++ b/arch/um/include/asm/pgtable-3level.h
9803 @@ -58,6 +58,7 @@
9804 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
9805 #define pud_populate(mm, pud, pmd) \
9806 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
9807 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
9808
9809 #ifdef CONFIG_64BIT
9810 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
9811 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
9812 index b462b13..e7a19aa 100644
9813 --- a/arch/um/kernel/process.c
9814 +++ b/arch/um/kernel/process.c
9815 @@ -386,22 +386,6 @@ int singlestepping(void * t)
9816 return 2;
9817 }
9818
9819 -/*
9820 - * Only x86 and x86_64 have an arch_align_stack().
9821 - * All other arches have "#define arch_align_stack(x) (x)"
9822 - * in their asm/system.h
9823 - * As this is included in UML from asm-um/system-generic.h,
9824 - * we can use it to behave as the subarch does.
9825 - */
9826 -#ifndef arch_align_stack
9827 -unsigned long arch_align_stack(unsigned long sp)
9828 -{
9829 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9830 - sp -= get_random_int() % 8192;
9831 - return sp & ~0xf;
9832 -}
9833 -#endif
9834 -
9835 unsigned long get_wchan(struct task_struct *p)
9836 {
9837 unsigned long stack_page, sp, ip;
9838 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
9839 index ad8f795..2c7eec6 100644
9840 --- a/arch/unicore32/include/asm/cache.h
9841 +++ b/arch/unicore32/include/asm/cache.h
9842 @@ -12,8 +12,10 @@
9843 #ifndef __UNICORE_CACHE_H__
9844 #define __UNICORE_CACHE_H__
9845
9846 -#define L1_CACHE_SHIFT (5)
9847 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9848 +#include <linux/const.h>
9849 +
9850 +#define L1_CACHE_SHIFT 5
9851 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9852
9853 /*
9854 * Memory returned by kmalloc() may be used for DMA, so we must make
9855 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
9856 index 0694d09..b58b3aa 100644
9857 --- a/arch/x86/Kconfig
9858 +++ b/arch/x86/Kconfig
9859 @@ -238,7 +238,7 @@ config X86_HT
9860
9861 config X86_32_LAZY_GS
9862 def_bool y
9863 - depends on X86_32 && !CC_STACKPROTECTOR
9864 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
9865
9866 config ARCH_HWEIGHT_CFLAGS
9867 string
9868 @@ -1031,6 +1031,7 @@ config MICROCODE_OLD_INTERFACE
9869
9870 config X86_MSR
9871 tristate "/dev/cpu/*/msr - Model-specific register support"
9872 + depends on !GRKERNSEC_KMEM
9873 ---help---
9874 This device gives privileged processes access to the x86
9875 Model-Specific Registers (MSRs). It is a character device with
9876 @@ -1054,7 +1055,7 @@ choice
9877
9878 config NOHIGHMEM
9879 bool "off"
9880 - depends on !X86_NUMAQ
9881 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
9882 ---help---
9883 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
9884 However, the address space of 32-bit x86 processors is only 4
9885 @@ -1091,7 +1092,7 @@ config NOHIGHMEM
9886
9887 config HIGHMEM4G
9888 bool "4GB"
9889 - depends on !X86_NUMAQ
9890 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
9891 ---help---
9892 Select this if you have a 32-bit processor and between 1 and 4
9893 gigabytes of physical RAM.
9894 @@ -1145,7 +1146,7 @@ config PAGE_OFFSET
9895 hex
9896 default 0xB0000000 if VMSPLIT_3G_OPT
9897 default 0x80000000 if VMSPLIT_2G
9898 - default 0x78000000 if VMSPLIT_2G_OPT
9899 + default 0x70000000 if VMSPLIT_2G_OPT
9900 default 0x40000000 if VMSPLIT_1G
9901 default 0xC0000000
9902 depends on X86_32
9903 @@ -1542,6 +1543,7 @@ config SECCOMP
9904
9905 config CC_STACKPROTECTOR
9906 bool "Enable -fstack-protector buffer overflow detection"
9907 + depends on X86_64 || !PAX_MEMORY_UDEREF
9908 ---help---
9909 This option turns on the -fstack-protector GCC feature. This
9910 feature puts, at the beginning of functions, a canary value on
9911 @@ -1599,6 +1601,7 @@ config KEXEC_JUMP
9912 config PHYSICAL_START
9913 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
9914 default "0x1000000"
9915 + range 0x400000 0x40000000
9916 ---help---
9917 This gives the physical address where the kernel is loaded.
9918
9919 @@ -1662,6 +1665,7 @@ config X86_NEED_RELOCS
9920 config PHYSICAL_ALIGN
9921 hex "Alignment value to which kernel should be aligned" if X86_32
9922 default "0x1000000"
9923 + range 0x400000 0x1000000 if PAX_KERNEXEC
9924 range 0x2000 0x1000000
9925 ---help---
9926 This value puts the alignment restrictions on physical address
9927 @@ -1737,9 +1741,10 @@ config DEBUG_HOTPLUG_CPU0
9928 If unsure, say N.
9929
9930 config COMPAT_VDSO
9931 - def_bool y
9932 + def_bool n
9933 prompt "Compat VDSO support"
9934 depends on X86_32 || IA32_EMULATION
9935 + depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
9936 ---help---
9937 Map the 32-bit VDSO to the predictable old-style address too.
9938
9939 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
9940 index c026cca..14657ae 100644
9941 --- a/arch/x86/Kconfig.cpu
9942 +++ b/arch/x86/Kconfig.cpu
9943 @@ -319,7 +319,7 @@ config X86_PPRO_FENCE
9944
9945 config X86_F00F_BUG
9946 def_bool y
9947 - depends on M586MMX || M586TSC || M586 || M486
9948 + depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
9949
9950 config X86_INVD_BUG
9951 def_bool y
9952 @@ -327,7 +327,7 @@ config X86_INVD_BUG
9953
9954 config X86_ALIGNMENT_16
9955 def_bool y
9956 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
9957 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
9958
9959 config X86_INTEL_USERCOPY
9960 def_bool y
9961 @@ -373,7 +373,7 @@ config X86_CMPXCHG64
9962 # generates cmov.
9963 config X86_CMOV
9964 def_bool y
9965 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
9966 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
9967
9968 config X86_MINIMUM_CPU_FAMILY
9969 int
9970 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
9971 index b322f12..652d0d9 100644
9972 --- a/arch/x86/Kconfig.debug
9973 +++ b/arch/x86/Kconfig.debug
9974 @@ -84,7 +84,7 @@ config X86_PTDUMP
9975 config DEBUG_RODATA
9976 bool "Write protect kernel read-only data structures"
9977 default y
9978 - depends on DEBUG_KERNEL
9979 + depends on DEBUG_KERNEL && BROKEN
9980 ---help---
9981 Mark the kernel read-only data as write-protected in the pagetables,
9982 in order to catch accidental (and incorrect) writes to such const
9983 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
9984
9985 config DEBUG_SET_MODULE_RONX
9986 bool "Set loadable kernel module data as NX and text as RO"
9987 - depends on MODULES
9988 + depends on MODULES && BROKEN
9989 ---help---
9990 This option helps catch unintended modifications to loadable
9991 kernel module's text and read-only data. It also prevents execution
9992 @@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
9993
9994 config DEBUG_STRICT_USER_COPY_CHECKS
9995 bool "Strict copy size checks"
9996 - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
9997 + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
9998 ---help---
9999 Enabling this option turns a certain set of sanity checks for user
10000 copy operations into compile time failures.
10001 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10002 index e71fc42..7829607 100644
10003 --- a/arch/x86/Makefile
10004 +++ b/arch/x86/Makefile
10005 @@ -50,6 +50,7 @@ else
10006 UTS_MACHINE := x86_64
10007 CHECKFLAGS += -D__x86_64__ -m64
10008
10009 + biarch := $(call cc-option,-m64)
10010 KBUILD_AFLAGS += -m64
10011 KBUILD_CFLAGS += -m64
10012
10013 @@ -230,3 +231,12 @@ define archhelp
10014 echo ' FDARGS="..." arguments for the booted kernel'
10015 echo ' FDINITRD=file initrd for the booted kernel'
10016 endef
10017 +
10018 +define OLD_LD
10019 +
10020 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10021 +*** Please upgrade your binutils to 2.18 or newer
10022 +endef
10023 +
10024 +archprepare:
10025 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10026 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10027 index 379814b..add62ce 100644
10028 --- a/arch/x86/boot/Makefile
10029 +++ b/arch/x86/boot/Makefile
10030 @@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10031 $(call cc-option, -fno-stack-protector) \
10032 $(call cc-option, -mpreferred-stack-boundary=2)
10033 KBUILD_CFLAGS += $(call cc-option, -m32)
10034 +ifdef CONSTIFY_PLUGIN
10035 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10036 +endif
10037 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10038 GCOV_PROFILE := n
10039
10040 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10041 index 878e4b9..20537ab 100644
10042 --- a/arch/x86/boot/bitops.h
10043 +++ b/arch/x86/boot/bitops.h
10044 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10045 u8 v;
10046 const u32 *p = (const u32 *)addr;
10047
10048 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10049 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10050 return v;
10051 }
10052
10053 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10054
10055 static inline void set_bit(int nr, void *addr)
10056 {
10057 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10058 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10059 }
10060
10061 #endif /* BOOT_BITOPS_H */
10062 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10063 index 18997e5..83d9c67 100644
10064 --- a/arch/x86/boot/boot.h
10065 +++ b/arch/x86/boot/boot.h
10066 @@ -85,7 +85,7 @@ static inline void io_delay(void)
10067 static inline u16 ds(void)
10068 {
10069 u16 seg;
10070 - asm("movw %%ds,%0" : "=rm" (seg));
10071 + asm volatile("movw %%ds,%0" : "=rm" (seg));
10072 return seg;
10073 }
10074
10075 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10076 static inline int memcmp(const void *s1, const void *s2, size_t len)
10077 {
10078 u8 diff;
10079 - asm("repe; cmpsb; setnz %0"
10080 + asm volatile("repe; cmpsb; setnz %0"
10081 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10082 return diff;
10083 }
10084 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10085 index 8a84501..b2d165f 100644
10086 --- a/arch/x86/boot/compressed/Makefile
10087 +++ b/arch/x86/boot/compressed/Makefile
10088 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10089 KBUILD_CFLAGS += $(cflags-y)
10090 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10091 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10092 +ifdef CONSTIFY_PLUGIN
10093 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10094 +endif
10095
10096 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10097 GCOV_PROFILE := n
10098 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10099 index c205035..5853587 100644
10100 --- a/arch/x86/boot/compressed/eboot.c
10101 +++ b/arch/x86/boot/compressed/eboot.c
10102 @@ -150,7 +150,6 @@ again:
10103 *addr = max_addr;
10104 }
10105
10106 -free_pool:
10107 efi_call_phys1(sys_table->boottime->free_pool, map);
10108
10109 fail:
10110 @@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10111 if (i == map_size / desc_size)
10112 status = EFI_NOT_FOUND;
10113
10114 -free_pool:
10115 efi_call_phys1(sys_table->boottime->free_pool, map);
10116 fail:
10117 return status;
10118 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10119 index 1e3184f..0d11e2e 100644
10120 --- a/arch/x86/boot/compressed/head_32.S
10121 +++ b/arch/x86/boot/compressed/head_32.S
10122 @@ -118,7 +118,7 @@ preferred_addr:
10123 notl %eax
10124 andl %eax, %ebx
10125 #else
10126 - movl $LOAD_PHYSICAL_ADDR, %ebx
10127 + movl $____LOAD_PHYSICAL_ADDR, %ebx
10128 #endif
10129
10130 /* Target address to relocate to for decompression */
10131 @@ -204,7 +204,7 @@ relocated:
10132 * and where it was actually loaded.
10133 */
10134 movl %ebp, %ebx
10135 - subl $LOAD_PHYSICAL_ADDR, %ebx
10136 + subl $____LOAD_PHYSICAL_ADDR, %ebx
10137 jz 2f /* Nothing to be done if loaded at compiled addr. */
10138 /*
10139 * Process relocations.
10140 @@ -212,8 +212,7 @@ relocated:
10141
10142 1: subl $4, %edi
10143 movl (%edi), %ecx
10144 - testl %ecx, %ecx
10145 - jz 2f
10146 + jecxz 2f
10147 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10148 jmp 1b
10149 2:
10150 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10151 index f5d1aaa..cce11dc 100644
10152 --- a/arch/x86/boot/compressed/head_64.S
10153 +++ b/arch/x86/boot/compressed/head_64.S
10154 @@ -91,7 +91,7 @@ ENTRY(startup_32)
10155 notl %eax
10156 andl %eax, %ebx
10157 #else
10158 - movl $LOAD_PHYSICAL_ADDR, %ebx
10159 + movl $____LOAD_PHYSICAL_ADDR, %ebx
10160 #endif
10161
10162 /* Target address to relocate to for decompression */
10163 @@ -273,7 +273,7 @@ preferred_addr:
10164 notq %rax
10165 andq %rax, %rbp
10166 #else
10167 - movq $LOAD_PHYSICAL_ADDR, %rbp
10168 + movq $____LOAD_PHYSICAL_ADDR, %rbp
10169 #endif
10170
10171 /* Target address to relocate to for decompression */
10172 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10173 index 88f7ff6..ed695dd 100644
10174 --- a/arch/x86/boot/compressed/misc.c
10175 +++ b/arch/x86/boot/compressed/misc.c
10176 @@ -303,7 +303,7 @@ static void parse_elf(void *output)
10177 case PT_LOAD:
10178 #ifdef CONFIG_RELOCATABLE
10179 dest = output;
10180 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10181 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10182 #else
10183 dest = (void *)(phdr->p_paddr);
10184 #endif
10185 @@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10186 error("Destination address too large");
10187 #endif
10188 #ifndef CONFIG_RELOCATABLE
10189 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10190 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10191 error("Wrong destination address");
10192 #endif
10193
10194 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10195 index 4d3ff03..e4972ff 100644
10196 --- a/arch/x86/boot/cpucheck.c
10197 +++ b/arch/x86/boot/cpucheck.c
10198 @@ -74,7 +74,7 @@ static int has_fpu(void)
10199 u16 fcw = -1, fsw = -1;
10200 u32 cr0;
10201
10202 - asm("movl %%cr0,%0" : "=r" (cr0));
10203 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
10204 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10205 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10206 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10207 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10208 {
10209 u32 f0, f1;
10210
10211 - asm("pushfl ; "
10212 + asm volatile("pushfl ; "
10213 "pushfl ; "
10214 "popl %0 ; "
10215 "movl %0,%1 ; "
10216 @@ -115,7 +115,7 @@ static void get_flags(void)
10217 set_bit(X86_FEATURE_FPU, cpu.flags);
10218
10219 if (has_eflag(X86_EFLAGS_ID)) {
10220 - asm("cpuid"
10221 + asm volatile("cpuid"
10222 : "=a" (max_intel_level),
10223 "=b" (cpu_vendor[0]),
10224 "=d" (cpu_vendor[1]),
10225 @@ -124,7 +124,7 @@ static void get_flags(void)
10226
10227 if (max_intel_level >= 0x00000001 &&
10228 max_intel_level <= 0x0000ffff) {
10229 - asm("cpuid"
10230 + asm volatile("cpuid"
10231 : "=a" (tfms),
10232 "=c" (cpu.flags[4]),
10233 "=d" (cpu.flags[0])
10234 @@ -136,7 +136,7 @@ static void get_flags(void)
10235 cpu.model += ((tfms >> 16) & 0xf) << 4;
10236 }
10237
10238 - asm("cpuid"
10239 + asm volatile("cpuid"
10240 : "=a" (max_amd_level)
10241 : "a" (0x80000000)
10242 : "ebx", "ecx", "edx");
10243 @@ -144,7 +144,7 @@ static void get_flags(void)
10244 if (max_amd_level >= 0x80000001 &&
10245 max_amd_level <= 0x8000ffff) {
10246 u32 eax = 0x80000001;
10247 - asm("cpuid"
10248 + asm volatile("cpuid"
10249 : "+a" (eax),
10250 "=c" (cpu.flags[6]),
10251 "=d" (cpu.flags[1])
10252 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10253 u32 ecx = MSR_K7_HWCR;
10254 u32 eax, edx;
10255
10256 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10257 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10258 eax &= ~(1 << 15);
10259 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10260 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10261
10262 get_flags(); /* Make sure it really did something */
10263 err = check_flags();
10264 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10265 u32 ecx = MSR_VIA_FCR;
10266 u32 eax, edx;
10267
10268 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10269 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10270 eax |= (1<<1)|(1<<7);
10271 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10272 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10273
10274 set_bit(X86_FEATURE_CX8, cpu.flags);
10275 err = check_flags();
10276 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10277 u32 eax, edx;
10278 u32 level = 1;
10279
10280 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10281 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10282 - asm("cpuid"
10283 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10284 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10285 + asm volatile("cpuid"
10286 : "+a" (level), "=d" (cpu.flags[0])
10287 : : "ecx", "ebx");
10288 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10289 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10290
10291 err = check_flags();
10292 }
10293 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10294 index 944ce59..87ee37a 100644
10295 --- a/arch/x86/boot/header.S
10296 +++ b/arch/x86/boot/header.S
10297 @@ -401,10 +401,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10298 # single linked list of
10299 # struct setup_data
10300
10301 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10302 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10303
10304 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10305 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10306 +#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10307 +#else
10308 #define VO_INIT_SIZE (VO__end - VO__text)
10309 +#endif
10310 #if ZO_INIT_SIZE > VO_INIT_SIZE
10311 #define INIT_SIZE ZO_INIT_SIZE
10312 #else
10313 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10314 index db75d07..8e6d0af 100644
10315 --- a/arch/x86/boot/memory.c
10316 +++ b/arch/x86/boot/memory.c
10317 @@ -19,7 +19,7 @@
10318
10319 static int detect_memory_e820(void)
10320 {
10321 - int count = 0;
10322 + unsigned int count = 0;
10323 struct biosregs ireg, oreg;
10324 struct e820entry *desc = boot_params.e820_map;
10325 static struct e820entry buf; /* static so it is zeroed */
10326 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10327 index 11e8c6e..fdbb1ed 100644
10328 --- a/arch/x86/boot/video-vesa.c
10329 +++ b/arch/x86/boot/video-vesa.c
10330 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10331
10332 boot_params.screen_info.vesapm_seg = oreg.es;
10333 boot_params.screen_info.vesapm_off = oreg.di;
10334 + boot_params.screen_info.vesapm_size = oreg.cx;
10335 }
10336
10337 /*
10338 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10339 index 43eda28..5ab5fdb 100644
10340 --- a/arch/x86/boot/video.c
10341 +++ b/arch/x86/boot/video.c
10342 @@ -96,7 +96,7 @@ static void store_mode_params(void)
10343 static unsigned int get_entry(void)
10344 {
10345 char entry_buf[4];
10346 - int i, len = 0;
10347 + unsigned int i, len = 0;
10348 int key;
10349 unsigned int v;
10350
10351 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10352 index 5b577d5..3c1fed4 100644
10353 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
10354 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10355 @@ -8,6 +8,8 @@
10356 * including this sentence is retained in full.
10357 */
10358
10359 +#include <asm/alternative-asm.h>
10360 +
10361 .extern crypto_ft_tab
10362 .extern crypto_it_tab
10363 .extern crypto_fl_tab
10364 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
10365 je B192; \
10366 leaq 32(r9),r9;
10367
10368 +#define ret pax_force_retaddr 0, 1; ret
10369 +
10370 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
10371 movq r1,r2; \
10372 movq r3,r4; \
10373 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
10374 index 3470624..201259d 100644
10375 --- a/arch/x86/crypto/aesni-intel_asm.S
10376 +++ b/arch/x86/crypto/aesni-intel_asm.S
10377 @@ -31,6 +31,7 @@
10378
10379 #include <linux/linkage.h>
10380 #include <asm/inst.h>
10381 +#include <asm/alternative-asm.h>
10382
10383 #ifdef __x86_64__
10384 .data
10385 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
10386 pop %r14
10387 pop %r13
10388 pop %r12
10389 + pax_force_retaddr 0, 1
10390 ret
10391 +ENDPROC(aesni_gcm_dec)
10392
10393
10394 /*****************************************************************************
10395 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
10396 pop %r14
10397 pop %r13
10398 pop %r12
10399 + pax_force_retaddr 0, 1
10400 ret
10401 +ENDPROC(aesni_gcm_enc)
10402
10403 #endif
10404
10405 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
10406 pxor %xmm1, %xmm0
10407 movaps %xmm0, (TKEYP)
10408 add $0x10, TKEYP
10409 + pax_force_retaddr_bts
10410 ret
10411
10412 .align 4
10413 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
10414 shufps $0b01001110, %xmm2, %xmm1
10415 movaps %xmm1, 0x10(TKEYP)
10416 add $0x20, TKEYP
10417 + pax_force_retaddr_bts
10418 ret
10419
10420 .align 4
10421 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
10422
10423 movaps %xmm0, (TKEYP)
10424 add $0x10, TKEYP
10425 + pax_force_retaddr_bts
10426 ret
10427
10428 .align 4
10429 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
10430 pxor %xmm1, %xmm2
10431 movaps %xmm2, (TKEYP)
10432 add $0x10, TKEYP
10433 + pax_force_retaddr_bts
10434 ret
10435
10436 /*
10437 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
10438 #ifndef __x86_64__
10439 popl KEYP
10440 #endif
10441 + pax_force_retaddr 0, 1
10442 ret
10443 +ENDPROC(aesni_set_key)
10444
10445 /*
10446 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
10447 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
10448 popl KLEN
10449 popl KEYP
10450 #endif
10451 + pax_force_retaddr 0, 1
10452 ret
10453 +ENDPROC(aesni_enc)
10454
10455 /*
10456 * _aesni_enc1: internal ABI
10457 @@ -1959,6 +1972,7 @@ _aesni_enc1:
10458 AESENC KEY STATE
10459 movaps 0x70(TKEYP), KEY
10460 AESENCLAST KEY STATE
10461 + pax_force_retaddr_bts
10462 ret
10463
10464 /*
10465 @@ -2067,6 +2081,7 @@ _aesni_enc4:
10466 AESENCLAST KEY STATE2
10467 AESENCLAST KEY STATE3
10468 AESENCLAST KEY STATE4
10469 + pax_force_retaddr_bts
10470 ret
10471
10472 /*
10473 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
10474 popl KLEN
10475 popl KEYP
10476 #endif
10477 + pax_force_retaddr 0, 1
10478 ret
10479 +ENDPROC(aesni_dec)
10480
10481 /*
10482 * _aesni_dec1: internal ABI
10483 @@ -2146,6 +2163,7 @@ _aesni_dec1:
10484 AESDEC KEY STATE
10485 movaps 0x70(TKEYP), KEY
10486 AESDECLAST KEY STATE
10487 + pax_force_retaddr_bts
10488 ret
10489
10490 /*
10491 @@ -2254,6 +2272,7 @@ _aesni_dec4:
10492 AESDECLAST KEY STATE2
10493 AESDECLAST KEY STATE3
10494 AESDECLAST KEY STATE4
10495 + pax_force_retaddr_bts
10496 ret
10497
10498 /*
10499 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
10500 popl KEYP
10501 popl LEN
10502 #endif
10503 + pax_force_retaddr 0, 1
10504 ret
10505 +ENDPROC(aesni_ecb_enc)
10506
10507 /*
10508 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10509 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
10510 popl KEYP
10511 popl LEN
10512 #endif
10513 + pax_force_retaddr 0, 1
10514 ret
10515 +ENDPROC(aesni_ecb_dec)
10516
10517 /*
10518 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10519 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
10520 popl LEN
10521 popl IVP
10522 #endif
10523 + pax_force_retaddr 0, 1
10524 ret
10525 +ENDPROC(aesni_cbc_enc)
10526
10527 /*
10528 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10529 @@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
10530 popl LEN
10531 popl IVP
10532 #endif
10533 + pax_force_retaddr 0, 1
10534 ret
10535 +ENDPROC(aesni_cbc_dec)
10536
10537 #ifdef __x86_64__
10538 .align 16
10539 @@ -2526,6 +2553,7 @@ _aesni_inc_init:
10540 mov $1, TCTR_LOW
10541 MOVQ_R64_XMM TCTR_LOW INC
10542 MOVQ_R64_XMM CTR TCTR_LOW
10543 + pax_force_retaddr_bts
10544 ret
10545
10546 /*
10547 @@ -2554,6 +2582,7 @@ _aesni_inc:
10548 .Linc_low:
10549 movaps CTR, IV
10550 PSHUFB_XMM BSWAP_MASK IV
10551 + pax_force_retaddr_bts
10552 ret
10553
10554 /*
10555 @@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
10556 .Lctr_enc_ret:
10557 movups IV, (IVP)
10558 .Lctr_enc_just_ret:
10559 + pax_force_retaddr 0, 1
10560 ret
10561 +ENDPROC(aesni_ctr_enc)
10562 #endif
10563 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10564 index 391d245..67f35c2 100644
10565 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
10566 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10567 @@ -20,6 +20,8 @@
10568 *
10569 */
10570
10571 +#include <asm/alternative-asm.h>
10572 +
10573 .file "blowfish-x86_64-asm.S"
10574 .text
10575
10576 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
10577 jnz __enc_xor;
10578
10579 write_block();
10580 + pax_force_retaddr 0, 1
10581 ret;
10582 __enc_xor:
10583 xor_block();
10584 + pax_force_retaddr 0, 1
10585 ret;
10586
10587 .align 8
10588 @@ -188,6 +192,7 @@ blowfish_dec_blk:
10589
10590 movq %r11, %rbp;
10591
10592 + pax_force_retaddr 0, 1
10593 ret;
10594
10595 /**********************************************************************
10596 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
10597
10598 popq %rbx;
10599 popq %rbp;
10600 + pax_force_retaddr 0, 1
10601 ret;
10602
10603 __enc_xor4:
10604 @@ -349,6 +355,7 @@ __enc_xor4:
10605
10606 popq %rbx;
10607 popq %rbp;
10608 + pax_force_retaddr 0, 1
10609 ret;
10610
10611 .align 8
10612 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
10613 popq %rbx;
10614 popq %rbp;
10615
10616 + pax_force_retaddr 0, 1
10617 ret;
10618
10619 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
10620 index 0b33743..7a56206 100644
10621 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
10622 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
10623 @@ -20,6 +20,8 @@
10624 *
10625 */
10626
10627 +#include <asm/alternative-asm.h>
10628 +
10629 .file "camellia-x86_64-asm_64.S"
10630 .text
10631
10632 @@ -229,12 +231,14 @@ __enc_done:
10633 enc_outunpack(mov, RT1);
10634
10635 movq RRBP, %rbp;
10636 + pax_force_retaddr 0, 1
10637 ret;
10638
10639 __enc_xor:
10640 enc_outunpack(xor, RT1);
10641
10642 movq RRBP, %rbp;
10643 + pax_force_retaddr 0, 1
10644 ret;
10645
10646 .global camellia_dec_blk;
10647 @@ -275,6 +279,7 @@ __dec_rounds16:
10648 dec_outunpack();
10649
10650 movq RRBP, %rbp;
10651 + pax_force_retaddr 0, 1
10652 ret;
10653
10654 /**********************************************************************
10655 @@ -468,6 +473,7 @@ __enc2_done:
10656
10657 movq RRBP, %rbp;
10658 popq %rbx;
10659 + pax_force_retaddr 0, 1
10660 ret;
10661
10662 __enc2_xor:
10663 @@ -475,6 +481,7 @@ __enc2_xor:
10664
10665 movq RRBP, %rbp;
10666 popq %rbx;
10667 + pax_force_retaddr 0, 1
10668 ret;
10669
10670 .global camellia_dec_blk_2way;
10671 @@ -517,4 +524,5 @@ __dec2_rounds16:
10672
10673 movq RRBP, %rbp;
10674 movq RXOR, %rbx;
10675 + pax_force_retaddr 0, 1
10676 ret;
10677 diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10678 index 15b00ac..2071784 100644
10679 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10680 +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10681 @@ -23,6 +23,8 @@
10682 *
10683 */
10684
10685 +#include <asm/alternative-asm.h>
10686 +
10687 .file "cast5-avx-x86_64-asm_64.S"
10688
10689 .extern cast_s1
10690 @@ -281,6 +283,7 @@ __skip_enc:
10691 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
10692 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
10693
10694 + pax_force_retaddr 0, 1
10695 ret;
10696
10697 .align 16
10698 @@ -353,6 +356,7 @@ __dec_tail:
10699 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
10700 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
10701
10702 + pax_force_retaddr 0, 1
10703 ret;
10704
10705 __skip_dec:
10706 @@ -392,6 +396,7 @@ cast5_ecb_enc_16way:
10707 vmovdqu RR4, (6*4*4)(%r11);
10708 vmovdqu RL4, (7*4*4)(%r11);
10709
10710 + pax_force_retaddr
10711 ret;
10712
10713 .align 16
10714 @@ -427,6 +432,7 @@ cast5_ecb_dec_16way:
10715 vmovdqu RR4, (6*4*4)(%r11);
10716 vmovdqu RL4, (7*4*4)(%r11);
10717
10718 + pax_force_retaddr
10719 ret;
10720
10721 .align 16
10722 @@ -479,6 +485,7 @@ cast5_cbc_dec_16way:
10723
10724 popq %r12;
10725
10726 + pax_force_retaddr
10727 ret;
10728
10729 .align 16
10730 @@ -555,4 +562,5 @@ cast5_ctr_16way:
10731
10732 popq %r12;
10733
10734 + pax_force_retaddr
10735 ret;
10736 diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
10737 index 2569d0d..637c289 100644
10738 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
10739 +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
10740 @@ -23,6 +23,8 @@
10741 *
10742 */
10743
10744 +#include <asm/alternative-asm.h>
10745 +
10746 #include "glue_helper-asm-avx.S"
10747
10748 .file "cast6-avx-x86_64-asm_64.S"
10749 @@ -294,6 +296,7 @@ __cast6_enc_blk8:
10750 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
10751 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
10752
10753 + pax_force_retaddr 0, 1
10754 ret;
10755
10756 .align 8
10757 @@ -340,6 +343,7 @@ __cast6_dec_blk8:
10758 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
10759 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
10760
10761 + pax_force_retaddr 0, 1
10762 ret;
10763
10764 .align 8
10765 @@ -361,6 +365,7 @@ cast6_ecb_enc_8way:
10766
10767 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10768
10769 + pax_force_retaddr
10770 ret;
10771
10772 .align 8
10773 @@ -382,6 +387,7 @@ cast6_ecb_dec_8way:
10774
10775 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10776
10777 + pax_force_retaddr
10778 ret;
10779
10780 .align 8
10781 @@ -408,6 +414,7 @@ cast6_cbc_dec_8way:
10782
10783 popq %r12;
10784
10785 + pax_force_retaddr
10786 ret;
10787
10788 .align 8
10789 @@ -436,4 +443,5 @@ cast6_ctr_8way:
10790
10791 popq %r12;
10792
10793 + pax_force_retaddr
10794 ret;
10795 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
10796 index 6214a9b..1f4fc9a 100644
10797 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
10798 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
10799 @@ -1,3 +1,5 @@
10800 +#include <asm/alternative-asm.h>
10801 +
10802 # enter ECRYPT_encrypt_bytes
10803 .text
10804 .p2align 5
10805 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
10806 add %r11,%rsp
10807 mov %rdi,%rax
10808 mov %rsi,%rdx
10809 + pax_force_retaddr 0, 1
10810 ret
10811 # bytesatleast65:
10812 ._bytesatleast65:
10813 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
10814 add %r11,%rsp
10815 mov %rdi,%rax
10816 mov %rsi,%rdx
10817 + pax_force_retaddr
10818 ret
10819 # enter ECRYPT_ivsetup
10820 .text
10821 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
10822 add %r11,%rsp
10823 mov %rdi,%rax
10824 mov %rsi,%rdx
10825 + pax_force_retaddr
10826 ret
10827 diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
10828 index 02b0e9f..cf4cf5c 100644
10829 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
10830 +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
10831 @@ -24,6 +24,8 @@
10832 *
10833 */
10834
10835 +#include <asm/alternative-asm.h>
10836 +
10837 #include "glue_helper-asm-avx.S"
10838
10839 .file "serpent-avx-x86_64-asm_64.S"
10840 @@ -618,6 +620,7 @@ __serpent_enc_blk8_avx:
10841 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
10842 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
10843
10844 + pax_force_retaddr
10845 ret;
10846
10847 .align 8
10848 @@ -673,6 +676,7 @@ __serpent_dec_blk8_avx:
10849 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
10850 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
10851
10852 + pax_force_retaddr
10853 ret;
10854
10855 .align 8
10856 @@ -692,6 +696,7 @@ serpent_ecb_enc_8way_avx:
10857
10858 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10859
10860 + pax_force_retaddr
10861 ret;
10862
10863 .align 8
10864 @@ -711,6 +716,7 @@ serpent_ecb_dec_8way_avx:
10865
10866 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
10867
10868 + pax_force_retaddr
10869 ret;
10870
10871 .align 8
10872 @@ -730,6 +736,7 @@ serpent_cbc_dec_8way_avx:
10873
10874 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
10875
10876 + pax_force_retaddr
10877 ret;
10878
10879 .align 8
10880 @@ -751,4 +758,5 @@ serpent_ctr_8way_avx:
10881
10882 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10883
10884 + pax_force_retaddr
10885 ret;
10886 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
10887 index 3ee1ff0..cbc568b 100644
10888 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
10889 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
10890 @@ -24,6 +24,8 @@
10891 *
10892 */
10893
10894 +#include <asm/alternative-asm.h>
10895 +
10896 .file "serpent-sse2-x86_64-asm_64.S"
10897 .text
10898
10899 @@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
10900 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
10901 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
10902
10903 + pax_force_retaddr
10904 ret;
10905
10906 __enc_xor8:
10907 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
10908 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
10909
10910 + pax_force_retaddr
10911 ret;
10912
10913 .align 8
10914 @@ -755,4 +759,5 @@ serpent_dec_blk_8way:
10915 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
10916 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
10917
10918 + pax_force_retaddr
10919 ret;
10920 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
10921 index 49d6987..df66bd4 100644
10922 --- a/arch/x86/crypto/sha1_ssse3_asm.S
10923 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
10924 @@ -28,6 +28,8 @@
10925 * (at your option) any later version.
10926 */
10927
10928 +#include <asm/alternative-asm.h>
10929 +
10930 #define CTX %rdi // arg1
10931 #define BUF %rsi // arg2
10932 #define CNT %rdx // arg3
10933 @@ -104,6 +106,7 @@
10934 pop %r12
10935 pop %rbp
10936 pop %rbx
10937 + pax_force_retaddr 0, 1
10938 ret
10939
10940 .size \name, .-\name
10941 diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
10942 index ebac16b..8092eb9 100644
10943 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
10944 +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
10945 @@ -23,6 +23,8 @@
10946 *
10947 */
10948
10949 +#include <asm/alternative-asm.h>
10950 +
10951 #include "glue_helper-asm-avx.S"
10952
10953 .file "twofish-avx-x86_64-asm_64.S"
10954 @@ -283,6 +285,7 @@ __twofish_enc_blk8:
10955 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
10956 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
10957
10958 + pax_force_retaddr 0, 1
10959 ret;
10960
10961 .align 8
10962 @@ -324,6 +327,7 @@ __twofish_dec_blk8:
10963 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
10964 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
10965
10966 + pax_force_retaddr 0, 1
10967 ret;
10968
10969 .align 8
10970 @@ -345,6 +349,7 @@ twofish_ecb_enc_8way:
10971
10972 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
10973
10974 + pax_force_retaddr 0, 1
10975 ret;
10976
10977 .align 8
10978 @@ -366,6 +371,7 @@ twofish_ecb_dec_8way:
10979
10980 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10981
10982 + pax_force_retaddr 0, 1
10983 ret;
10984
10985 .align 8
10986 @@ -392,6 +398,7 @@ twofish_cbc_dec_8way:
10987
10988 popq %r12;
10989
10990 + pax_force_retaddr 0, 1
10991 ret;
10992
10993 .align 8
10994 @@ -420,4 +427,5 @@ twofish_ctr_8way:
10995
10996 popq %r12;
10997
10998 + pax_force_retaddr 0, 1
10999 ret;
11000 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11001 index 5b012a2..36d5364 100644
11002 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11003 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11004 @@ -20,6 +20,8 @@
11005 *
11006 */
11007
11008 +#include <asm/alternative-asm.h>
11009 +
11010 .file "twofish-x86_64-asm-3way.S"
11011 .text
11012
11013 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
11014 popq %r13;
11015 popq %r14;
11016 popq %r15;
11017 + pax_force_retaddr 0, 1
11018 ret;
11019
11020 __enc_xor3:
11021 @@ -271,6 +274,7 @@ __enc_xor3:
11022 popq %r13;
11023 popq %r14;
11024 popq %r15;
11025 + pax_force_retaddr 0, 1
11026 ret;
11027
11028 .global twofish_dec_blk_3way
11029 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
11030 popq %r13;
11031 popq %r14;
11032 popq %r15;
11033 + pax_force_retaddr 0, 1
11034 ret;
11035
11036 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11037 index 7bcf3fc..f53832f 100644
11038 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11039 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11040 @@ -21,6 +21,7 @@
11041 .text
11042
11043 #include <asm/asm-offsets.h>
11044 +#include <asm/alternative-asm.h>
11045
11046 #define a_offset 0
11047 #define b_offset 4
11048 @@ -268,6 +269,7 @@ twofish_enc_blk:
11049
11050 popq R1
11051 movq $1,%rax
11052 + pax_force_retaddr 0, 1
11053 ret
11054
11055 twofish_dec_blk:
11056 @@ -319,4 +321,5 @@ twofish_dec_blk:
11057
11058 popq R1
11059 movq $1,%rax
11060 + pax_force_retaddr 0, 1
11061 ret
11062 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11063 index a703af1..f5b9c36 100644
11064 --- a/arch/x86/ia32/ia32_aout.c
11065 +++ b/arch/x86/ia32/ia32_aout.c
11066 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11067 unsigned long dump_start, dump_size;
11068 struct user32 dump;
11069
11070 + memset(&dump, 0, sizeof(dump));
11071 +
11072 fs = get_fs();
11073 set_fs(KERNEL_DS);
11074 has_dumped = 1;
11075 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11076 index a1daf4a..f8c4537 100644
11077 --- a/arch/x86/ia32/ia32_signal.c
11078 +++ b/arch/x86/ia32/ia32_signal.c
11079 @@ -348,7 +348,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
11080 sp -= frame_size;
11081 /* Align the stack pointer according to the i386 ABI,
11082 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11083 - sp = ((sp + 4) & -16ul) - 4;
11084 + sp = ((sp - 12) & -16ul) - 4;
11085 return (void __user *) sp;
11086 }
11087
11088 @@ -406,7 +406,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
11089 * These are actually not used anymore, but left because some
11090 * gdb versions depend on them as a marker.
11091 */
11092 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11093 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11094 } put_user_catch(err);
11095
11096 if (err)
11097 @@ -448,7 +448,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11098 0xb8,
11099 __NR_ia32_rt_sigreturn,
11100 0x80cd,
11101 - 0,
11102 + 0
11103 };
11104
11105 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
11106 @@ -471,16 +471,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11107
11108 if (ka->sa.sa_flags & SA_RESTORER)
11109 restorer = ka->sa.sa_restorer;
11110 + else if (current->mm->context.vdso)
11111 + /* Return stub is in 32bit vsyscall page */
11112 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11113 else
11114 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11115 - rt_sigreturn);
11116 + restorer = &frame->retcode;
11117 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11118
11119 /*
11120 * Not actually used anymore, but left because some gdb
11121 * versions need it.
11122 */
11123 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11124 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11125 } put_user_catch(err);
11126
11127 err |= copy_siginfo_to_user32(&frame->info, info);
11128 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11129 index 142c4ce..19b683f 100644
11130 --- a/arch/x86/ia32/ia32entry.S
11131 +++ b/arch/x86/ia32/ia32entry.S
11132 @@ -15,8 +15,10 @@
11133 #include <asm/irqflags.h>
11134 #include <asm/asm.h>
11135 #include <asm/smap.h>
11136 +#include <asm/pgtable.h>
11137 #include <linux/linkage.h>
11138 #include <linux/err.h>
11139 +#include <asm/alternative-asm.h>
11140
11141 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11142 #include <linux/elf-em.h>
11143 @@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11144 ENDPROC(native_irq_enable_sysexit)
11145 #endif
11146
11147 + .macro pax_enter_kernel_user
11148 + pax_set_fptr_mask
11149 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11150 + call pax_enter_kernel_user
11151 +#endif
11152 + .endm
11153 +
11154 + .macro pax_exit_kernel_user
11155 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11156 + call pax_exit_kernel_user
11157 +#endif
11158 +#ifdef CONFIG_PAX_RANDKSTACK
11159 + pushq %rax
11160 + pushq %r11
11161 + call pax_randomize_kstack
11162 + popq %r11
11163 + popq %rax
11164 +#endif
11165 + .endm
11166 +
11167 +.macro pax_erase_kstack
11168 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11169 + call pax_erase_kstack
11170 +#endif
11171 +.endm
11172 +
11173 /*
11174 * 32bit SYSENTER instruction entry.
11175 *
11176 @@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11177 CFI_REGISTER rsp,rbp
11178 SWAPGS_UNSAFE_STACK
11179 movq PER_CPU_VAR(kernel_stack), %rsp
11180 - addq $(KERNEL_STACK_OFFSET),%rsp
11181 - /*
11182 - * No need to follow this irqs on/off section: the syscall
11183 - * disabled irqs, here we enable it straight after entry:
11184 - */
11185 - ENABLE_INTERRUPTS(CLBR_NONE)
11186 movl %ebp,%ebp /* zero extension */
11187 pushq_cfi $__USER32_DS
11188 /*CFI_REL_OFFSET ss,0*/
11189 @@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11190 CFI_REL_OFFSET rsp,0
11191 pushfq_cfi
11192 /*CFI_REL_OFFSET rflags,0*/
11193 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11194 - CFI_REGISTER rip,r10
11195 + orl $X86_EFLAGS_IF,(%rsp)
11196 + GET_THREAD_INFO(%r11)
11197 + movl TI_sysenter_return(%r11), %r11d
11198 + CFI_REGISTER rip,r11
11199 pushq_cfi $__USER32_CS
11200 /*CFI_REL_OFFSET cs,0*/
11201 movl %eax, %eax
11202 - pushq_cfi %r10
11203 + pushq_cfi %r11
11204 CFI_REL_OFFSET rip,0
11205 pushq_cfi %rax
11206 cld
11207 SAVE_ARGS 0,1,0
11208 + pax_enter_kernel_user
11209 +
11210 +#ifdef CONFIG_PAX_RANDKSTACK
11211 + pax_erase_kstack
11212 +#endif
11213 +
11214 + /*
11215 + * No need to follow this irqs on/off section: the syscall
11216 + * disabled irqs, here we enable it straight after entry:
11217 + */
11218 + ENABLE_INTERRUPTS(CLBR_NONE)
11219 /* no need to do an access_ok check here because rbp has been
11220 32bit zero extended */
11221 +
11222 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11223 + mov $PAX_USER_SHADOW_BASE,%r11
11224 + add %r11,%rbp
11225 +#endif
11226 +
11227 ASM_STAC
11228 1: movl (%rbp),%ebp
11229 _ASM_EXTABLE(1b,ia32_badarg)
11230 ASM_CLAC
11231 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11232 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11233 + GET_THREAD_INFO(%r11)
11234 + orl $TS_COMPAT,TI_status(%r11)
11235 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11236 CFI_REMEMBER_STATE
11237 jnz sysenter_tracesys
11238 cmpq $(IA32_NR_syscalls-1),%rax
11239 @@ -162,12 +204,15 @@ sysenter_do_call:
11240 sysenter_dispatch:
11241 call *ia32_sys_call_table(,%rax,8)
11242 movq %rax,RAX-ARGOFFSET(%rsp)
11243 + GET_THREAD_INFO(%r11)
11244 DISABLE_INTERRUPTS(CLBR_NONE)
11245 TRACE_IRQS_OFF
11246 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11247 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11248 jnz sysexit_audit
11249 sysexit_from_sys_call:
11250 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11251 + pax_exit_kernel_user
11252 + pax_erase_kstack
11253 + andl $~TS_COMPAT,TI_status(%r11)
11254 /* clear IF, that popfq doesn't enable interrupts early */
11255 andl $~0x200,EFLAGS-R11(%rsp)
11256 movl RIP-R11(%rsp),%edx /* User %eip */
11257 @@ -193,6 +238,9 @@ sysexit_from_sys_call:
11258 movl %eax,%esi /* 2nd arg: syscall number */
11259 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11260 call __audit_syscall_entry
11261 +
11262 + pax_erase_kstack
11263 +
11264 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11265 cmpq $(IA32_NR_syscalls-1),%rax
11266 ja ia32_badsys
11267 @@ -204,7 +252,7 @@ sysexit_from_sys_call:
11268 .endm
11269
11270 .macro auditsys_exit exit
11271 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11272 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11273 jnz ia32_ret_from_sys_call
11274 TRACE_IRQS_ON
11275 ENABLE_INTERRUPTS(CLBR_NONE)
11276 @@ -215,11 +263,12 @@ sysexit_from_sys_call:
11277 1: setbe %al /* 1 if error, 0 if not */
11278 movzbl %al,%edi /* zero-extend that into %edi */
11279 call __audit_syscall_exit
11280 + GET_THREAD_INFO(%r11)
11281 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11282 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11283 DISABLE_INTERRUPTS(CLBR_NONE)
11284 TRACE_IRQS_OFF
11285 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11286 + testl %edi,TI_flags(%r11)
11287 jz \exit
11288 CLEAR_RREGS -ARGOFFSET
11289 jmp int_with_check
11290 @@ -237,7 +286,7 @@ sysexit_audit:
11291
11292 sysenter_tracesys:
11293 #ifdef CONFIG_AUDITSYSCALL
11294 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11295 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11296 jz sysenter_auditsys
11297 #endif
11298 SAVE_REST
11299 @@ -249,6 +298,9 @@ sysenter_tracesys:
11300 RESTORE_REST
11301 cmpq $(IA32_NR_syscalls-1),%rax
11302 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11303 +
11304 + pax_erase_kstack
11305 +
11306 jmp sysenter_do_call
11307 CFI_ENDPROC
11308 ENDPROC(ia32_sysenter_target)
11309 @@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11310 ENTRY(ia32_cstar_target)
11311 CFI_STARTPROC32 simple
11312 CFI_SIGNAL_FRAME
11313 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11314 + CFI_DEF_CFA rsp,0
11315 CFI_REGISTER rip,rcx
11316 /*CFI_REGISTER rflags,r11*/
11317 SWAPGS_UNSAFE_STACK
11318 movl %esp,%r8d
11319 CFI_REGISTER rsp,r8
11320 movq PER_CPU_VAR(kernel_stack),%rsp
11321 + SAVE_ARGS 8*6,0,0
11322 + pax_enter_kernel_user
11323 +
11324 +#ifdef CONFIG_PAX_RANDKSTACK
11325 + pax_erase_kstack
11326 +#endif
11327 +
11328 /*
11329 * No need to follow this irqs on/off section: the syscall
11330 * disabled irqs and here we enable it straight after entry:
11331 */
11332 ENABLE_INTERRUPTS(CLBR_NONE)
11333 - SAVE_ARGS 8,0,0
11334 movl %eax,%eax /* zero extension */
11335 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11336 movq %rcx,RIP-ARGOFFSET(%rsp)
11337 @@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11338 /* no need to do an access_ok check here because r8 has been
11339 32bit zero extended */
11340 /* hardware stack frame is complete now */
11341 +
11342 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11343 + mov $PAX_USER_SHADOW_BASE,%r11
11344 + add %r11,%r8
11345 +#endif
11346 +
11347 ASM_STAC
11348 1: movl (%r8),%r9d
11349 _ASM_EXTABLE(1b,ia32_badarg)
11350 ASM_CLAC
11351 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11352 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11353 + GET_THREAD_INFO(%r11)
11354 + orl $TS_COMPAT,TI_status(%r11)
11355 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11356 CFI_REMEMBER_STATE
11357 jnz cstar_tracesys
11358 cmpq $IA32_NR_syscalls-1,%rax
11359 @@ -319,12 +384,15 @@ cstar_do_call:
11360 cstar_dispatch:
11361 call *ia32_sys_call_table(,%rax,8)
11362 movq %rax,RAX-ARGOFFSET(%rsp)
11363 + GET_THREAD_INFO(%r11)
11364 DISABLE_INTERRUPTS(CLBR_NONE)
11365 TRACE_IRQS_OFF
11366 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11367 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11368 jnz sysretl_audit
11369 sysretl_from_sys_call:
11370 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11371 + pax_exit_kernel_user
11372 + pax_erase_kstack
11373 + andl $~TS_COMPAT,TI_status(%r11)
11374 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
11375 movl RIP-ARGOFFSET(%rsp),%ecx
11376 CFI_REGISTER rip,rcx
11377 @@ -352,7 +420,7 @@ sysretl_audit:
11378
11379 cstar_tracesys:
11380 #ifdef CONFIG_AUDITSYSCALL
11381 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11382 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11383 jz cstar_auditsys
11384 #endif
11385 xchgl %r9d,%ebp
11386 @@ -366,6 +434,9 @@ cstar_tracesys:
11387 xchgl %ebp,%r9d
11388 cmpq $(IA32_NR_syscalls-1),%rax
11389 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
11390 +
11391 + pax_erase_kstack
11392 +
11393 jmp cstar_do_call
11394 END(ia32_cstar_target)
11395
11396 @@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
11397 CFI_REL_OFFSET rip,RIP-RIP
11398 PARAVIRT_ADJUST_EXCEPTION_FRAME
11399 SWAPGS
11400 - /*
11401 - * No need to follow this irqs on/off section: the syscall
11402 - * disabled irqs and here we enable it straight after entry:
11403 - */
11404 - ENABLE_INTERRUPTS(CLBR_NONE)
11405 movl %eax,%eax
11406 pushq_cfi %rax
11407 cld
11408 /* note the registers are not zero extended to the sf.
11409 this could be a problem. */
11410 SAVE_ARGS 0,1,0
11411 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11412 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11413 + pax_enter_kernel_user
11414 +
11415 +#ifdef CONFIG_PAX_RANDKSTACK
11416 + pax_erase_kstack
11417 +#endif
11418 +
11419 + /*
11420 + * No need to follow this irqs on/off section: the syscall
11421 + * disabled irqs and here we enable it straight after entry:
11422 + */
11423 + ENABLE_INTERRUPTS(CLBR_NONE)
11424 + GET_THREAD_INFO(%r11)
11425 + orl $TS_COMPAT,TI_status(%r11)
11426 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11427 jnz ia32_tracesys
11428 cmpq $(IA32_NR_syscalls-1),%rax
11429 ja ia32_badsys
11430 @@ -442,6 +520,9 @@ ia32_tracesys:
11431 RESTORE_REST
11432 cmpq $(IA32_NR_syscalls-1),%rax
11433 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
11434 +
11435 + pax_erase_kstack
11436 +
11437 jmp ia32_do_call
11438 END(ia32_syscall)
11439
11440 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
11441 index d0b689b..34be51d 100644
11442 --- a/arch/x86/ia32/sys_ia32.c
11443 +++ b/arch/x86/ia32/sys_ia32.c
11444 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
11445 */
11446 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
11447 {
11448 - typeof(ubuf->st_uid) uid = 0;
11449 - typeof(ubuf->st_gid) gid = 0;
11450 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
11451 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
11452 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
11453 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
11454 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
11455 @@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
11456 mm_segment_t old_fs = get_fs();
11457
11458 set_fs(KERNEL_DS);
11459 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
11460 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
11461 set_fs(old_fs);
11462 if (put_compat_timespec(&t, interval))
11463 return -EFAULT;
11464 @@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
11465 mm_segment_t old_fs = get_fs();
11466
11467 set_fs(KERNEL_DS);
11468 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
11469 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
11470 set_fs(old_fs);
11471 if (!ret) {
11472 switch (_NSIG_WORDS) {
11473 @@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
11474 if (copy_siginfo_from_user32(&info, uinfo))
11475 return -EFAULT;
11476 set_fs(KERNEL_DS);
11477 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
11478 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
11479 set_fs(old_fs);
11480 return ret;
11481 }
11482 @@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
11483 return -EFAULT;
11484
11485 set_fs(KERNEL_DS);
11486 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
11487 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
11488 count);
11489 set_fs(old_fs);
11490
11491 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
11492 index 372231c..a5aa1a1 100644
11493 --- a/arch/x86/include/asm/alternative-asm.h
11494 +++ b/arch/x86/include/asm/alternative-asm.h
11495 @@ -18,6 +18,45 @@
11496 .endm
11497 #endif
11498
11499 +#ifdef KERNEXEC_PLUGIN
11500 + .macro pax_force_retaddr_bts rip=0
11501 + btsq $63,\rip(%rsp)
11502 + .endm
11503 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11504 + .macro pax_force_retaddr rip=0, reload=0
11505 + btsq $63,\rip(%rsp)
11506 + .endm
11507 + .macro pax_force_fptr ptr
11508 + btsq $63,\ptr
11509 + .endm
11510 + .macro pax_set_fptr_mask
11511 + .endm
11512 +#endif
11513 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
11514 + .macro pax_force_retaddr rip=0, reload=0
11515 + .if \reload
11516 + pax_set_fptr_mask
11517 + .endif
11518 + orq %r10,\rip(%rsp)
11519 + .endm
11520 + .macro pax_force_fptr ptr
11521 + orq %r10,\ptr
11522 + .endm
11523 + .macro pax_set_fptr_mask
11524 + movabs $0x8000000000000000,%r10
11525 + .endm
11526 +#endif
11527 +#else
11528 + .macro pax_force_retaddr rip=0, reload=0
11529 + .endm
11530 + .macro pax_force_fptr ptr
11531 + .endm
11532 + .macro pax_force_retaddr_bts rip=0
11533 + .endm
11534 + .macro pax_set_fptr_mask
11535 + .endm
11536 +#endif
11537 +
11538 .macro altinstruction_entry orig alt feature orig_len alt_len
11539 .long \orig - .
11540 .long \alt - .
11541 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
11542 index 58ed6d9..f1cbe58 100644
11543 --- a/arch/x86/include/asm/alternative.h
11544 +++ b/arch/x86/include/asm/alternative.h
11545 @@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11546 ".pushsection .discard,\"aw\",@progbits\n" \
11547 DISCARD_ENTRY(1) \
11548 ".popsection\n" \
11549 - ".pushsection .altinstr_replacement, \"ax\"\n" \
11550 + ".pushsection .altinstr_replacement, \"a\"\n" \
11551 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
11552 ".popsection"
11553
11554 @@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11555 DISCARD_ENTRY(1) \
11556 DISCARD_ENTRY(2) \
11557 ".popsection\n" \
11558 - ".pushsection .altinstr_replacement, \"ax\"\n" \
11559 + ".pushsection .altinstr_replacement, \"a\"\n" \
11560 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
11561 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
11562 ".popsection"
11563 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
11564 index 3388034..050f0b9 100644
11565 --- a/arch/x86/include/asm/apic.h
11566 +++ b/arch/x86/include/asm/apic.h
11567 @@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
11568
11569 #ifdef CONFIG_X86_LOCAL_APIC
11570
11571 -extern unsigned int apic_verbosity;
11572 +extern int apic_verbosity;
11573 extern int local_apic_timer_c2_ok;
11574
11575 extern int disable_apic;
11576 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
11577 index 20370c6..a2eb9b0 100644
11578 --- a/arch/x86/include/asm/apm.h
11579 +++ b/arch/x86/include/asm/apm.h
11580 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
11581 __asm__ __volatile__(APM_DO_ZERO_SEGS
11582 "pushl %%edi\n\t"
11583 "pushl %%ebp\n\t"
11584 - "lcall *%%cs:apm_bios_entry\n\t"
11585 + "lcall *%%ss:apm_bios_entry\n\t"
11586 "setc %%al\n\t"
11587 "popl %%ebp\n\t"
11588 "popl %%edi\n\t"
11589 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
11590 __asm__ __volatile__(APM_DO_ZERO_SEGS
11591 "pushl %%edi\n\t"
11592 "pushl %%ebp\n\t"
11593 - "lcall *%%cs:apm_bios_entry\n\t"
11594 + "lcall *%%ss:apm_bios_entry\n\t"
11595 "setc %%bl\n\t"
11596 "popl %%ebp\n\t"
11597 "popl %%edi\n\t"
11598 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
11599 index 722aa3b..3a0bb27 100644
11600 --- a/arch/x86/include/asm/atomic.h
11601 +++ b/arch/x86/include/asm/atomic.h
11602 @@ -22,7 +22,18 @@
11603 */
11604 static inline int atomic_read(const atomic_t *v)
11605 {
11606 - return (*(volatile int *)&(v)->counter);
11607 + return (*(volatile const int *)&(v)->counter);
11608 +}
11609 +
11610 +/**
11611 + * atomic_read_unchecked - read atomic variable
11612 + * @v: pointer of type atomic_unchecked_t
11613 + *
11614 + * Atomically reads the value of @v.
11615 + */
11616 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
11617 +{
11618 + return (*(volatile const int *)&(v)->counter);
11619 }
11620
11621 /**
11622 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
11623 }
11624
11625 /**
11626 + * atomic_set_unchecked - set atomic variable
11627 + * @v: pointer of type atomic_unchecked_t
11628 + * @i: required value
11629 + *
11630 + * Atomically sets the value of @v to @i.
11631 + */
11632 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
11633 +{
11634 + v->counter = i;
11635 +}
11636 +
11637 +/**
11638 * atomic_add - add integer to atomic variable
11639 * @i: integer value to add
11640 * @v: pointer of type atomic_t
11641 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
11642 */
11643 static inline void atomic_add(int i, atomic_t *v)
11644 {
11645 - asm volatile(LOCK_PREFIX "addl %1,%0"
11646 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
11647 +
11648 +#ifdef CONFIG_PAX_REFCOUNT
11649 + "jno 0f\n"
11650 + LOCK_PREFIX "subl %1,%0\n"
11651 + "int $4\n0:\n"
11652 + _ASM_EXTABLE(0b, 0b)
11653 +#endif
11654 +
11655 + : "+m" (v->counter)
11656 + : "ir" (i));
11657 +}
11658 +
11659 +/**
11660 + * atomic_add_unchecked - add integer to atomic variable
11661 + * @i: integer value to add
11662 + * @v: pointer of type atomic_unchecked_t
11663 + *
11664 + * Atomically adds @i to @v.
11665 + */
11666 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
11667 +{
11668 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
11669 : "+m" (v->counter)
11670 : "ir" (i));
11671 }
11672 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
11673 */
11674 static inline void atomic_sub(int i, atomic_t *v)
11675 {
11676 - asm volatile(LOCK_PREFIX "subl %1,%0"
11677 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
11678 +
11679 +#ifdef CONFIG_PAX_REFCOUNT
11680 + "jno 0f\n"
11681 + LOCK_PREFIX "addl %1,%0\n"
11682 + "int $4\n0:\n"
11683 + _ASM_EXTABLE(0b, 0b)
11684 +#endif
11685 +
11686 + : "+m" (v->counter)
11687 + : "ir" (i));
11688 +}
11689 +
11690 +/**
11691 + * atomic_sub_unchecked - subtract integer from atomic variable
11692 + * @i: integer value to subtract
11693 + * @v: pointer of type atomic_unchecked_t
11694 + *
11695 + * Atomically subtracts @i from @v.
11696 + */
11697 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
11698 +{
11699 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
11700 : "+m" (v->counter)
11701 : "ir" (i));
11702 }
11703 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
11704 {
11705 unsigned char c;
11706
11707 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
11708 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
11709 +
11710 +#ifdef CONFIG_PAX_REFCOUNT
11711 + "jno 0f\n"
11712 + LOCK_PREFIX "addl %2,%0\n"
11713 + "int $4\n0:\n"
11714 + _ASM_EXTABLE(0b, 0b)
11715 +#endif
11716 +
11717 + "sete %1\n"
11718 : "+m" (v->counter), "=qm" (c)
11719 : "ir" (i) : "memory");
11720 return c;
11721 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
11722 */
11723 static inline void atomic_inc(atomic_t *v)
11724 {
11725 - asm volatile(LOCK_PREFIX "incl %0"
11726 + asm volatile(LOCK_PREFIX "incl %0\n"
11727 +
11728 +#ifdef CONFIG_PAX_REFCOUNT
11729 + "jno 0f\n"
11730 + LOCK_PREFIX "decl %0\n"
11731 + "int $4\n0:\n"
11732 + _ASM_EXTABLE(0b, 0b)
11733 +#endif
11734 +
11735 + : "+m" (v->counter));
11736 +}
11737 +
11738 +/**
11739 + * atomic_inc_unchecked - increment atomic variable
11740 + * @v: pointer of type atomic_unchecked_t
11741 + *
11742 + * Atomically increments @v by 1.
11743 + */
11744 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
11745 +{
11746 + asm volatile(LOCK_PREFIX "incl %0\n"
11747 : "+m" (v->counter));
11748 }
11749
11750 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
11751 */
11752 static inline void atomic_dec(atomic_t *v)
11753 {
11754 - asm volatile(LOCK_PREFIX "decl %0"
11755 + asm volatile(LOCK_PREFIX "decl %0\n"
11756 +
11757 +#ifdef CONFIG_PAX_REFCOUNT
11758 + "jno 0f\n"
11759 + LOCK_PREFIX "incl %0\n"
11760 + "int $4\n0:\n"
11761 + _ASM_EXTABLE(0b, 0b)
11762 +#endif
11763 +
11764 + : "+m" (v->counter));
11765 +}
11766 +
11767 +/**
11768 + * atomic_dec_unchecked - decrement atomic variable
11769 + * @v: pointer of type atomic_unchecked_t
11770 + *
11771 + * Atomically decrements @v by 1.
11772 + */
11773 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
11774 +{
11775 + asm volatile(LOCK_PREFIX "decl %0\n"
11776 : "+m" (v->counter));
11777 }
11778
11779 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
11780 {
11781 unsigned char c;
11782
11783 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
11784 + asm volatile(LOCK_PREFIX "decl %0\n"
11785 +
11786 +#ifdef CONFIG_PAX_REFCOUNT
11787 + "jno 0f\n"
11788 + LOCK_PREFIX "incl %0\n"
11789 + "int $4\n0:\n"
11790 + _ASM_EXTABLE(0b, 0b)
11791 +#endif
11792 +
11793 + "sete %1\n"
11794 : "+m" (v->counter), "=qm" (c)
11795 : : "memory");
11796 return c != 0;
11797 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
11798 {
11799 unsigned char c;
11800
11801 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
11802 + asm volatile(LOCK_PREFIX "incl %0\n"
11803 +
11804 +#ifdef CONFIG_PAX_REFCOUNT
11805 + "jno 0f\n"
11806 + LOCK_PREFIX "decl %0\n"
11807 + "int $4\n0:\n"
11808 + _ASM_EXTABLE(0b, 0b)
11809 +#endif
11810 +
11811 + "sete %1\n"
11812 + : "+m" (v->counter), "=qm" (c)
11813 + : : "memory");
11814 + return c != 0;
11815 +}
11816 +
11817 +/**
11818 + * atomic_inc_and_test_unchecked - increment and test
11819 + * @v: pointer of type atomic_unchecked_t
11820 + *
11821 + * Atomically increments @v by 1
11822 + * and returns true if the result is zero, or false for all
11823 + * other cases.
11824 + */
11825 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
11826 +{
11827 + unsigned char c;
11828 +
11829 + asm volatile(LOCK_PREFIX "incl %0\n"
11830 + "sete %1\n"
11831 : "+m" (v->counter), "=qm" (c)
11832 : : "memory");
11833 return c != 0;
11834 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
11835 {
11836 unsigned char c;
11837
11838 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
11839 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
11840 +
11841 +#ifdef CONFIG_PAX_REFCOUNT
11842 + "jno 0f\n"
11843 + LOCK_PREFIX "subl %2,%0\n"
11844 + "int $4\n0:\n"
11845 + _ASM_EXTABLE(0b, 0b)
11846 +#endif
11847 +
11848 + "sets %1\n"
11849 : "+m" (v->counter), "=qm" (c)
11850 : "ir" (i) : "memory");
11851 return c;
11852 @@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
11853 */
11854 static inline int atomic_add_return(int i, atomic_t *v)
11855 {
11856 + return i + xadd_check_overflow(&v->counter, i);
11857 +}
11858 +
11859 +/**
11860 + * atomic_add_return_unchecked - add integer and return
11861 + * @i: integer value to add
11862 + * @v: pointer of type atomic_unchecked_t
11863 + *
11864 + * Atomically adds @i to @v and returns @i + @v
11865 + */
11866 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
11867 +{
11868 return i + xadd(&v->counter, i);
11869 }
11870
11871 @@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
11872 }
11873
11874 #define atomic_inc_return(v) (atomic_add_return(1, v))
11875 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
11876 +{
11877 + return atomic_add_return_unchecked(1, v);
11878 +}
11879 #define atomic_dec_return(v) (atomic_sub_return(1, v))
11880
11881 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
11882 @@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
11883 return cmpxchg(&v->counter, old, new);
11884 }
11885
11886 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
11887 +{
11888 + return cmpxchg(&v->counter, old, new);
11889 +}
11890 +
11891 static inline int atomic_xchg(atomic_t *v, int new)
11892 {
11893 return xchg(&v->counter, new);
11894 }
11895
11896 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
11897 +{
11898 + return xchg(&v->counter, new);
11899 +}
11900 +
11901 /**
11902 * __atomic_add_unless - add unless the number is already a given value
11903 * @v: pointer of type atomic_t
11904 @@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
11905 */
11906 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
11907 {
11908 - int c, old;
11909 + int c, old, new;
11910 c = atomic_read(v);
11911 for (;;) {
11912 - if (unlikely(c == (u)))
11913 + if (unlikely(c == u))
11914 break;
11915 - old = atomic_cmpxchg((v), c, c + (a));
11916 +
11917 + asm volatile("addl %2,%0\n"
11918 +
11919 +#ifdef CONFIG_PAX_REFCOUNT
11920 + "jno 0f\n"
11921 + "subl %2,%0\n"
11922 + "int $4\n0:\n"
11923 + _ASM_EXTABLE(0b, 0b)
11924 +#endif
11925 +
11926 + : "=r" (new)
11927 + : "0" (c), "ir" (a));
11928 +
11929 + old = atomic_cmpxchg(v, c, new);
11930 if (likely(old == c))
11931 break;
11932 c = old;
11933 @@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
11934 }
11935
11936 /**
11937 + * atomic_inc_not_zero_hint - increment if not null
11938 + * @v: pointer of type atomic_t
11939 + * @hint: probable value of the atomic before the increment
11940 + *
11941 + * This version of atomic_inc_not_zero() gives a hint of probable
11942 + * value of the atomic. This helps processor to not read the memory
11943 + * before doing the atomic read/modify/write cycle, lowering
11944 + * number of bus transactions on some arches.
11945 + *
11946 + * Returns: 0 if increment was not done, 1 otherwise.
11947 + */
11948 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
11949 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
11950 +{
11951 + int val, c = hint, new;
11952 +
11953 + /* sanity test, should be removed by compiler if hint is a constant */
11954 + if (!hint)
11955 + return __atomic_add_unless(v, 1, 0);
11956 +
11957 + do {
11958 + asm volatile("incl %0\n"
11959 +
11960 +#ifdef CONFIG_PAX_REFCOUNT
11961 + "jno 0f\n"
11962 + "decl %0\n"
11963 + "int $4\n0:\n"
11964 + _ASM_EXTABLE(0b, 0b)
11965 +#endif
11966 +
11967 + : "=r" (new)
11968 + : "0" (c));
11969 +
11970 + val = atomic_cmpxchg(v, c, new);
11971 + if (val == c)
11972 + return 1;
11973 + c = val;
11974 + } while (c);
11975 +
11976 + return 0;
11977 +}
11978 +
11979 +/**
11980 * atomic_inc_short - increment of a short integer
11981 * @v: pointer to type int
11982 *
11983 @@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
11984 #endif
11985
11986 /* These are x86-specific, used by some header files */
11987 -#define atomic_clear_mask(mask, addr) \
11988 - asm volatile(LOCK_PREFIX "andl %0,%1" \
11989 - : : "r" (~(mask)), "m" (*(addr)) : "memory")
11990 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
11991 +{
11992 + asm volatile(LOCK_PREFIX "andl %1,%0"
11993 + : "+m" (v->counter)
11994 + : "r" (~(mask))
11995 + : "memory");
11996 +}
11997
11998 -#define atomic_set_mask(mask, addr) \
11999 - asm volatile(LOCK_PREFIX "orl %0,%1" \
12000 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12001 - : "memory")
12002 +static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12003 +{
12004 + asm volatile(LOCK_PREFIX "andl %1,%0"
12005 + : "+m" (v->counter)
12006 + : "r" (~(mask))
12007 + : "memory");
12008 +}
12009 +
12010 +static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12011 +{
12012 + asm volatile(LOCK_PREFIX "orl %1,%0"
12013 + : "+m" (v->counter)
12014 + : "r" (mask)
12015 + : "memory");
12016 +}
12017 +
12018 +static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12019 +{
12020 + asm volatile(LOCK_PREFIX "orl %1,%0"
12021 + : "+m" (v->counter)
12022 + : "r" (mask)
12023 + : "memory");
12024 +}
12025
12026 /* Atomic operations are already serializing on x86 */
12027 #define smp_mb__before_atomic_dec() barrier()
12028 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12029 index b154de7..aadebd8 100644
12030 --- a/arch/x86/include/asm/atomic64_32.h
12031 +++ b/arch/x86/include/asm/atomic64_32.h
12032 @@ -12,6 +12,14 @@ typedef struct {
12033 u64 __aligned(8) counter;
12034 } atomic64_t;
12035
12036 +#ifdef CONFIG_PAX_REFCOUNT
12037 +typedef struct {
12038 + u64 __aligned(8) counter;
12039 +} atomic64_unchecked_t;
12040 +#else
12041 +typedef atomic64_t atomic64_unchecked_t;
12042 +#endif
12043 +
12044 #define ATOMIC64_INIT(val) { (val) }
12045
12046 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12047 @@ -37,21 +45,31 @@ typedef struct {
12048 ATOMIC64_DECL_ONE(sym##_386)
12049
12050 ATOMIC64_DECL_ONE(add_386);
12051 +ATOMIC64_DECL_ONE(add_unchecked_386);
12052 ATOMIC64_DECL_ONE(sub_386);
12053 +ATOMIC64_DECL_ONE(sub_unchecked_386);
12054 ATOMIC64_DECL_ONE(inc_386);
12055 +ATOMIC64_DECL_ONE(inc_unchecked_386);
12056 ATOMIC64_DECL_ONE(dec_386);
12057 +ATOMIC64_DECL_ONE(dec_unchecked_386);
12058 #endif
12059
12060 #define alternative_atomic64(f, out, in...) \
12061 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12062
12063 ATOMIC64_DECL(read);
12064 +ATOMIC64_DECL(read_unchecked);
12065 ATOMIC64_DECL(set);
12066 +ATOMIC64_DECL(set_unchecked);
12067 ATOMIC64_DECL(xchg);
12068 ATOMIC64_DECL(add_return);
12069 +ATOMIC64_DECL(add_return_unchecked);
12070 ATOMIC64_DECL(sub_return);
12071 +ATOMIC64_DECL(sub_return_unchecked);
12072 ATOMIC64_DECL(inc_return);
12073 +ATOMIC64_DECL(inc_return_unchecked);
12074 ATOMIC64_DECL(dec_return);
12075 +ATOMIC64_DECL(dec_return_unchecked);
12076 ATOMIC64_DECL(dec_if_positive);
12077 ATOMIC64_DECL(inc_not_zero);
12078 ATOMIC64_DECL(add_unless);
12079 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12080 }
12081
12082 /**
12083 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12084 + * @p: pointer to type atomic64_unchecked_t
12085 + * @o: expected value
12086 + * @n: new value
12087 + *
12088 + * Atomically sets @v to @n if it was equal to @o and returns
12089 + * the old value.
12090 + */
12091 +
12092 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12093 +{
12094 + return cmpxchg64(&v->counter, o, n);
12095 +}
12096 +
12097 +/**
12098 * atomic64_xchg - xchg atomic64 variable
12099 * @v: pointer to type atomic64_t
12100 * @n: value to assign
12101 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12102 }
12103
12104 /**
12105 + * atomic64_set_unchecked - set atomic64 variable
12106 + * @v: pointer to type atomic64_unchecked_t
12107 + * @n: value to assign
12108 + *
12109 + * Atomically sets the value of @v to @n.
12110 + */
12111 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12112 +{
12113 + unsigned high = (unsigned)(i >> 32);
12114 + unsigned low = (unsigned)i;
12115 + alternative_atomic64(set, /* no output */,
12116 + "S" (v), "b" (low), "c" (high)
12117 + : "eax", "edx", "memory");
12118 +}
12119 +
12120 +/**
12121 * atomic64_read - read atomic64 variable
12122 * @v: pointer to type atomic64_t
12123 *
12124 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12125 }
12126
12127 /**
12128 + * atomic64_read_unchecked - read atomic64 variable
12129 + * @v: pointer to type atomic64_unchecked_t
12130 + *
12131 + * Atomically reads the value of @v and returns it.
12132 + */
12133 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12134 +{
12135 + long long r;
12136 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12137 + return r;
12138 + }
12139 +
12140 +/**
12141 * atomic64_add_return - add and return
12142 * @i: integer value to add
12143 * @v: pointer to type atomic64_t
12144 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12145 return i;
12146 }
12147
12148 +/**
12149 + * atomic64_add_return_unchecked - add and return
12150 + * @i: integer value to add
12151 + * @v: pointer to type atomic64_unchecked_t
12152 + *
12153 + * Atomically adds @i to @v and returns @i + *@v
12154 + */
12155 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12156 +{
12157 + alternative_atomic64(add_return_unchecked,
12158 + ASM_OUTPUT2("+A" (i), "+c" (v)),
12159 + ASM_NO_INPUT_CLOBBER("memory"));
12160 + return i;
12161 +}
12162 +
12163 /*
12164 * Other variants with different arithmetic operators:
12165 */
12166 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12167 return a;
12168 }
12169
12170 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12171 +{
12172 + long long a;
12173 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
12174 + "S" (v) : "memory", "ecx");
12175 + return a;
12176 +}
12177 +
12178 static inline long long atomic64_dec_return(atomic64_t *v)
12179 {
12180 long long a;
12181 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12182 }
12183
12184 /**
12185 + * atomic64_add_unchecked - add integer to atomic64 variable
12186 + * @i: integer value to add
12187 + * @v: pointer to type atomic64_unchecked_t
12188 + *
12189 + * Atomically adds @i to @v.
12190 + */
12191 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12192 +{
12193 + __alternative_atomic64(add_unchecked, add_return_unchecked,
12194 + ASM_OUTPUT2("+A" (i), "+c" (v)),
12195 + ASM_NO_INPUT_CLOBBER("memory"));
12196 + return i;
12197 +}
12198 +
12199 +/**
12200 * atomic64_sub - subtract the atomic64 variable
12201 * @i: integer value to subtract
12202 * @v: pointer to type atomic64_t
12203 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12204 index 0e1cbfc..5623683 100644
12205 --- a/arch/x86/include/asm/atomic64_64.h
12206 +++ b/arch/x86/include/asm/atomic64_64.h
12207 @@ -18,7 +18,19 @@
12208 */
12209 static inline long atomic64_read(const atomic64_t *v)
12210 {
12211 - return (*(volatile long *)&(v)->counter);
12212 + return (*(volatile const long *)&(v)->counter);
12213 +}
12214 +
12215 +/**
12216 + * atomic64_read_unchecked - read atomic64 variable
12217 + * @v: pointer of type atomic64_unchecked_t
12218 + *
12219 + * Atomically reads the value of @v.
12220 + * Doesn't imply a read memory barrier.
12221 + */
12222 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12223 +{
12224 + return (*(volatile const long *)&(v)->counter);
12225 }
12226
12227 /**
12228 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12229 }
12230
12231 /**
12232 + * atomic64_set_unchecked - set atomic64 variable
12233 + * @v: pointer to type atomic64_unchecked_t
12234 + * @i: required value
12235 + *
12236 + * Atomically sets the value of @v to @i.
12237 + */
12238 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12239 +{
12240 + v->counter = i;
12241 +}
12242 +
12243 +/**
12244 * atomic64_add - add integer to atomic64 variable
12245 * @i: integer value to add
12246 * @v: pointer to type atomic64_t
12247 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12248 */
12249 static inline void atomic64_add(long i, atomic64_t *v)
12250 {
12251 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
12252 +
12253 +#ifdef CONFIG_PAX_REFCOUNT
12254 + "jno 0f\n"
12255 + LOCK_PREFIX "subq %1,%0\n"
12256 + "int $4\n0:\n"
12257 + _ASM_EXTABLE(0b, 0b)
12258 +#endif
12259 +
12260 + : "=m" (v->counter)
12261 + : "er" (i), "m" (v->counter));
12262 +}
12263 +
12264 +/**
12265 + * atomic64_add_unchecked - add integer to atomic64 variable
12266 + * @i: integer value to add
12267 + * @v: pointer to type atomic64_unchecked_t
12268 + *
12269 + * Atomically adds @i to @v.
12270 + */
12271 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12272 +{
12273 asm volatile(LOCK_PREFIX "addq %1,%0"
12274 : "=m" (v->counter)
12275 : "er" (i), "m" (v->counter));
12276 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12277 */
12278 static inline void atomic64_sub(long i, atomic64_t *v)
12279 {
12280 - asm volatile(LOCK_PREFIX "subq %1,%0"
12281 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
12282 +
12283 +#ifdef CONFIG_PAX_REFCOUNT
12284 + "jno 0f\n"
12285 + LOCK_PREFIX "addq %1,%0\n"
12286 + "int $4\n0:\n"
12287 + _ASM_EXTABLE(0b, 0b)
12288 +#endif
12289 +
12290 + : "=m" (v->counter)
12291 + : "er" (i), "m" (v->counter));
12292 +}
12293 +
12294 +/**
12295 + * atomic64_sub_unchecked - subtract the atomic64 variable
12296 + * @i: integer value to subtract
12297 + * @v: pointer to type atomic64_unchecked_t
12298 + *
12299 + * Atomically subtracts @i from @v.
12300 + */
12301 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12302 +{
12303 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
12304 : "=m" (v->counter)
12305 : "er" (i), "m" (v->counter));
12306 }
12307 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12308 {
12309 unsigned char c;
12310
12311 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12312 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
12313 +
12314 +#ifdef CONFIG_PAX_REFCOUNT
12315 + "jno 0f\n"
12316 + LOCK_PREFIX "addq %2,%0\n"
12317 + "int $4\n0:\n"
12318 + _ASM_EXTABLE(0b, 0b)
12319 +#endif
12320 +
12321 + "sete %1\n"
12322 : "=m" (v->counter), "=qm" (c)
12323 : "er" (i), "m" (v->counter) : "memory");
12324 return c;
12325 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12326 */
12327 static inline void atomic64_inc(atomic64_t *v)
12328 {
12329 + asm volatile(LOCK_PREFIX "incq %0\n"
12330 +
12331 +#ifdef CONFIG_PAX_REFCOUNT
12332 + "jno 0f\n"
12333 + LOCK_PREFIX "decq %0\n"
12334 + "int $4\n0:\n"
12335 + _ASM_EXTABLE(0b, 0b)
12336 +#endif
12337 +
12338 + : "=m" (v->counter)
12339 + : "m" (v->counter));
12340 +}
12341 +
12342 +/**
12343 + * atomic64_inc_unchecked - increment atomic64 variable
12344 + * @v: pointer to type atomic64_unchecked_t
12345 + *
12346 + * Atomically increments @v by 1.
12347 + */
12348 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12349 +{
12350 asm volatile(LOCK_PREFIX "incq %0"
12351 : "=m" (v->counter)
12352 : "m" (v->counter));
12353 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
12354 */
12355 static inline void atomic64_dec(atomic64_t *v)
12356 {
12357 - asm volatile(LOCK_PREFIX "decq %0"
12358 + asm volatile(LOCK_PREFIX "decq %0\n"
12359 +
12360 +#ifdef CONFIG_PAX_REFCOUNT
12361 + "jno 0f\n"
12362 + LOCK_PREFIX "incq %0\n"
12363 + "int $4\n0:\n"
12364 + _ASM_EXTABLE(0b, 0b)
12365 +#endif
12366 +
12367 + : "=m" (v->counter)
12368 + : "m" (v->counter));
12369 +}
12370 +
12371 +/**
12372 + * atomic64_dec_unchecked - decrement atomic64 variable
12373 + * @v: pointer to type atomic64_t
12374 + *
12375 + * Atomically decrements @v by 1.
12376 + */
12377 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
12378 +{
12379 + asm volatile(LOCK_PREFIX "decq %0\n"
12380 : "=m" (v->counter)
12381 : "m" (v->counter));
12382 }
12383 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
12384 {
12385 unsigned char c;
12386
12387 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
12388 + asm volatile(LOCK_PREFIX "decq %0\n"
12389 +
12390 +#ifdef CONFIG_PAX_REFCOUNT
12391 + "jno 0f\n"
12392 + LOCK_PREFIX "incq %0\n"
12393 + "int $4\n0:\n"
12394 + _ASM_EXTABLE(0b, 0b)
12395 +#endif
12396 +
12397 + "sete %1\n"
12398 : "=m" (v->counter), "=qm" (c)
12399 : "m" (v->counter) : "memory");
12400 return c != 0;
12401 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
12402 {
12403 unsigned char c;
12404
12405 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
12406 + asm volatile(LOCK_PREFIX "incq %0\n"
12407 +
12408 +#ifdef CONFIG_PAX_REFCOUNT
12409 + "jno 0f\n"
12410 + LOCK_PREFIX "decq %0\n"
12411 + "int $4\n0:\n"
12412 + _ASM_EXTABLE(0b, 0b)
12413 +#endif
12414 +
12415 + "sete %1\n"
12416 : "=m" (v->counter), "=qm" (c)
12417 : "m" (v->counter) : "memory");
12418 return c != 0;
12419 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12420 {
12421 unsigned char c;
12422
12423 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
12424 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
12425 +
12426 +#ifdef CONFIG_PAX_REFCOUNT
12427 + "jno 0f\n"
12428 + LOCK_PREFIX "subq %2,%0\n"
12429 + "int $4\n0:\n"
12430 + _ASM_EXTABLE(0b, 0b)
12431 +#endif
12432 +
12433 + "sets %1\n"
12434 : "=m" (v->counter), "=qm" (c)
12435 : "er" (i), "m" (v->counter) : "memory");
12436 return c;
12437 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12438 */
12439 static inline long atomic64_add_return(long i, atomic64_t *v)
12440 {
12441 + return i + xadd_check_overflow(&v->counter, i);
12442 +}
12443 +
12444 +/**
12445 + * atomic64_add_return_unchecked - add and return
12446 + * @i: integer value to add
12447 + * @v: pointer to type atomic64_unchecked_t
12448 + *
12449 + * Atomically adds @i to @v and returns @i + @v
12450 + */
12451 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
12452 +{
12453 return i + xadd(&v->counter, i);
12454 }
12455
12456 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
12457 }
12458
12459 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
12460 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12461 +{
12462 + return atomic64_add_return_unchecked(1, v);
12463 +}
12464 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
12465
12466 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12467 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12468 return cmpxchg(&v->counter, old, new);
12469 }
12470
12471 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
12472 +{
12473 + return cmpxchg(&v->counter, old, new);
12474 +}
12475 +
12476 static inline long atomic64_xchg(atomic64_t *v, long new)
12477 {
12478 return xchg(&v->counter, new);
12479 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
12480 */
12481 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
12482 {
12483 - long c, old;
12484 + long c, old, new;
12485 c = atomic64_read(v);
12486 for (;;) {
12487 - if (unlikely(c == (u)))
12488 + if (unlikely(c == u))
12489 break;
12490 - old = atomic64_cmpxchg((v), c, c + (a));
12491 +
12492 + asm volatile("add %2,%0\n"
12493 +
12494 +#ifdef CONFIG_PAX_REFCOUNT
12495 + "jno 0f\n"
12496 + "sub %2,%0\n"
12497 + "int $4\n0:\n"
12498 + _ASM_EXTABLE(0b, 0b)
12499 +#endif
12500 +
12501 + : "=r" (new)
12502 + : "0" (c), "ir" (a));
12503 +
12504 + old = atomic64_cmpxchg(v, c, new);
12505 if (likely(old == c))
12506 break;
12507 c = old;
12508 }
12509 - return c != (u);
12510 + return c != u;
12511 }
12512
12513 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12514 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
12515 index 6dfd019..0c6699f 100644
12516 --- a/arch/x86/include/asm/bitops.h
12517 +++ b/arch/x86/include/asm/bitops.h
12518 @@ -40,7 +40,7 @@
12519 * a mask operation on a byte.
12520 */
12521 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
12522 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
12523 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
12524 #define CONST_MASK(nr) (1 << ((nr) & 7))
12525
12526 /**
12527 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
12528 index 4fa687a..60f2d39 100644
12529 --- a/arch/x86/include/asm/boot.h
12530 +++ b/arch/x86/include/asm/boot.h
12531 @@ -6,10 +6,15 @@
12532 #include <uapi/asm/boot.h>
12533
12534 /* Physical address where kernel should be loaded. */
12535 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12536 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12537 + (CONFIG_PHYSICAL_ALIGN - 1)) \
12538 & ~(CONFIG_PHYSICAL_ALIGN - 1))
12539
12540 +#ifndef __ASSEMBLY__
12541 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
12542 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
12543 +#endif
12544 +
12545 /* Minimum kernel alignment, as a power of two */
12546 #ifdef CONFIG_X86_64
12547 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
12548 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
12549 index 48f99f1..d78ebf9 100644
12550 --- a/arch/x86/include/asm/cache.h
12551 +++ b/arch/x86/include/asm/cache.h
12552 @@ -5,12 +5,13 @@
12553
12554 /* L1 cache line size */
12555 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12556 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12557 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12558
12559 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
12560 +#define __read_only __attribute__((__section__(".data..read_only")))
12561
12562 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
12563 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
12564 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
12565
12566 #ifdef CONFIG_X86_VSMP
12567 #ifdef CONFIG_SMP
12568 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
12569 index 9863ee3..4a1f8e1 100644
12570 --- a/arch/x86/include/asm/cacheflush.h
12571 +++ b/arch/x86/include/asm/cacheflush.h
12572 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
12573 unsigned long pg_flags = pg->flags & _PGMT_MASK;
12574
12575 if (pg_flags == _PGMT_DEFAULT)
12576 - return -1;
12577 + return ~0UL;
12578 else if (pg_flags == _PGMT_WC)
12579 return _PAGE_CACHE_WC;
12580 else if (pg_flags == _PGMT_UC_MINUS)
12581 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
12582 index 46fc474..b02b0f9 100644
12583 --- a/arch/x86/include/asm/checksum_32.h
12584 +++ b/arch/x86/include/asm/checksum_32.h
12585 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
12586 int len, __wsum sum,
12587 int *src_err_ptr, int *dst_err_ptr);
12588
12589 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
12590 + int len, __wsum sum,
12591 + int *src_err_ptr, int *dst_err_ptr);
12592 +
12593 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
12594 + int len, __wsum sum,
12595 + int *src_err_ptr, int *dst_err_ptr);
12596 +
12597 /*
12598 * Note: when you get a NULL pointer exception here this means someone
12599 * passed in an incorrect kernel address to one of these functions.
12600 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
12601 int *err_ptr)
12602 {
12603 might_sleep();
12604 - return csum_partial_copy_generic((__force void *)src, dst,
12605 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
12606 len, sum, err_ptr, NULL);
12607 }
12608
12609 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
12610 {
12611 might_sleep();
12612 if (access_ok(VERIFY_WRITE, dst, len))
12613 - return csum_partial_copy_generic(src, (__force void *)dst,
12614 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
12615 len, sum, NULL, err_ptr);
12616
12617 if (len)
12618 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
12619 index 8d871ea..c1a0dc9 100644
12620 --- a/arch/x86/include/asm/cmpxchg.h
12621 +++ b/arch/x86/include/asm/cmpxchg.h
12622 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
12623 __compiletime_error("Bad argument size for cmpxchg");
12624 extern void __xadd_wrong_size(void)
12625 __compiletime_error("Bad argument size for xadd");
12626 +extern void __xadd_check_overflow_wrong_size(void)
12627 + __compiletime_error("Bad argument size for xadd_check_overflow");
12628 extern void __add_wrong_size(void)
12629 __compiletime_error("Bad argument size for add");
12630 +extern void __add_check_overflow_wrong_size(void)
12631 + __compiletime_error("Bad argument size for add_check_overflow");
12632
12633 /*
12634 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
12635 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
12636 __ret; \
12637 })
12638
12639 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
12640 + ({ \
12641 + __typeof__ (*(ptr)) __ret = (arg); \
12642 + switch (sizeof(*(ptr))) { \
12643 + case __X86_CASE_L: \
12644 + asm volatile (lock #op "l %0, %1\n" \
12645 + "jno 0f\n" \
12646 + "mov %0,%1\n" \
12647 + "int $4\n0:\n" \
12648 + _ASM_EXTABLE(0b, 0b) \
12649 + : "+r" (__ret), "+m" (*(ptr)) \
12650 + : : "memory", "cc"); \
12651 + break; \
12652 + case __X86_CASE_Q: \
12653 + asm volatile (lock #op "q %q0, %1\n" \
12654 + "jno 0f\n" \
12655 + "mov %0,%1\n" \
12656 + "int $4\n0:\n" \
12657 + _ASM_EXTABLE(0b, 0b) \
12658 + : "+r" (__ret), "+m" (*(ptr)) \
12659 + : : "memory", "cc"); \
12660 + break; \
12661 + default: \
12662 + __ ## op ## _check_overflow_wrong_size(); \
12663 + } \
12664 + __ret; \
12665 + })
12666 +
12667 /*
12668 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
12669 * Since this is generally used to protect other memory information, we
12670 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
12671 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
12672 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
12673
12674 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
12675 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
12676 +
12677 #define __add(ptr, inc, lock) \
12678 ({ \
12679 __typeof__ (*(ptr)) __ret = (inc); \
12680 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
12681 index 2d9075e..b75a844 100644
12682 --- a/arch/x86/include/asm/cpufeature.h
12683 +++ b/arch/x86/include/asm/cpufeature.h
12684 @@ -206,7 +206,7 @@
12685 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
12686 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
12687 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
12688 -#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
12689 +#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
12690 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
12691 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
12692 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
12693 @@ -375,7 +375,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
12694 ".section .discard,\"aw\",@progbits\n"
12695 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
12696 ".previous\n"
12697 - ".section .altinstr_replacement,\"ax\"\n"
12698 + ".section .altinstr_replacement,\"a\"\n"
12699 "3: movb $1,%0\n"
12700 "4:\n"
12701 ".previous\n"
12702 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
12703 index 8bf1c06..f723dfd 100644
12704 --- a/arch/x86/include/asm/desc.h
12705 +++ b/arch/x86/include/asm/desc.h
12706 @@ -4,6 +4,7 @@
12707 #include <asm/desc_defs.h>
12708 #include <asm/ldt.h>
12709 #include <asm/mmu.h>
12710 +#include <asm/pgtable.h>
12711
12712 #include <linux/smp.h>
12713 #include <linux/percpu.h>
12714 @@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
12715
12716 desc->type = (info->read_exec_only ^ 1) << 1;
12717 desc->type |= info->contents << 2;
12718 + desc->type |= info->seg_not_present ^ 1;
12719
12720 desc->s = 1;
12721 desc->dpl = 0x3;
12722 @@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
12723 }
12724
12725 extern struct desc_ptr idt_descr;
12726 -extern gate_desc idt_table[];
12727 extern struct desc_ptr nmi_idt_descr;
12728 -extern gate_desc nmi_idt_table[];
12729 -
12730 -struct gdt_page {
12731 - struct desc_struct gdt[GDT_ENTRIES];
12732 -} __attribute__((aligned(PAGE_SIZE)));
12733 -
12734 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
12735 +extern gate_desc idt_table[256];
12736 +extern gate_desc nmi_idt_table[256];
12737
12738 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
12739 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
12740 {
12741 - return per_cpu(gdt_page, cpu).gdt;
12742 + return cpu_gdt_table[cpu];
12743 }
12744
12745 #ifdef CONFIG_X86_64
12746 @@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
12747 unsigned long base, unsigned dpl, unsigned flags,
12748 unsigned short seg)
12749 {
12750 - gate->a = (seg << 16) | (base & 0xffff);
12751 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
12752 + gate->gate.offset_low = base;
12753 + gate->gate.seg = seg;
12754 + gate->gate.reserved = 0;
12755 + gate->gate.type = type;
12756 + gate->gate.s = 0;
12757 + gate->gate.dpl = dpl;
12758 + gate->gate.p = 1;
12759 + gate->gate.offset_high = base >> 16;
12760 }
12761
12762 #endif
12763 @@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
12764
12765 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
12766 {
12767 + pax_open_kernel();
12768 memcpy(&idt[entry], gate, sizeof(*gate));
12769 + pax_close_kernel();
12770 }
12771
12772 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
12773 {
12774 + pax_open_kernel();
12775 memcpy(&ldt[entry], desc, 8);
12776 + pax_close_kernel();
12777 }
12778
12779 static inline void
12780 @@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
12781 default: size = sizeof(*gdt); break;
12782 }
12783
12784 + pax_open_kernel();
12785 memcpy(&gdt[entry], desc, size);
12786 + pax_close_kernel();
12787 }
12788
12789 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
12790 @@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
12791
12792 static inline void native_load_tr_desc(void)
12793 {
12794 + pax_open_kernel();
12795 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
12796 + pax_close_kernel();
12797 }
12798
12799 static inline void native_load_gdt(const struct desc_ptr *dtr)
12800 @@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
12801 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
12802 unsigned int i;
12803
12804 + pax_open_kernel();
12805 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
12806 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
12807 + pax_close_kernel();
12808 }
12809
12810 #define _LDT_empty(info) \
12811 @@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
12812 }
12813
12814 #ifdef CONFIG_X86_64
12815 -static inline void set_nmi_gate(int gate, void *addr)
12816 +static inline void set_nmi_gate(int gate, const void *addr)
12817 {
12818 gate_desc s;
12819
12820 @@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
12821 }
12822 #endif
12823
12824 -static inline void _set_gate(int gate, unsigned type, void *addr,
12825 +static inline void _set_gate(int gate, unsigned type, const void *addr,
12826 unsigned dpl, unsigned ist, unsigned seg)
12827 {
12828 gate_desc s;
12829 @@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
12830 * Pentium F0 0F bugfix can have resulted in the mapped
12831 * IDT being write-protected.
12832 */
12833 -static inline void set_intr_gate(unsigned int n, void *addr)
12834 +static inline void set_intr_gate(unsigned int n, const void *addr)
12835 {
12836 BUG_ON((unsigned)n > 0xFF);
12837 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
12838 @@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
12839 /*
12840 * This routine sets up an interrupt gate at directory privilege level 3.
12841 */
12842 -static inline void set_system_intr_gate(unsigned int n, void *addr)
12843 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
12844 {
12845 BUG_ON((unsigned)n > 0xFF);
12846 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
12847 }
12848
12849 -static inline void set_system_trap_gate(unsigned int n, void *addr)
12850 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
12851 {
12852 BUG_ON((unsigned)n > 0xFF);
12853 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
12854 }
12855
12856 -static inline void set_trap_gate(unsigned int n, void *addr)
12857 +static inline void set_trap_gate(unsigned int n, const void *addr)
12858 {
12859 BUG_ON((unsigned)n > 0xFF);
12860 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
12861 @@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
12862 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
12863 {
12864 BUG_ON((unsigned)n > 0xFF);
12865 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
12866 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
12867 }
12868
12869 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
12870 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
12871 {
12872 BUG_ON((unsigned)n > 0xFF);
12873 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
12874 }
12875
12876 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
12877 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
12878 {
12879 BUG_ON((unsigned)n > 0xFF);
12880 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
12881 }
12882
12883 +#ifdef CONFIG_X86_32
12884 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
12885 +{
12886 + struct desc_struct d;
12887 +
12888 + if (likely(limit))
12889 + limit = (limit - 1UL) >> PAGE_SHIFT;
12890 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
12891 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
12892 +}
12893 +#endif
12894 +
12895 #endif /* _ASM_X86_DESC_H */
12896 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
12897 index 278441f..b95a174 100644
12898 --- a/arch/x86/include/asm/desc_defs.h
12899 +++ b/arch/x86/include/asm/desc_defs.h
12900 @@ -31,6 +31,12 @@ struct desc_struct {
12901 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
12902 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
12903 };
12904 + struct {
12905 + u16 offset_low;
12906 + u16 seg;
12907 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
12908 + unsigned offset_high: 16;
12909 + } gate;
12910 };
12911 } __attribute__((packed));
12912
12913 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
12914 index 9c999c1..3860cb8 100644
12915 --- a/arch/x86/include/asm/elf.h
12916 +++ b/arch/x86/include/asm/elf.h
12917 @@ -243,7 +243,25 @@ extern int force_personality32;
12918 the loader. We need to make sure that it is out of the way of the program
12919 that it will "exec", and that there is sufficient room for the brk. */
12920
12921 +#ifdef CONFIG_PAX_SEGMEXEC
12922 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
12923 +#else
12924 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
12925 +#endif
12926 +
12927 +#ifdef CONFIG_PAX_ASLR
12928 +#ifdef CONFIG_X86_32
12929 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
12930 +
12931 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
12932 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
12933 +#else
12934 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
12935 +
12936 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
12937 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
12938 +#endif
12939 +#endif
12940
12941 /* This yields a mask that user programs can use to figure out what
12942 instruction set this CPU supports. This could be done in user space,
12943 @@ -296,16 +314,12 @@ do { \
12944
12945 #define ARCH_DLINFO \
12946 do { \
12947 - if (vdso_enabled) \
12948 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
12949 - (unsigned long)current->mm->context.vdso); \
12950 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
12951 } while (0)
12952
12953 #define ARCH_DLINFO_X32 \
12954 do { \
12955 - if (vdso_enabled) \
12956 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
12957 - (unsigned long)current->mm->context.vdso); \
12958 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
12959 } while (0)
12960
12961 #define AT_SYSINFO 32
12962 @@ -320,7 +334,7 @@ else \
12963
12964 #endif /* !CONFIG_X86_32 */
12965
12966 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
12967 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
12968
12969 #define VDSO_ENTRY \
12970 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
12971 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
12972 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
12973 #define compat_arch_setup_additional_pages syscall32_setup_pages
12974
12975 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
12976 -#define arch_randomize_brk arch_randomize_brk
12977 -
12978 /*
12979 * True on X86_32 or when emulating IA32 on X86_64
12980 */
12981 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
12982 index 75ce3f4..882e801 100644
12983 --- a/arch/x86/include/asm/emergency-restart.h
12984 +++ b/arch/x86/include/asm/emergency-restart.h
12985 @@ -13,6 +13,6 @@ enum reboot_type {
12986
12987 extern enum reboot_type reboot_type;
12988
12989 -extern void machine_emergency_restart(void);
12990 +extern void machine_emergency_restart(void) __noreturn;
12991
12992 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
12993 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
12994 index 41ab26e..a88c9e6 100644
12995 --- a/arch/x86/include/asm/fpu-internal.h
12996 +++ b/arch/x86/include/asm/fpu-internal.h
12997 @@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
12998 ({ \
12999 int err; \
13000 asm volatile(ASM_STAC "\n" \
13001 - "1:" #insn "\n\t" \
13002 + "1:" \
13003 + __copyuser_seg \
13004 + #insn "\n\t" \
13005 "2: " ASM_CLAC "\n" \
13006 ".section .fixup,\"ax\"\n" \
13007 "3: movl $-1,%[err]\n" \
13008 @@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13009 "emms\n\t" /* clear stack tags */
13010 "fildl %P[addr]", /* set F?P to defined value */
13011 X86_FEATURE_FXSAVE_LEAK,
13012 - [addr] "m" (tsk->thread.fpu.has_fpu));
13013 + [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13014
13015 return fpu_restore_checking(&tsk->thread.fpu);
13016 }
13017 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13018 index be27ba1..8f13ff9 100644
13019 --- a/arch/x86/include/asm/futex.h
13020 +++ b/arch/x86/include/asm/futex.h
13021 @@ -12,6 +12,7 @@
13022 #include <asm/smap.h>
13023
13024 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13025 + typecheck(u32 __user *, uaddr); \
13026 asm volatile("\t" ASM_STAC "\n" \
13027 "1:\t" insn "\n" \
13028 "2:\t" ASM_CLAC "\n" \
13029 @@ -20,15 +21,16 @@
13030 "\tjmp\t2b\n" \
13031 "\t.previous\n" \
13032 _ASM_EXTABLE(1b, 3b) \
13033 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13034 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13035 : "i" (-EFAULT), "0" (oparg), "1" (0))
13036
13037 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13038 + typecheck(u32 __user *, uaddr); \
13039 asm volatile("\t" ASM_STAC "\n" \
13040 "1:\tmovl %2, %0\n" \
13041 "\tmovl\t%0, %3\n" \
13042 "\t" insn "\n" \
13043 - "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13044 + "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13045 "\tjnz\t1b\n" \
13046 "3:\t" ASM_CLAC "\n" \
13047 "\t.section .fixup,\"ax\"\n" \
13048 @@ -38,7 +40,7 @@
13049 _ASM_EXTABLE(1b, 4b) \
13050 _ASM_EXTABLE(2b, 4b) \
13051 : "=&a" (oldval), "=&r" (ret), \
13052 - "+m" (*uaddr), "=&r" (tem) \
13053 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13054 : "r" (oparg), "i" (-EFAULT), "1" (0))
13055
13056 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13057 @@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13058
13059 switch (op) {
13060 case FUTEX_OP_SET:
13061 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13062 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13063 break;
13064 case FUTEX_OP_ADD:
13065 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13066 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13067 uaddr, oparg);
13068 break;
13069 case FUTEX_OP_OR:
13070 @@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13071 return -EFAULT;
13072
13073 asm volatile("\t" ASM_STAC "\n"
13074 - "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13075 + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13076 "2:\t" ASM_CLAC "\n"
13077 "\t.section .fixup, \"ax\"\n"
13078 "3:\tmov %3, %0\n"
13079 "\tjmp 2b\n"
13080 "\t.previous\n"
13081 _ASM_EXTABLE(1b, 3b)
13082 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13083 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13084 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13085 : "memory"
13086 );
13087 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13088 index eb92a6e..b98b2f4 100644
13089 --- a/arch/x86/include/asm/hw_irq.h
13090 +++ b/arch/x86/include/asm/hw_irq.h
13091 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
13092 extern void enable_IO_APIC(void);
13093
13094 /* Statistics */
13095 -extern atomic_t irq_err_count;
13096 -extern atomic_t irq_mis_count;
13097 +extern atomic_unchecked_t irq_err_count;
13098 +extern atomic_unchecked_t irq_mis_count;
13099
13100 /* EISA */
13101 extern void eisa_set_level_irq(unsigned int irq);
13102 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13103 index d8e8eef..15b1179 100644
13104 --- a/arch/x86/include/asm/io.h
13105 +++ b/arch/x86/include/asm/io.h
13106 @@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13107 return ioremap_nocache(offset, size);
13108 }
13109
13110 -extern void iounmap(volatile void __iomem *addr);
13111 +extern void iounmap(const volatile void __iomem *addr);
13112
13113 extern void set_iounmap_nonlazy(void);
13114
13115 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13116
13117 #include <linux/vmalloc.h>
13118
13119 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13120 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13121 +{
13122 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13123 +}
13124 +
13125 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13126 +{
13127 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13128 +}
13129 +
13130 /*
13131 * Convert a virtual cached pointer to an uncached pointer
13132 */
13133 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13134 index bba3cf8..06bc8da 100644
13135 --- a/arch/x86/include/asm/irqflags.h
13136 +++ b/arch/x86/include/asm/irqflags.h
13137 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13138 sti; \
13139 sysexit
13140
13141 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
13142 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13143 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
13144 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13145 +
13146 #else
13147 #define INTERRUPT_RETURN iret
13148 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13149 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13150 index d3ddd17..c9fb0cc 100644
13151 --- a/arch/x86/include/asm/kprobes.h
13152 +++ b/arch/x86/include/asm/kprobes.h
13153 @@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13154 #define RELATIVEJUMP_SIZE 5
13155 #define RELATIVECALL_OPCODE 0xe8
13156 #define RELATIVE_ADDR_SIZE 4
13157 -#define MAX_STACK_SIZE 64
13158 -#define MIN_STACK_SIZE(ADDR) \
13159 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13160 - THREAD_SIZE - (unsigned long)(ADDR))) \
13161 - ? (MAX_STACK_SIZE) \
13162 - : (((unsigned long)current_thread_info()) + \
13163 - THREAD_SIZE - (unsigned long)(ADDR)))
13164 +#define MAX_STACK_SIZE 64UL
13165 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13166
13167 #define flush_insn_slot(p) do { } while (0)
13168
13169 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13170 index 2d89e39..baee879 100644
13171 --- a/arch/x86/include/asm/local.h
13172 +++ b/arch/x86/include/asm/local.h
13173 @@ -10,33 +10,97 @@ typedef struct {
13174 atomic_long_t a;
13175 } local_t;
13176
13177 +typedef struct {
13178 + atomic_long_unchecked_t a;
13179 +} local_unchecked_t;
13180 +
13181 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13182
13183 #define local_read(l) atomic_long_read(&(l)->a)
13184 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13185 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13186 +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13187
13188 static inline void local_inc(local_t *l)
13189 {
13190 - asm volatile(_ASM_INC "%0"
13191 + asm volatile(_ASM_INC "%0\n"
13192 +
13193 +#ifdef CONFIG_PAX_REFCOUNT
13194 + "jno 0f\n"
13195 + _ASM_DEC "%0\n"
13196 + "int $4\n0:\n"
13197 + _ASM_EXTABLE(0b, 0b)
13198 +#endif
13199 +
13200 + : "+m" (l->a.counter));
13201 +}
13202 +
13203 +static inline void local_inc_unchecked(local_unchecked_t *l)
13204 +{
13205 + asm volatile(_ASM_INC "%0\n"
13206 : "+m" (l->a.counter));
13207 }
13208
13209 static inline void local_dec(local_t *l)
13210 {
13211 - asm volatile(_ASM_DEC "%0"
13212 + asm volatile(_ASM_DEC "%0\n"
13213 +
13214 +#ifdef CONFIG_PAX_REFCOUNT
13215 + "jno 0f\n"
13216 + _ASM_INC "%0\n"
13217 + "int $4\n0:\n"
13218 + _ASM_EXTABLE(0b, 0b)
13219 +#endif
13220 +
13221 + : "+m" (l->a.counter));
13222 +}
13223 +
13224 +static inline void local_dec_unchecked(local_unchecked_t *l)
13225 +{
13226 + asm volatile(_ASM_DEC "%0\n"
13227 : "+m" (l->a.counter));
13228 }
13229
13230 static inline void local_add(long i, local_t *l)
13231 {
13232 - asm volatile(_ASM_ADD "%1,%0"
13233 + asm volatile(_ASM_ADD "%1,%0\n"
13234 +
13235 +#ifdef CONFIG_PAX_REFCOUNT
13236 + "jno 0f\n"
13237 + _ASM_SUB "%1,%0\n"
13238 + "int $4\n0:\n"
13239 + _ASM_EXTABLE(0b, 0b)
13240 +#endif
13241 +
13242 + : "+m" (l->a.counter)
13243 + : "ir" (i));
13244 +}
13245 +
13246 +static inline void local_add_unchecked(long i, local_unchecked_t *l)
13247 +{
13248 + asm volatile(_ASM_ADD "%1,%0\n"
13249 : "+m" (l->a.counter)
13250 : "ir" (i));
13251 }
13252
13253 static inline void local_sub(long i, local_t *l)
13254 {
13255 - asm volatile(_ASM_SUB "%1,%0"
13256 + asm volatile(_ASM_SUB "%1,%0\n"
13257 +
13258 +#ifdef CONFIG_PAX_REFCOUNT
13259 + "jno 0f\n"
13260 + _ASM_ADD "%1,%0\n"
13261 + "int $4\n0:\n"
13262 + _ASM_EXTABLE(0b, 0b)
13263 +#endif
13264 +
13265 + : "+m" (l->a.counter)
13266 + : "ir" (i));
13267 +}
13268 +
13269 +static inline void local_sub_unchecked(long i, local_unchecked_t *l)
13270 +{
13271 + asm volatile(_ASM_SUB "%1,%0\n"
13272 : "+m" (l->a.counter)
13273 : "ir" (i));
13274 }
13275 @@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
13276 {
13277 unsigned char c;
13278
13279 - asm volatile(_ASM_SUB "%2,%0; sete %1"
13280 + asm volatile(_ASM_SUB "%2,%0\n"
13281 +
13282 +#ifdef CONFIG_PAX_REFCOUNT
13283 + "jno 0f\n"
13284 + _ASM_ADD "%2,%0\n"
13285 + "int $4\n0:\n"
13286 + _ASM_EXTABLE(0b, 0b)
13287 +#endif
13288 +
13289 + "sete %1\n"
13290 : "+m" (l->a.counter), "=qm" (c)
13291 : "ir" (i) : "memory");
13292 return c;
13293 @@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
13294 {
13295 unsigned char c;
13296
13297 - asm volatile(_ASM_DEC "%0; sete %1"
13298 + asm volatile(_ASM_DEC "%0\n"
13299 +
13300 +#ifdef CONFIG_PAX_REFCOUNT
13301 + "jno 0f\n"
13302 + _ASM_INC "%0\n"
13303 + "int $4\n0:\n"
13304 + _ASM_EXTABLE(0b, 0b)
13305 +#endif
13306 +
13307 + "sete %1\n"
13308 : "+m" (l->a.counter), "=qm" (c)
13309 : : "memory");
13310 return c != 0;
13311 @@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
13312 {
13313 unsigned char c;
13314
13315 - asm volatile(_ASM_INC "%0; sete %1"
13316 + asm volatile(_ASM_INC "%0\n"
13317 +
13318 +#ifdef CONFIG_PAX_REFCOUNT
13319 + "jno 0f\n"
13320 + _ASM_DEC "%0\n"
13321 + "int $4\n0:\n"
13322 + _ASM_EXTABLE(0b, 0b)
13323 +#endif
13324 +
13325 + "sete %1\n"
13326 : "+m" (l->a.counter), "=qm" (c)
13327 : : "memory");
13328 return c != 0;
13329 @@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
13330 {
13331 unsigned char c;
13332
13333 - asm volatile(_ASM_ADD "%2,%0; sets %1"
13334 + asm volatile(_ASM_ADD "%2,%0\n"
13335 +
13336 +#ifdef CONFIG_PAX_REFCOUNT
13337 + "jno 0f\n"
13338 + _ASM_SUB "%2,%0\n"
13339 + "int $4\n0:\n"
13340 + _ASM_EXTABLE(0b, 0b)
13341 +#endif
13342 +
13343 + "sets %1\n"
13344 : "+m" (l->a.counter), "=qm" (c)
13345 : "ir" (i) : "memory");
13346 return c;
13347 @@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
13348 static inline long local_add_return(long i, local_t *l)
13349 {
13350 long __i = i;
13351 + asm volatile(_ASM_XADD "%0, %1\n"
13352 +
13353 +#ifdef CONFIG_PAX_REFCOUNT
13354 + "jno 0f\n"
13355 + _ASM_MOV "%0,%1\n"
13356 + "int $4\n0:\n"
13357 + _ASM_EXTABLE(0b, 0b)
13358 +#endif
13359 +
13360 + : "+r" (i), "+m" (l->a.counter)
13361 + : : "memory");
13362 + return i + __i;
13363 +}
13364 +
13365 +/**
13366 + * local_add_return_unchecked - add and return
13367 + * @i: integer value to add
13368 + * @l: pointer to type local_unchecked_t
13369 + *
13370 + * Atomically adds @i to @l and returns @i + @l
13371 + */
13372 +static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
13373 +{
13374 + long __i = i;
13375 asm volatile(_ASM_XADD "%0, %1;"
13376 : "+r" (i), "+m" (l->a.counter)
13377 : : "memory");
13378 @@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
13379
13380 #define local_cmpxchg(l, o, n) \
13381 (cmpxchg_local(&((l)->a.counter), (o), (n)))
13382 +#define local_cmpxchg_unchecked(l, o, n) \
13383 + (cmpxchg_local(&((l)->a.counter), (o), (n)))
13384 /* Always has a lock prefix */
13385 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
13386
13387 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
13388 new file mode 100644
13389 index 0000000..2bfd3ba
13390 --- /dev/null
13391 +++ b/arch/x86/include/asm/mman.h
13392 @@ -0,0 +1,15 @@
13393 +#ifndef _X86_MMAN_H
13394 +#define _X86_MMAN_H
13395 +
13396 +#include <uapi/asm/mman.h>
13397 +
13398 +#ifdef __KERNEL__
13399 +#ifndef __ASSEMBLY__
13400 +#ifdef CONFIG_X86_32
13401 +#define arch_mmap_check i386_mmap_check
13402 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13403 +#endif
13404 +#endif
13405 +#endif
13406 +
13407 +#endif /* X86_MMAN_H */
13408 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
13409 index 5f55e69..e20bfb1 100644
13410 --- a/arch/x86/include/asm/mmu.h
13411 +++ b/arch/x86/include/asm/mmu.h
13412 @@ -9,7 +9,7 @@
13413 * we put the segment information here.
13414 */
13415 typedef struct {
13416 - void *ldt;
13417 + struct desc_struct *ldt;
13418 int size;
13419
13420 #ifdef CONFIG_X86_64
13421 @@ -18,7 +18,19 @@ typedef struct {
13422 #endif
13423
13424 struct mutex lock;
13425 - void *vdso;
13426 + unsigned long vdso;
13427 +
13428 +#ifdef CONFIG_X86_32
13429 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13430 + unsigned long user_cs_base;
13431 + unsigned long user_cs_limit;
13432 +
13433 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13434 + cpumask_t cpu_user_cs_mask;
13435 +#endif
13436 +
13437 +#endif
13438 +#endif
13439 } mm_context_t;
13440
13441 #ifdef CONFIG_SMP
13442 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
13443 index cdbf367..adb37ac 100644
13444 --- a/arch/x86/include/asm/mmu_context.h
13445 +++ b/arch/x86/include/asm/mmu_context.h
13446 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
13447
13448 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
13449 {
13450 +
13451 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13452 + unsigned int i;
13453 + pgd_t *pgd;
13454 +
13455 + pax_open_kernel();
13456 + pgd = get_cpu_pgd(smp_processor_id());
13457 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
13458 + set_pgd_batched(pgd+i, native_make_pgd(0));
13459 + pax_close_kernel();
13460 +#endif
13461 +
13462 #ifdef CONFIG_SMP
13463 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
13464 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
13465 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13466 struct task_struct *tsk)
13467 {
13468 unsigned cpu = smp_processor_id();
13469 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13470 + int tlbstate = TLBSTATE_OK;
13471 +#endif
13472
13473 if (likely(prev != next)) {
13474 #ifdef CONFIG_SMP
13475 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13476 + tlbstate = this_cpu_read(cpu_tlbstate.state);
13477 +#endif
13478 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13479 this_cpu_write(cpu_tlbstate.active_mm, next);
13480 #endif
13481 cpumask_set_cpu(cpu, mm_cpumask(next));
13482
13483 /* Re-load page tables */
13484 +#ifdef CONFIG_PAX_PER_CPU_PGD
13485 + pax_open_kernel();
13486 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13487 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13488 + pax_close_kernel();
13489 + load_cr3(get_cpu_pgd(cpu));
13490 +#else
13491 load_cr3(next->pgd);
13492 +#endif
13493
13494 /* stop flush ipis for the previous mm */
13495 cpumask_clear_cpu(cpu, mm_cpumask(prev));
13496 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13497 */
13498 if (unlikely(prev->context.ldt != next->context.ldt))
13499 load_LDT_nolock(&next->context);
13500 - }
13501 +
13502 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13503 + if (!(__supported_pte_mask & _PAGE_NX)) {
13504 + smp_mb__before_clear_bit();
13505 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
13506 + smp_mb__after_clear_bit();
13507 + cpu_set(cpu, next->context.cpu_user_cs_mask);
13508 + }
13509 +#endif
13510 +
13511 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13512 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
13513 + prev->context.user_cs_limit != next->context.user_cs_limit))
13514 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13515 #ifdef CONFIG_SMP
13516 + else if (unlikely(tlbstate != TLBSTATE_OK))
13517 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13518 +#endif
13519 +#endif
13520 +
13521 + }
13522 else {
13523 +
13524 +#ifdef CONFIG_PAX_PER_CPU_PGD
13525 + pax_open_kernel();
13526 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13527 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13528 + pax_close_kernel();
13529 + load_cr3(get_cpu_pgd(cpu));
13530 +#endif
13531 +
13532 +#ifdef CONFIG_SMP
13533 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13534 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
13535
13536 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13537 * tlb flush IPI delivery. We must reload CR3
13538 * to make sure to use no freed page tables.
13539 */
13540 +
13541 +#ifndef CONFIG_PAX_PER_CPU_PGD
13542 load_cr3(next->pgd);
13543 +#endif
13544 +
13545 load_LDT_nolock(&next->context);
13546 +
13547 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
13548 + if (!(__supported_pte_mask & _PAGE_NX))
13549 + cpu_set(cpu, next->context.cpu_user_cs_mask);
13550 +#endif
13551 +
13552 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13553 +#ifdef CONFIG_PAX_PAGEEXEC
13554 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
13555 +#endif
13556 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13557 +#endif
13558 +
13559 }
13560 +#endif
13561 }
13562 -#endif
13563 }
13564
13565 #define activate_mm(prev, next) \
13566 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
13567 index e3b7819..b257c64 100644
13568 --- a/arch/x86/include/asm/module.h
13569 +++ b/arch/x86/include/asm/module.h
13570 @@ -5,6 +5,7 @@
13571
13572 #ifdef CONFIG_X86_64
13573 /* X86_64 does not define MODULE_PROC_FAMILY */
13574 +#define MODULE_PROC_FAMILY ""
13575 #elif defined CONFIG_M486
13576 #define MODULE_PROC_FAMILY "486 "
13577 #elif defined CONFIG_M586
13578 @@ -57,8 +58,20 @@
13579 #error unknown processor family
13580 #endif
13581
13582 -#ifdef CONFIG_X86_32
13583 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
13584 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
13585 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
13586 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
13587 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
13588 +#else
13589 +#define MODULE_PAX_KERNEXEC ""
13590 #endif
13591
13592 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13593 +#define MODULE_PAX_UDEREF "UDEREF "
13594 +#else
13595 +#define MODULE_PAX_UDEREF ""
13596 +#endif
13597 +
13598 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
13599 +
13600 #endif /* _ASM_X86_MODULE_H */
13601 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
13602 index 320f7bb..e89f8f8 100644
13603 --- a/arch/x86/include/asm/page_64_types.h
13604 +++ b/arch/x86/include/asm/page_64_types.h
13605 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
13606
13607 /* duplicated to the one in bootmem.h */
13608 extern unsigned long max_pfn;
13609 -extern unsigned long phys_base;
13610 +extern const unsigned long phys_base;
13611
13612 extern unsigned long __phys_addr(unsigned long);
13613 #define __phys_reloc_hide(x) (x)
13614 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
13615 index 5edd174..9cf5821 100644
13616 --- a/arch/x86/include/asm/paravirt.h
13617 +++ b/arch/x86/include/asm/paravirt.h
13618 @@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
13619 val);
13620 }
13621
13622 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
13623 +{
13624 + pgdval_t val = native_pgd_val(pgd);
13625 +
13626 + if (sizeof(pgdval_t) > sizeof(long))
13627 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
13628 + val, (u64)val >> 32);
13629 + else
13630 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
13631 + val);
13632 +}
13633 +
13634 static inline void pgd_clear(pgd_t *pgdp)
13635 {
13636 set_pgd(pgdp, __pgd(0));
13637 @@ -711,6 +723,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
13638 pv_mmu_ops.set_fixmap(idx, phys, flags);
13639 }
13640
13641 +#ifdef CONFIG_PAX_KERNEXEC
13642 +static inline unsigned long pax_open_kernel(void)
13643 +{
13644 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
13645 +}
13646 +
13647 +static inline unsigned long pax_close_kernel(void)
13648 +{
13649 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
13650 +}
13651 +#else
13652 +static inline unsigned long pax_open_kernel(void) { return 0; }
13653 +static inline unsigned long pax_close_kernel(void) { return 0; }
13654 +#endif
13655 +
13656 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
13657
13658 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
13659 @@ -927,7 +954,7 @@ extern void default_banner(void);
13660
13661 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
13662 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
13663 -#define PARA_INDIRECT(addr) *%cs:addr
13664 +#define PARA_INDIRECT(addr) *%ss:addr
13665 #endif
13666
13667 #define INTERRUPT_RETURN \
13668 @@ -1002,6 +1029,21 @@ extern void default_banner(void);
13669 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
13670 CLBR_NONE, \
13671 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
13672 +
13673 +#define GET_CR0_INTO_RDI \
13674 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
13675 + mov %rax,%rdi
13676 +
13677 +#define SET_RDI_INTO_CR0 \
13678 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13679 +
13680 +#define GET_CR3_INTO_RDI \
13681 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
13682 + mov %rax,%rdi
13683 +
13684 +#define SET_RDI_INTO_CR3 \
13685 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
13686 +
13687 #endif /* CONFIG_X86_32 */
13688
13689 #endif /* __ASSEMBLY__ */
13690 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
13691 index 142236e..5446ffbc 100644
13692 --- a/arch/x86/include/asm/paravirt_types.h
13693 +++ b/arch/x86/include/asm/paravirt_types.h
13694 @@ -84,7 +84,7 @@ struct pv_init_ops {
13695 */
13696 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
13697 unsigned long addr, unsigned len);
13698 -};
13699 +} __no_const;
13700
13701
13702 struct pv_lazy_ops {
13703 @@ -97,7 +97,7 @@ struct pv_time_ops {
13704 unsigned long long (*sched_clock)(void);
13705 unsigned long long (*steal_clock)(int cpu);
13706 unsigned long (*get_tsc_khz)(void);
13707 -};
13708 +} __no_const;
13709
13710 struct pv_cpu_ops {
13711 /* hooks for various privileged instructions */
13712 @@ -191,7 +191,7 @@ struct pv_cpu_ops {
13713
13714 void (*start_context_switch)(struct task_struct *prev);
13715 void (*end_context_switch)(struct task_struct *next);
13716 -};
13717 +} __no_const;
13718
13719 struct pv_irq_ops {
13720 /*
13721 @@ -222,7 +222,7 @@ struct pv_apic_ops {
13722 unsigned long start_eip,
13723 unsigned long start_esp);
13724 #endif
13725 -};
13726 +} __no_const;
13727
13728 struct pv_mmu_ops {
13729 unsigned long (*read_cr2)(void);
13730 @@ -312,6 +312,7 @@ struct pv_mmu_ops {
13731 struct paravirt_callee_save make_pud;
13732
13733 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
13734 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
13735 #endif /* PAGETABLE_LEVELS == 4 */
13736 #endif /* PAGETABLE_LEVELS >= 3 */
13737
13738 @@ -323,6 +324,12 @@ struct pv_mmu_ops {
13739 an mfn. We can tell which is which from the index. */
13740 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
13741 phys_addr_t phys, pgprot_t flags);
13742 +
13743 +#ifdef CONFIG_PAX_KERNEXEC
13744 + unsigned long (*pax_open_kernel)(void);
13745 + unsigned long (*pax_close_kernel)(void);
13746 +#endif
13747 +
13748 };
13749
13750 struct arch_spinlock;
13751 @@ -333,7 +340,7 @@ struct pv_lock_ops {
13752 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
13753 int (*spin_trylock)(struct arch_spinlock *lock);
13754 void (*spin_unlock)(struct arch_spinlock *lock);
13755 -};
13756 +} __no_const;
13757
13758 /* This contains all the paravirt structures: we get a convenient
13759 * number for each function using the offset which we use to indicate
13760 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
13761 index b4389a4..7024269 100644
13762 --- a/arch/x86/include/asm/pgalloc.h
13763 +++ b/arch/x86/include/asm/pgalloc.h
13764 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
13765 pmd_t *pmd, pte_t *pte)
13766 {
13767 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
13768 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
13769 +}
13770 +
13771 +static inline void pmd_populate_user(struct mm_struct *mm,
13772 + pmd_t *pmd, pte_t *pte)
13773 +{
13774 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
13775 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
13776 }
13777
13778 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
13779
13780 #ifdef CONFIG_X86_PAE
13781 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
13782 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
13783 +{
13784 + pud_populate(mm, pudp, pmd);
13785 +}
13786 #else /* !CONFIG_X86_PAE */
13787 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
13788 {
13789 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
13790 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
13791 }
13792 +
13793 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
13794 +{
13795 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
13796 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
13797 +}
13798 #endif /* CONFIG_X86_PAE */
13799
13800 #if PAGETABLE_LEVELS > 3
13801 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
13802 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
13803 }
13804
13805 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
13806 +{
13807 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
13808 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
13809 +}
13810 +
13811 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
13812 {
13813 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
13814 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
13815 index f2b489c..4f7e2e5 100644
13816 --- a/arch/x86/include/asm/pgtable-2level.h
13817 +++ b/arch/x86/include/asm/pgtable-2level.h
13818 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
13819
13820 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
13821 {
13822 + pax_open_kernel();
13823 *pmdp = pmd;
13824 + pax_close_kernel();
13825 }
13826
13827 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
13828 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
13829 index 4cc9f2b..5fd9226 100644
13830 --- a/arch/x86/include/asm/pgtable-3level.h
13831 +++ b/arch/x86/include/asm/pgtable-3level.h
13832 @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
13833
13834 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
13835 {
13836 + pax_open_kernel();
13837 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
13838 + pax_close_kernel();
13839 }
13840
13841 static inline void native_set_pud(pud_t *pudp, pud_t pud)
13842 {
13843 + pax_open_kernel();
13844 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
13845 + pax_close_kernel();
13846 }
13847
13848 /*
13849 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
13850 index 1c1a955..50f828c 100644
13851 --- a/arch/x86/include/asm/pgtable.h
13852 +++ b/arch/x86/include/asm/pgtable.h
13853 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
13854
13855 #ifndef __PAGETABLE_PUD_FOLDED
13856 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
13857 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
13858 #define pgd_clear(pgd) native_pgd_clear(pgd)
13859 #endif
13860
13861 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
13862
13863 #define arch_end_context_switch(prev) do {} while(0)
13864
13865 +#define pax_open_kernel() native_pax_open_kernel()
13866 +#define pax_close_kernel() native_pax_close_kernel()
13867 #endif /* CONFIG_PARAVIRT */
13868
13869 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
13870 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
13871 +
13872 +#ifdef CONFIG_PAX_KERNEXEC
13873 +static inline unsigned long native_pax_open_kernel(void)
13874 +{
13875 + unsigned long cr0;
13876 +
13877 + preempt_disable();
13878 + barrier();
13879 + cr0 = read_cr0() ^ X86_CR0_WP;
13880 + BUG_ON(cr0 & X86_CR0_WP);
13881 + write_cr0(cr0);
13882 + return cr0 ^ X86_CR0_WP;
13883 +}
13884 +
13885 +static inline unsigned long native_pax_close_kernel(void)
13886 +{
13887 + unsigned long cr0;
13888 +
13889 + cr0 = read_cr0() ^ X86_CR0_WP;
13890 + BUG_ON(!(cr0 & X86_CR0_WP));
13891 + write_cr0(cr0);
13892 + barrier();
13893 + preempt_enable_no_resched();
13894 + return cr0 ^ X86_CR0_WP;
13895 +}
13896 +#else
13897 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
13898 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
13899 +#endif
13900 +
13901 /*
13902 * The following only work if pte_present() is true.
13903 * Undefined behaviour if not..
13904 */
13905 +static inline int pte_user(pte_t pte)
13906 +{
13907 + return pte_val(pte) & _PAGE_USER;
13908 +}
13909 +
13910 static inline int pte_dirty(pte_t pte)
13911 {
13912 return pte_flags(pte) & _PAGE_DIRTY;
13913 @@ -200,9 +240,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
13914 return pte_clear_flags(pte, _PAGE_RW);
13915 }
13916
13917 +static inline pte_t pte_mkread(pte_t pte)
13918 +{
13919 + return __pte(pte_val(pte) | _PAGE_USER);
13920 +}
13921 +
13922 static inline pte_t pte_mkexec(pte_t pte)
13923 {
13924 - return pte_clear_flags(pte, _PAGE_NX);
13925 +#ifdef CONFIG_X86_PAE
13926 + if (__supported_pte_mask & _PAGE_NX)
13927 + return pte_clear_flags(pte, _PAGE_NX);
13928 + else
13929 +#endif
13930 + return pte_set_flags(pte, _PAGE_USER);
13931 +}
13932 +
13933 +static inline pte_t pte_exprotect(pte_t pte)
13934 +{
13935 +#ifdef CONFIG_X86_PAE
13936 + if (__supported_pte_mask & _PAGE_NX)
13937 + return pte_set_flags(pte, _PAGE_NX);
13938 + else
13939 +#endif
13940 + return pte_clear_flags(pte, _PAGE_USER);
13941 }
13942
13943 static inline pte_t pte_mkdirty(pte_t pte)
13944 @@ -394,6 +454,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
13945 #endif
13946
13947 #ifndef __ASSEMBLY__
13948 +
13949 +#ifdef CONFIG_PAX_PER_CPU_PGD
13950 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
13951 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
13952 +{
13953 + return cpu_pgd[cpu];
13954 +}
13955 +#endif
13956 +
13957 #include <linux/mm_types.h>
13958
13959 static inline int pte_none(pte_t pte)
13960 @@ -583,7 +652,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
13961
13962 static inline int pgd_bad(pgd_t pgd)
13963 {
13964 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
13965 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
13966 }
13967
13968 static inline int pgd_none(pgd_t pgd)
13969 @@ -606,7 +675,12 @@ static inline int pgd_none(pgd_t pgd)
13970 * pgd_offset() returns a (pgd_t *)
13971 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
13972 */
13973 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
13974 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
13975 +
13976 +#ifdef CONFIG_PAX_PER_CPU_PGD
13977 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
13978 +#endif
13979 +
13980 /*
13981 * a shortcut which implies the use of the kernel's pgd, instead
13982 * of a process's
13983 @@ -617,6 +691,20 @@ static inline int pgd_none(pgd_t pgd)
13984 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
13985 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
13986
13987 +#ifdef CONFIG_X86_32
13988 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
13989 +#else
13990 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
13991 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
13992 +
13993 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13994 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
13995 +#else
13996 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
13997 +#endif
13998 +
13999 +#endif
14000 +
14001 #ifndef __ASSEMBLY__
14002
14003 extern int direct_gbpages;
14004 @@ -781,11 +869,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14005 * dst and src can be on the same page, but the range must not overlap,
14006 * and must not cross a page boundary.
14007 */
14008 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14009 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14010 {
14011 - memcpy(dst, src, count * sizeof(pgd_t));
14012 + pax_open_kernel();
14013 + while (count--)
14014 + *dst++ = *src++;
14015 + pax_close_kernel();
14016 }
14017
14018 +#ifdef CONFIG_PAX_PER_CPU_PGD
14019 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14020 +#endif
14021 +
14022 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14023 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14024 +#else
14025 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14026 +#endif
14027
14028 #include <asm-generic/pgtable.h>
14029 #endif /* __ASSEMBLY__ */
14030 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14031 index 8faa215..a8a17ea 100644
14032 --- a/arch/x86/include/asm/pgtable_32.h
14033 +++ b/arch/x86/include/asm/pgtable_32.h
14034 @@ -25,9 +25,6 @@
14035 struct mm_struct;
14036 struct vm_area_struct;
14037
14038 -extern pgd_t swapper_pg_dir[1024];
14039 -extern pgd_t initial_page_table[1024];
14040 -
14041 static inline void pgtable_cache_init(void) { }
14042 static inline void check_pgt_cache(void) { }
14043 void paging_init(void);
14044 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14045 # include <asm/pgtable-2level.h>
14046 #endif
14047
14048 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14049 +extern pgd_t initial_page_table[PTRS_PER_PGD];
14050 +#ifdef CONFIG_X86_PAE
14051 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14052 +#endif
14053 +
14054 #if defined(CONFIG_HIGHPTE)
14055 #define pte_offset_map(dir, address) \
14056 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14057 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14058 /* Clear a kernel PTE and flush it from the TLB */
14059 #define kpte_clear_flush(ptep, vaddr) \
14060 do { \
14061 + pax_open_kernel(); \
14062 pte_clear(&init_mm, (vaddr), (ptep)); \
14063 + pax_close_kernel(); \
14064 __flush_tlb_one((vaddr)); \
14065 } while (0)
14066
14067 @@ -75,6 +80,9 @@ do { \
14068
14069 #endif /* !__ASSEMBLY__ */
14070
14071 +#define HAVE_ARCH_UNMAPPED_AREA
14072 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14073 +
14074 /*
14075 * kern_addr_valid() is (1) for FLATMEM and (0) for
14076 * SPARSEMEM and DISCONTIGMEM
14077 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14078 index ed5903b..c7fe163 100644
14079 --- a/arch/x86/include/asm/pgtable_32_types.h
14080 +++ b/arch/x86/include/asm/pgtable_32_types.h
14081 @@ -8,7 +8,7 @@
14082 */
14083 #ifdef CONFIG_X86_PAE
14084 # include <asm/pgtable-3level_types.h>
14085 -# define PMD_SIZE (1UL << PMD_SHIFT)
14086 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14087 # define PMD_MASK (~(PMD_SIZE - 1))
14088 #else
14089 # include <asm/pgtable-2level_types.h>
14090 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14091 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14092 #endif
14093
14094 +#ifdef CONFIG_PAX_KERNEXEC
14095 +#ifndef __ASSEMBLY__
14096 +extern unsigned char MODULES_EXEC_VADDR[];
14097 +extern unsigned char MODULES_EXEC_END[];
14098 +#endif
14099 +#include <asm/boot.h>
14100 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14101 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14102 +#else
14103 +#define ktla_ktva(addr) (addr)
14104 +#define ktva_ktla(addr) (addr)
14105 +#endif
14106 +
14107 #define MODULES_VADDR VMALLOC_START
14108 #define MODULES_END VMALLOC_END
14109 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
14110 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14111 index 47356f9..deb94a2 100644
14112 --- a/arch/x86/include/asm/pgtable_64.h
14113 +++ b/arch/x86/include/asm/pgtable_64.h
14114 @@ -16,10 +16,14 @@
14115
14116 extern pud_t level3_kernel_pgt[512];
14117 extern pud_t level3_ident_pgt[512];
14118 +extern pud_t level3_vmalloc_start_pgt[512];
14119 +extern pud_t level3_vmalloc_end_pgt[512];
14120 +extern pud_t level3_vmemmap_pgt[512];
14121 +extern pud_t level2_vmemmap_pgt[512];
14122 extern pmd_t level2_kernel_pgt[512];
14123 extern pmd_t level2_fixmap_pgt[512];
14124 -extern pmd_t level2_ident_pgt[512];
14125 -extern pgd_t init_level4_pgt[];
14126 +extern pmd_t level2_ident_pgt[512*2];
14127 +extern pgd_t init_level4_pgt[512];
14128
14129 #define swapper_pg_dir init_level4_pgt
14130
14131 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14132
14133 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14134 {
14135 + pax_open_kernel();
14136 *pmdp = pmd;
14137 + pax_close_kernel();
14138 }
14139
14140 static inline void native_pmd_clear(pmd_t *pmd)
14141 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14142
14143 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14144 {
14145 + pax_open_kernel();
14146 *pudp = pud;
14147 + pax_close_kernel();
14148 }
14149
14150 static inline void native_pud_clear(pud_t *pud)
14151 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14152
14153 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14154 {
14155 + pax_open_kernel();
14156 + *pgdp = pgd;
14157 + pax_close_kernel();
14158 +}
14159 +
14160 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14161 +{
14162 *pgdp = pgd;
14163 }
14164
14165 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14166 index 766ea16..5b96cb3 100644
14167 --- a/arch/x86/include/asm/pgtable_64_types.h
14168 +++ b/arch/x86/include/asm/pgtable_64_types.h
14169 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
14170 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14171 #define MODULES_END _AC(0xffffffffff000000, UL)
14172 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14173 +#define MODULES_EXEC_VADDR MODULES_VADDR
14174 +#define MODULES_EXEC_END MODULES_END
14175 +
14176 +#define ktla_ktva(addr) (addr)
14177 +#define ktva_ktla(addr) (addr)
14178
14179 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
14180 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14181 index 3c32db8..1ddccf5 100644
14182 --- a/arch/x86/include/asm/pgtable_types.h
14183 +++ b/arch/x86/include/asm/pgtable_types.h
14184 @@ -16,13 +16,12 @@
14185 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14186 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14187 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14188 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14189 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14190 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14191 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14192 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14193 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14194 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14195 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14196 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14197 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14198 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14199
14200 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14201 @@ -40,7 +39,6 @@
14202 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14203 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14204 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14205 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14206 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14207 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14208 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14209 @@ -57,8 +55,10 @@
14210
14211 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14212 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14213 -#else
14214 +#elif defined(CONFIG_KMEMCHECK)
14215 #define _PAGE_NX (_AT(pteval_t, 0))
14216 +#else
14217 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14218 #endif
14219
14220 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14221 @@ -116,6 +116,9 @@
14222 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14223 _PAGE_ACCESSED)
14224
14225 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
14226 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
14227 +
14228 #define __PAGE_KERNEL_EXEC \
14229 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
14230 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
14231 @@ -126,7 +129,7 @@
14232 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
14233 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
14234 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
14235 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
14236 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
14237 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
14238 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
14239 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
14240 @@ -188,8 +191,8 @@
14241 * bits are combined, this will alow user to access the high address mapped
14242 * VDSO in the presence of CONFIG_COMPAT_VDSO
14243 */
14244 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
14245 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
14246 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14247 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14248 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
14249 #endif
14250
14251 @@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
14252 {
14253 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
14254 }
14255 +#endif
14256
14257 +#if PAGETABLE_LEVELS == 3
14258 +#include <asm-generic/pgtable-nopud.h>
14259 +#endif
14260 +
14261 +#if PAGETABLE_LEVELS == 2
14262 +#include <asm-generic/pgtable-nopmd.h>
14263 +#endif
14264 +
14265 +#ifndef __ASSEMBLY__
14266 #if PAGETABLE_LEVELS > 3
14267 typedef struct { pudval_t pud; } pud_t;
14268
14269 @@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
14270 return pud.pud;
14271 }
14272 #else
14273 -#include <asm-generic/pgtable-nopud.h>
14274 -
14275 static inline pudval_t native_pud_val(pud_t pud)
14276 {
14277 return native_pgd_val(pud.pgd);
14278 @@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
14279 return pmd.pmd;
14280 }
14281 #else
14282 -#include <asm-generic/pgtable-nopmd.h>
14283 -
14284 static inline pmdval_t native_pmd_val(pmd_t pmd)
14285 {
14286 return native_pgd_val(pmd.pud.pgd);
14287 @@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
14288
14289 extern pteval_t __supported_pte_mask;
14290 extern void set_nx(void);
14291 -extern int nx_enabled;
14292
14293 #define pgprot_writecombine pgprot_writecombine
14294 extern pgprot_t pgprot_writecombine(pgprot_t prot);
14295 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
14296 index 888184b..a07ac89 100644
14297 --- a/arch/x86/include/asm/processor.h
14298 +++ b/arch/x86/include/asm/processor.h
14299 @@ -287,7 +287,7 @@ struct tss_struct {
14300
14301 } ____cacheline_aligned;
14302
14303 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
14304 +extern struct tss_struct init_tss[NR_CPUS];
14305
14306 /*
14307 * Save the original ist values for checking stack pointers during debugging
14308 @@ -827,11 +827,18 @@ static inline void spin_lock_prefetch(const void *x)
14309 */
14310 #define TASK_SIZE PAGE_OFFSET
14311 #define TASK_SIZE_MAX TASK_SIZE
14312 +
14313 +#ifdef CONFIG_PAX_SEGMEXEC
14314 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
14315 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
14316 +#else
14317 #define STACK_TOP TASK_SIZE
14318 -#define STACK_TOP_MAX STACK_TOP
14319 +#endif
14320 +
14321 +#define STACK_TOP_MAX TASK_SIZE
14322
14323 #define INIT_THREAD { \
14324 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
14325 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14326 .vm86_info = NULL, \
14327 .sysenter_cs = __KERNEL_CS, \
14328 .io_bitmap_ptr = NULL, \
14329 @@ -845,7 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
14330 */
14331 #define INIT_TSS { \
14332 .x86_tss = { \
14333 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
14334 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14335 .ss0 = __KERNEL_DS, \
14336 .ss1 = __KERNEL_CS, \
14337 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
14338 @@ -856,11 +863,7 @@ static inline void spin_lock_prefetch(const void *x)
14339 extern unsigned long thread_saved_pc(struct task_struct *tsk);
14340
14341 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
14342 -#define KSTK_TOP(info) \
14343 -({ \
14344 - unsigned long *__ptr = (unsigned long *)(info); \
14345 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
14346 -})
14347 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
14348
14349 /*
14350 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
14351 @@ -875,7 +878,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14352 #define task_pt_regs(task) \
14353 ({ \
14354 struct pt_regs *__regs__; \
14355 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
14356 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
14357 __regs__ - 1; \
14358 })
14359
14360 @@ -885,13 +888,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14361 /*
14362 * User space process size. 47bits minus one guard page.
14363 */
14364 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
14365 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
14366
14367 /* This decides where the kernel will search for a free chunk of vm
14368 * space during mmap's.
14369 */
14370 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
14371 - 0xc0000000 : 0xFFFFe000)
14372 + 0xc0000000 : 0xFFFFf000)
14373
14374 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
14375 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
14376 @@ -902,11 +905,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14377 #define STACK_TOP_MAX TASK_SIZE_MAX
14378
14379 #define INIT_THREAD { \
14380 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14381 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14382 }
14383
14384 #define INIT_TSS { \
14385 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14386 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14387 }
14388
14389 /*
14390 @@ -934,6 +937,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
14391 */
14392 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
14393
14394 +#ifdef CONFIG_PAX_SEGMEXEC
14395 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
14396 +#endif
14397 +
14398 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
14399
14400 /* Get/set a process' ability to use the timestamp counter instruction */
14401 @@ -994,12 +1001,12 @@ extern bool cpu_has_amd_erratum(const int *);
14402 #define cpu_has_amd_erratum(x) (false)
14403 #endif /* CONFIG_CPU_SUP_AMD */
14404
14405 -extern unsigned long arch_align_stack(unsigned long sp);
14406 +#define arch_align_stack(x) ((x) & ~0xfUL)
14407 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
14408
14409 void default_idle(void);
14410 bool set_pm_idle_to_default(void);
14411
14412 -void stop_this_cpu(void *dummy);
14413 +void stop_this_cpu(void *dummy) __noreturn;
14414
14415 #endif /* _ASM_X86_PROCESSOR_H */
14416 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
14417 index 942a086..6c26446 100644
14418 --- a/arch/x86/include/asm/ptrace.h
14419 +++ b/arch/x86/include/asm/ptrace.h
14420 @@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
14421 }
14422
14423 /*
14424 - * user_mode_vm(regs) determines whether a register set came from user mode.
14425 + * user_mode(regs) determines whether a register set came from user mode.
14426 * This is true if V8086 mode was enabled OR if the register set was from
14427 * protected mode with RPL-3 CS value. This tricky test checks that with
14428 * one comparison. Many places in the kernel can bypass this full check
14429 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
14430 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
14431 + * be used.
14432 */
14433 -static inline int user_mode(struct pt_regs *regs)
14434 +static inline int user_mode_novm(struct pt_regs *regs)
14435 {
14436 #ifdef CONFIG_X86_32
14437 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
14438 #else
14439 - return !!(regs->cs & 3);
14440 + return !!(regs->cs & SEGMENT_RPL_MASK);
14441 #endif
14442 }
14443
14444 -static inline int user_mode_vm(struct pt_regs *regs)
14445 +static inline int user_mode(struct pt_regs *regs)
14446 {
14447 #ifdef CONFIG_X86_32
14448 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
14449 USER_RPL;
14450 #else
14451 - return user_mode(regs);
14452 + return user_mode_novm(regs);
14453 #endif
14454 }
14455
14456 @@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
14457 #ifdef CONFIG_X86_64
14458 static inline bool user_64bit_mode(struct pt_regs *regs)
14459 {
14460 + unsigned long cs = regs->cs & 0xffff;
14461 #ifndef CONFIG_PARAVIRT
14462 /*
14463 * On non-paravirt systems, this is the only long mode CPL 3
14464 * selector. We do not allow long mode selectors in the LDT.
14465 */
14466 - return regs->cs == __USER_CS;
14467 + return cs == __USER_CS;
14468 #else
14469 /* Headers are too twisted for this to go in paravirt.h. */
14470 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
14471 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
14472 #endif
14473 }
14474
14475 @@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
14476 * Traps from the kernel do not save sp and ss.
14477 * Use the helper function to retrieve sp.
14478 */
14479 - if (offset == offsetof(struct pt_regs, sp) &&
14480 - regs->cs == __KERNEL_CS)
14481 - return kernel_stack_pointer(regs);
14482 + if (offset == offsetof(struct pt_regs, sp)) {
14483 + unsigned long cs = regs->cs & 0xffff;
14484 + if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
14485 + return kernel_stack_pointer(regs);
14486 + }
14487 #endif
14488 return *(unsigned long *)((unsigned long)regs + offset);
14489 }
14490 diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
14491 index fe1ec5b..dc5c3fe 100644
14492 --- a/arch/x86/include/asm/realmode.h
14493 +++ b/arch/x86/include/asm/realmode.h
14494 @@ -22,16 +22,14 @@ struct real_mode_header {
14495 #endif
14496 /* APM/BIOS reboot */
14497 u32 machine_real_restart_asm;
14498 -#ifdef CONFIG_X86_64
14499 u32 machine_real_restart_seg;
14500 -#endif
14501 };
14502
14503 /* This must match data at trampoline_32/64.S */
14504 struct trampoline_header {
14505 #ifdef CONFIG_X86_32
14506 u32 start;
14507 - u16 gdt_pad;
14508 + u16 boot_cs;
14509 u16 gdt_limit;
14510 u32 gdt_base;
14511 #else
14512 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
14513 index a82c4f1..ac45053 100644
14514 --- a/arch/x86/include/asm/reboot.h
14515 +++ b/arch/x86/include/asm/reboot.h
14516 @@ -6,13 +6,13 @@
14517 struct pt_regs;
14518
14519 struct machine_ops {
14520 - void (*restart)(char *cmd);
14521 - void (*halt)(void);
14522 - void (*power_off)(void);
14523 + void (* __noreturn restart)(char *cmd);
14524 + void (* __noreturn halt)(void);
14525 + void (* __noreturn power_off)(void);
14526 void (*shutdown)(void);
14527 void (*crash_shutdown)(struct pt_regs *);
14528 - void (*emergency_restart)(void);
14529 -};
14530 + void (* __noreturn emergency_restart)(void);
14531 +} __no_const;
14532
14533 extern struct machine_ops machine_ops;
14534
14535 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
14536 index 2dbe4a7..ce1db00 100644
14537 --- a/arch/x86/include/asm/rwsem.h
14538 +++ b/arch/x86/include/asm/rwsem.h
14539 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
14540 {
14541 asm volatile("# beginning down_read\n\t"
14542 LOCK_PREFIX _ASM_INC "(%1)\n\t"
14543 +
14544 +#ifdef CONFIG_PAX_REFCOUNT
14545 + "jno 0f\n"
14546 + LOCK_PREFIX _ASM_DEC "(%1)\n"
14547 + "int $4\n0:\n"
14548 + _ASM_EXTABLE(0b, 0b)
14549 +#endif
14550 +
14551 /* adds 0x00000001 */
14552 " jns 1f\n"
14553 " call call_rwsem_down_read_failed\n"
14554 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
14555 "1:\n\t"
14556 " mov %1,%2\n\t"
14557 " add %3,%2\n\t"
14558 +
14559 +#ifdef CONFIG_PAX_REFCOUNT
14560 + "jno 0f\n"
14561 + "sub %3,%2\n"
14562 + "int $4\n0:\n"
14563 + _ASM_EXTABLE(0b, 0b)
14564 +#endif
14565 +
14566 " jle 2f\n\t"
14567 LOCK_PREFIX " cmpxchg %2,%0\n\t"
14568 " jnz 1b\n\t"
14569 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
14570 long tmp;
14571 asm volatile("# beginning down_write\n\t"
14572 LOCK_PREFIX " xadd %1,(%2)\n\t"
14573 +
14574 +#ifdef CONFIG_PAX_REFCOUNT
14575 + "jno 0f\n"
14576 + "mov %1,(%2)\n"
14577 + "int $4\n0:\n"
14578 + _ASM_EXTABLE(0b, 0b)
14579 +#endif
14580 +
14581 /* adds 0xffff0001, returns the old value */
14582 " test %1,%1\n\t"
14583 /* was the count 0 before? */
14584 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
14585 long tmp;
14586 asm volatile("# beginning __up_read\n\t"
14587 LOCK_PREFIX " xadd %1,(%2)\n\t"
14588 +
14589 +#ifdef CONFIG_PAX_REFCOUNT
14590 + "jno 0f\n"
14591 + "mov %1,(%2)\n"
14592 + "int $4\n0:\n"
14593 + _ASM_EXTABLE(0b, 0b)
14594 +#endif
14595 +
14596 /* subtracts 1, returns the old value */
14597 " jns 1f\n\t"
14598 " call call_rwsem_wake\n" /* expects old value in %edx */
14599 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
14600 long tmp;
14601 asm volatile("# beginning __up_write\n\t"
14602 LOCK_PREFIX " xadd %1,(%2)\n\t"
14603 +
14604 +#ifdef CONFIG_PAX_REFCOUNT
14605 + "jno 0f\n"
14606 + "mov %1,(%2)\n"
14607 + "int $4\n0:\n"
14608 + _ASM_EXTABLE(0b, 0b)
14609 +#endif
14610 +
14611 /* subtracts 0xffff0001, returns the old value */
14612 " jns 1f\n\t"
14613 " call call_rwsem_wake\n" /* expects old value in %edx */
14614 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
14615 {
14616 asm volatile("# beginning __downgrade_write\n\t"
14617 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
14618 +
14619 +#ifdef CONFIG_PAX_REFCOUNT
14620 + "jno 0f\n"
14621 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
14622 + "int $4\n0:\n"
14623 + _ASM_EXTABLE(0b, 0b)
14624 +#endif
14625 +
14626 /*
14627 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
14628 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
14629 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
14630 */
14631 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
14632 {
14633 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
14634 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
14635 +
14636 +#ifdef CONFIG_PAX_REFCOUNT
14637 + "jno 0f\n"
14638 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
14639 + "int $4\n0:\n"
14640 + _ASM_EXTABLE(0b, 0b)
14641 +#endif
14642 +
14643 : "+m" (sem->count)
14644 : "er" (delta));
14645 }
14646 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
14647 */
14648 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
14649 {
14650 - return delta + xadd(&sem->count, delta);
14651 + return delta + xadd_check_overflow(&sem->count, delta);
14652 }
14653
14654 #endif /* __KERNEL__ */
14655 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
14656 index c48a950..c6d7468 100644
14657 --- a/arch/x86/include/asm/segment.h
14658 +++ b/arch/x86/include/asm/segment.h
14659 @@ -64,10 +64,15 @@
14660 * 26 - ESPFIX small SS
14661 * 27 - per-cpu [ offset to per-cpu data area ]
14662 * 28 - stack_canary-20 [ for stack protector ]
14663 - * 29 - unused
14664 - * 30 - unused
14665 + * 29 - PCI BIOS CS
14666 + * 30 - PCI BIOS DS
14667 * 31 - TSS for double fault handler
14668 */
14669 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
14670 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
14671 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
14672 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
14673 +
14674 #define GDT_ENTRY_TLS_MIN 6
14675 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
14676
14677 @@ -79,6 +84,8 @@
14678
14679 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
14680
14681 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
14682 +
14683 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
14684
14685 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
14686 @@ -104,6 +111,12 @@
14687 #define __KERNEL_STACK_CANARY 0
14688 #endif
14689
14690 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
14691 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
14692 +
14693 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
14694 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
14695 +
14696 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
14697
14698 /*
14699 @@ -141,7 +154,7 @@
14700 */
14701
14702 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
14703 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
14704 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
14705
14706
14707 #else
14708 @@ -165,6 +178,8 @@
14709 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
14710 #define __USER32_DS __USER_DS
14711
14712 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
14713 +
14714 #define GDT_ENTRY_TSS 8 /* needs two entries */
14715 #define GDT_ENTRY_LDT 10 /* needs two entries */
14716 #define GDT_ENTRY_TLS_MIN 12
14717 @@ -185,6 +200,7 @@
14718 #endif
14719
14720 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
14721 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
14722 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
14723 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
14724 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
14725 @@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
14726 {
14727 unsigned long __limit;
14728 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
14729 - return __limit + 1;
14730 + return __limit;
14731 }
14732
14733 #endif /* !__ASSEMBLY__ */
14734 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
14735 index b073aae..39f9bdd 100644
14736 --- a/arch/x86/include/asm/smp.h
14737 +++ b/arch/x86/include/asm/smp.h
14738 @@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
14739 /* cpus sharing the last level cache: */
14740 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
14741 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
14742 -DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
14743 +DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
14744
14745 static inline struct cpumask *cpu_sibling_mask(int cpu)
14746 {
14747 @@ -79,7 +79,7 @@ struct smp_ops {
14748
14749 void (*send_call_func_ipi)(const struct cpumask *mask);
14750 void (*send_call_func_single_ipi)(int cpu);
14751 -};
14752 +} __no_const;
14753
14754 /* Globals due to paravirt */
14755 extern void set_cpu_sibling_map(int cpu);
14756 @@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
14757 extern int safe_smp_processor_id(void);
14758
14759 #elif defined(CONFIG_X86_64_SMP)
14760 -#define raw_smp_processor_id() (this_cpu_read(cpu_number))
14761 -
14762 -#define stack_smp_processor_id() \
14763 -({ \
14764 - struct thread_info *ti; \
14765 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
14766 - ti->cpu; \
14767 -})
14768 +#define raw_smp_processor_id() (this_cpu_read(cpu_number))
14769 +#define stack_smp_processor_id() raw_smp_processor_id()
14770 #define safe_smp_processor_id() smp_processor_id()
14771
14772 #endif
14773 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
14774 index 33692ea..350a534 100644
14775 --- a/arch/x86/include/asm/spinlock.h
14776 +++ b/arch/x86/include/asm/spinlock.h
14777 @@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
14778 static inline void arch_read_lock(arch_rwlock_t *rw)
14779 {
14780 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
14781 +
14782 +#ifdef CONFIG_PAX_REFCOUNT
14783 + "jno 0f\n"
14784 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
14785 + "int $4\n0:\n"
14786 + _ASM_EXTABLE(0b, 0b)
14787 +#endif
14788 +
14789 "jns 1f\n"
14790 "call __read_lock_failed\n\t"
14791 "1:\n"
14792 @@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
14793 static inline void arch_write_lock(arch_rwlock_t *rw)
14794 {
14795 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
14796 +
14797 +#ifdef CONFIG_PAX_REFCOUNT
14798 + "jno 0f\n"
14799 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
14800 + "int $4\n0:\n"
14801 + _ASM_EXTABLE(0b, 0b)
14802 +#endif
14803 +
14804 "jz 1f\n"
14805 "call __write_lock_failed\n\t"
14806 "1:\n"
14807 @@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
14808
14809 static inline void arch_read_unlock(arch_rwlock_t *rw)
14810 {
14811 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
14812 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
14813 +
14814 +#ifdef CONFIG_PAX_REFCOUNT
14815 + "jno 0f\n"
14816 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
14817 + "int $4\n0:\n"
14818 + _ASM_EXTABLE(0b, 0b)
14819 +#endif
14820 +
14821 :"+m" (rw->lock) : : "memory");
14822 }
14823
14824 static inline void arch_write_unlock(arch_rwlock_t *rw)
14825 {
14826 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
14827 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
14828 +
14829 +#ifdef CONFIG_PAX_REFCOUNT
14830 + "jno 0f\n"
14831 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
14832 + "int $4\n0:\n"
14833 + _ASM_EXTABLE(0b, 0b)
14834 +#endif
14835 +
14836 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
14837 }
14838
14839 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
14840 index 6a99859..03cb807 100644
14841 --- a/arch/x86/include/asm/stackprotector.h
14842 +++ b/arch/x86/include/asm/stackprotector.h
14843 @@ -47,7 +47,7 @@
14844 * head_32 for boot CPU and setup_per_cpu_areas() for others.
14845 */
14846 #define GDT_STACK_CANARY_INIT \
14847 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
14848 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
14849
14850 /*
14851 * Initialize the stackprotector canary value.
14852 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
14853
14854 static inline void load_stack_canary_segment(void)
14855 {
14856 -#ifdef CONFIG_X86_32
14857 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14858 asm volatile ("mov %0, %%gs" : : "r" (0));
14859 #endif
14860 }
14861 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
14862 index 70bbe39..4ae2bd4 100644
14863 --- a/arch/x86/include/asm/stacktrace.h
14864 +++ b/arch/x86/include/asm/stacktrace.h
14865 @@ -11,28 +11,20 @@
14866
14867 extern int kstack_depth_to_print;
14868
14869 -struct thread_info;
14870 +struct task_struct;
14871 struct stacktrace_ops;
14872
14873 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
14874 - unsigned long *stack,
14875 - unsigned long bp,
14876 - const struct stacktrace_ops *ops,
14877 - void *data,
14878 - unsigned long *end,
14879 - int *graph);
14880 +typedef unsigned long walk_stack_t(struct task_struct *task,
14881 + void *stack_start,
14882 + unsigned long *stack,
14883 + unsigned long bp,
14884 + const struct stacktrace_ops *ops,
14885 + void *data,
14886 + unsigned long *end,
14887 + int *graph);
14888
14889 -extern unsigned long
14890 -print_context_stack(struct thread_info *tinfo,
14891 - unsigned long *stack, unsigned long bp,
14892 - const struct stacktrace_ops *ops, void *data,
14893 - unsigned long *end, int *graph);
14894 -
14895 -extern unsigned long
14896 -print_context_stack_bp(struct thread_info *tinfo,
14897 - unsigned long *stack, unsigned long bp,
14898 - const struct stacktrace_ops *ops, void *data,
14899 - unsigned long *end, int *graph);
14900 +extern walk_stack_t print_context_stack;
14901 +extern walk_stack_t print_context_stack_bp;
14902
14903 /* Generic stack tracer with callbacks */
14904
14905 @@ -40,7 +32,7 @@ struct stacktrace_ops {
14906 void (*address)(void *data, unsigned long address, int reliable);
14907 /* On negative return stop dumping */
14908 int (*stack)(void *data, char *name);
14909 - walk_stack_t walk_stack;
14910 + walk_stack_t *walk_stack;
14911 };
14912
14913 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
14914 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
14915 index 4ec45b3..a4f0a8a 100644
14916 --- a/arch/x86/include/asm/switch_to.h
14917 +++ b/arch/x86/include/asm/switch_to.h
14918 @@ -108,7 +108,7 @@ do { \
14919 "call __switch_to\n\t" \
14920 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
14921 __switch_canary \
14922 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
14923 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
14924 "movq %%rax,%%rdi\n\t" \
14925 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
14926 "jnz ret_from_fork\n\t" \
14927 @@ -119,7 +119,7 @@ do { \
14928 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
14929 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
14930 [_tif_fork] "i" (_TIF_FORK), \
14931 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
14932 + [thread_info] "m" (current_tinfo), \
14933 [current_task] "m" (current_task) \
14934 __switch_canary_iparam \
14935 : "memory", "cc" __EXTRA_CLOBBER)
14936 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
14937 index 2d946e6..e453ec4 100644
14938 --- a/arch/x86/include/asm/thread_info.h
14939 +++ b/arch/x86/include/asm/thread_info.h
14940 @@ -10,6 +10,7 @@
14941 #include <linux/compiler.h>
14942 #include <asm/page.h>
14943 #include <asm/types.h>
14944 +#include <asm/percpu.h>
14945
14946 /*
14947 * low level task data that entry.S needs immediate access to
14948 @@ -24,7 +25,6 @@ struct exec_domain;
14949 #include <linux/atomic.h>
14950
14951 struct thread_info {
14952 - struct task_struct *task; /* main task structure */
14953 struct exec_domain *exec_domain; /* execution domain */
14954 __u32 flags; /* low level flags */
14955 __u32 status; /* thread synchronous flags */
14956 @@ -34,19 +34,13 @@ struct thread_info {
14957 mm_segment_t addr_limit;
14958 struct restart_block restart_block;
14959 void __user *sysenter_return;
14960 -#ifdef CONFIG_X86_32
14961 - unsigned long previous_esp; /* ESP of the previous stack in
14962 - case of nested (IRQ) stacks
14963 - */
14964 - __u8 supervisor_stack[0];
14965 -#endif
14966 + unsigned long lowest_stack;
14967 unsigned int sig_on_uaccess_error:1;
14968 unsigned int uaccess_err:1; /* uaccess failed */
14969 };
14970
14971 -#define INIT_THREAD_INFO(tsk) \
14972 +#define INIT_THREAD_INFO \
14973 { \
14974 - .task = &tsk, \
14975 .exec_domain = &default_exec_domain, \
14976 .flags = 0, \
14977 .cpu = 0, \
14978 @@ -57,7 +51,7 @@ struct thread_info {
14979 }, \
14980 }
14981
14982 -#define init_thread_info (init_thread_union.thread_info)
14983 +#define init_thread_info (init_thread_union.stack)
14984 #define init_stack (init_thread_union.stack)
14985
14986 #else /* !__ASSEMBLY__ */
14987 @@ -98,6 +92,7 @@ struct thread_info {
14988 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
14989 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
14990 #define TIF_X32 30 /* 32-bit native x86-64 binary */
14991 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
14992
14993 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
14994 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
14995 @@ -122,17 +117,18 @@ struct thread_info {
14996 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
14997 #define _TIF_ADDR32 (1 << TIF_ADDR32)
14998 #define _TIF_X32 (1 << TIF_X32)
14999 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15000
15001 /* work to do in syscall_trace_enter() */
15002 #define _TIF_WORK_SYSCALL_ENTRY \
15003 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15004 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15005 - _TIF_NOHZ)
15006 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
15007
15008 /* work to do in syscall_trace_leave() */
15009 #define _TIF_WORK_SYSCALL_EXIT \
15010 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15011 - _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15012 + _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15013
15014 /* work to do on interrupt/exception return */
15015 #define _TIF_WORK_MASK \
15016 @@ -143,7 +139,7 @@ struct thread_info {
15017 /* work to do on any return to user space */
15018 #define _TIF_ALLWORK_MASK \
15019 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15020 - _TIF_NOHZ)
15021 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
15022
15023 /* Only used for 64 bit */
15024 #define _TIF_DO_NOTIFY_MASK \
15025 @@ -159,45 +155,40 @@ struct thread_info {
15026
15027 #define PREEMPT_ACTIVE 0x10000000
15028
15029 -#ifdef CONFIG_X86_32
15030 -
15031 -#define STACK_WARN (THREAD_SIZE/8)
15032 -/*
15033 - * macros/functions for gaining access to the thread information structure
15034 - *
15035 - * preempt_count needs to be 1 initially, until the scheduler is functional.
15036 - */
15037 -#ifndef __ASSEMBLY__
15038 -
15039 -
15040 -/* how to get the current stack pointer from C */
15041 -register unsigned long current_stack_pointer asm("esp") __used;
15042 -
15043 -/* how to get the thread information struct from C */
15044 -static inline struct thread_info *current_thread_info(void)
15045 -{
15046 - return (struct thread_info *)
15047 - (current_stack_pointer & ~(THREAD_SIZE - 1));
15048 -}
15049 -
15050 -#else /* !__ASSEMBLY__ */
15051 -
15052 +#ifdef __ASSEMBLY__
15053 /* how to get the thread information struct from ASM */
15054 #define GET_THREAD_INFO(reg) \
15055 - movl $-THREAD_SIZE, reg; \
15056 - andl %esp, reg
15057 + mov PER_CPU_VAR(current_tinfo), reg
15058
15059 /* use this one if reg already contains %esp */
15060 -#define GET_THREAD_INFO_WITH_ESP(reg) \
15061 - andl $-THREAD_SIZE, reg
15062 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15063 +#else
15064 +/* how to get the thread information struct from C */
15065 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15066 +
15067 +static __always_inline struct thread_info *current_thread_info(void)
15068 +{
15069 + return this_cpu_read_stable(current_tinfo);
15070 +}
15071 +#endif
15072 +
15073 +#ifdef CONFIG_X86_32
15074 +
15075 +#define STACK_WARN (THREAD_SIZE/8)
15076 +/*
15077 + * macros/functions for gaining access to the thread information structure
15078 + *
15079 + * preempt_count needs to be 1 initially, until the scheduler is functional.
15080 + */
15081 +#ifndef __ASSEMBLY__
15082 +
15083 +/* how to get the current stack pointer from C */
15084 +register unsigned long current_stack_pointer asm("esp") __used;
15085
15086 #endif
15087
15088 #else /* X86_32 */
15089
15090 -#include <asm/percpu.h>
15091 -#define KERNEL_STACK_OFFSET (5*8)
15092 -
15093 /*
15094 * macros/functions for gaining access to the thread information structure
15095 * preempt_count needs to be 1 initially, until the scheduler is functional.
15096 @@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
15097 #ifndef __ASSEMBLY__
15098 DECLARE_PER_CPU(unsigned long, kernel_stack);
15099
15100 -static inline struct thread_info *current_thread_info(void)
15101 -{
15102 - struct thread_info *ti;
15103 - ti = (void *)(this_cpu_read_stable(kernel_stack) +
15104 - KERNEL_STACK_OFFSET - THREAD_SIZE);
15105 - return ti;
15106 -}
15107 -
15108 -#else /* !__ASSEMBLY__ */
15109 -
15110 -/* how to get the thread information struct from ASM */
15111 -#define GET_THREAD_INFO(reg) \
15112 - movq PER_CPU_VAR(kernel_stack),reg ; \
15113 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15114 -
15115 -/*
15116 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15117 - * a certain register (to be used in assembler memory operands).
15118 - */
15119 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15120 -
15121 +/* how to get the current stack pointer from C */
15122 +register unsigned long current_stack_pointer asm("rsp") __used;
15123 #endif
15124
15125 #endif /* !X86_32 */
15126 @@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
15127 extern void arch_task_cache_init(void);
15128 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15129 extern void arch_release_task_struct(struct task_struct *tsk);
15130 +
15131 +#define __HAVE_THREAD_FUNCTIONS
15132 +#define task_thread_info(task) (&(task)->tinfo)
15133 +#define task_stack_page(task) ((task)->stack)
15134 +#define setup_thread_stack(p, org) do {} while (0)
15135 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15136 +
15137 #endif
15138 #endif /* _ASM_X86_THREAD_INFO_H */
15139 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15140 index 1709801..0a60f2f 100644
15141 --- a/arch/x86/include/asm/uaccess.h
15142 +++ b/arch/x86/include/asm/uaccess.h
15143 @@ -7,6 +7,7 @@
15144 #include <linux/compiler.h>
15145 #include <linux/thread_info.h>
15146 #include <linux/string.h>
15147 +#include <linux/sched.h>
15148 #include <asm/asm.h>
15149 #include <asm/page.h>
15150 #include <asm/smap.h>
15151 @@ -29,7 +30,12 @@
15152
15153 #define get_ds() (KERNEL_DS)
15154 #define get_fs() (current_thread_info()->addr_limit)
15155 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15156 +void __set_fs(mm_segment_t x);
15157 +void set_fs(mm_segment_t x);
15158 +#else
15159 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15160 +#endif
15161
15162 #define segment_eq(a, b) ((a).seg == (b).seg)
15163
15164 @@ -77,8 +83,33 @@
15165 * checks that the pointer is in the user space range - after calling
15166 * this function, memory access functions may still return -EFAULT.
15167 */
15168 -#define access_ok(type, addr, size) \
15169 - (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15170 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15171 +#define access_ok(type, addr, size) \
15172 +({ \
15173 + long __size = size; \
15174 + unsigned long __addr = (unsigned long)addr; \
15175 + unsigned long __addr_ao = __addr & PAGE_MASK; \
15176 + unsigned long __end_ao = __addr + __size - 1; \
15177 + bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15178 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15179 + while(__addr_ao <= __end_ao) { \
15180 + char __c_ao; \
15181 + __addr_ao += PAGE_SIZE; \
15182 + if (__size > PAGE_SIZE) \
15183 + cond_resched(); \
15184 + if (__get_user(__c_ao, (char __user *)__addr)) \
15185 + break; \
15186 + if (type != VERIFY_WRITE) { \
15187 + __addr = __addr_ao; \
15188 + continue; \
15189 + } \
15190 + if (__put_user(__c_ao, (char __user *)__addr)) \
15191 + break; \
15192 + __addr = __addr_ao; \
15193 + } \
15194 + } \
15195 + __ret_ao; \
15196 +})
15197
15198 /*
15199 * The exception table consists of pairs of addresses relative to the
15200 @@ -189,13 +220,21 @@ extern int __get_user_bad(void);
15201 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15202 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15203
15204 -
15205 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15206 +#define __copyuser_seg "gs;"
15207 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15208 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15209 +#else
15210 +#define __copyuser_seg
15211 +#define __COPYUSER_SET_ES
15212 +#define __COPYUSER_RESTORE_ES
15213 +#endif
15214
15215 #ifdef CONFIG_X86_32
15216 #define __put_user_asm_u64(x, addr, err, errret) \
15217 asm volatile(ASM_STAC "\n" \
15218 - "1: movl %%eax,0(%2)\n" \
15219 - "2: movl %%edx,4(%2)\n" \
15220 + "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15221 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15222 "3: " ASM_CLAC "\n" \
15223 ".section .fixup,\"ax\"\n" \
15224 "4: movl %3,%0\n" \
15225 @@ -208,8 +247,8 @@ extern int __get_user_bad(void);
15226
15227 #define __put_user_asm_ex_u64(x, addr) \
15228 asm volatile(ASM_STAC "\n" \
15229 - "1: movl %%eax,0(%1)\n" \
15230 - "2: movl %%edx,4(%1)\n" \
15231 + "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
15232 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
15233 "3: " ASM_CLAC "\n" \
15234 _ASM_EXTABLE_EX(1b, 2b) \
15235 _ASM_EXTABLE_EX(2b, 3b) \
15236 @@ -259,7 +298,7 @@ extern void __put_user_8(void);
15237 __typeof__(*(ptr)) __pu_val; \
15238 __chk_user_ptr(ptr); \
15239 might_fault(); \
15240 - __pu_val = x; \
15241 + __pu_val = (x); \
15242 switch (sizeof(*(ptr))) { \
15243 case 1: \
15244 __put_user_x(1, __pu_val, ptr, __ret_pu); \
15245 @@ -358,7 +397,7 @@ do { \
15246
15247 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15248 asm volatile(ASM_STAC "\n" \
15249 - "1: mov"itype" %2,%"rtype"1\n" \
15250 + "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
15251 "2: " ASM_CLAC "\n" \
15252 ".section .fixup,\"ax\"\n" \
15253 "3: mov %3,%0\n" \
15254 @@ -366,7 +405,7 @@ do { \
15255 " jmp 2b\n" \
15256 ".previous\n" \
15257 _ASM_EXTABLE(1b, 3b) \
15258 - : "=r" (err), ltype(x) \
15259 + : "=r" (err), ltype (x) \
15260 : "m" (__m(addr)), "i" (errret), "0" (err))
15261
15262 #define __get_user_size_ex(x, ptr, size) \
15263 @@ -391,7 +430,7 @@ do { \
15264 } while (0)
15265
15266 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
15267 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
15268 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
15269 "2:\n" \
15270 _ASM_EXTABLE_EX(1b, 2b) \
15271 : ltype(x) : "m" (__m(addr)))
15272 @@ -408,13 +447,24 @@ do { \
15273 int __gu_err; \
15274 unsigned long __gu_val; \
15275 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
15276 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
15277 + (x) = (__typeof__(*(ptr)))__gu_val; \
15278 __gu_err; \
15279 })
15280
15281 /* FIXME: this hack is definitely wrong -AK */
15282 struct __large_struct { unsigned long buf[100]; };
15283 -#define __m(x) (*(struct __large_struct __user *)(x))
15284 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15285 +#define ____m(x) \
15286 +({ \
15287 + unsigned long ____x = (unsigned long)(x); \
15288 + if (____x < PAX_USER_SHADOW_BASE) \
15289 + ____x += PAX_USER_SHADOW_BASE; \
15290 + (void __user *)____x; \
15291 +})
15292 +#else
15293 +#define ____m(x) (x)
15294 +#endif
15295 +#define __m(x) (*(struct __large_struct __user *)____m(x))
15296
15297 /*
15298 * Tell gcc we read from memory instead of writing: this is because
15299 @@ -423,7 +473,7 @@ struct __large_struct { unsigned long buf[100]; };
15300 */
15301 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15302 asm volatile(ASM_STAC "\n" \
15303 - "1: mov"itype" %"rtype"1,%2\n" \
15304 + "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
15305 "2: " ASM_CLAC "\n" \
15306 ".section .fixup,\"ax\"\n" \
15307 "3: mov %3,%0\n" \
15308 @@ -431,10 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
15309 ".previous\n" \
15310 _ASM_EXTABLE(1b, 3b) \
15311 : "=r"(err) \
15312 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
15313 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
15314
15315 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
15316 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
15317 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
15318 "2:\n" \
15319 _ASM_EXTABLE_EX(1b, 2b) \
15320 : : ltype(x), "m" (__m(addr)))
15321 @@ -473,8 +523,12 @@ struct __large_struct { unsigned long buf[100]; };
15322 * On error, the variable @x is set to zero.
15323 */
15324
15325 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15326 +#define __get_user(x, ptr) get_user((x), (ptr))
15327 +#else
15328 #define __get_user(x, ptr) \
15329 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
15330 +#endif
15331
15332 /**
15333 * __put_user: - Write a simple value into user space, with less checking.
15334 @@ -496,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
15335 * Returns zero on success, or -EFAULT on error.
15336 */
15337
15338 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15339 +#define __put_user(x, ptr) put_user((x), (ptr))
15340 +#else
15341 #define __put_user(x, ptr) \
15342 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
15343 +#endif
15344
15345 #define __get_user_unaligned __get_user
15346 #define __put_user_unaligned __put_user
15347 @@ -515,7 +573,7 @@ struct __large_struct { unsigned long buf[100]; };
15348 #define get_user_ex(x, ptr) do { \
15349 unsigned long __gue_val; \
15350 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
15351 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
15352 + (x) = (__typeof__(*(ptr)))__gue_val; \
15353 } while (0)
15354
15355 #define put_user_try uaccess_try
15356 @@ -532,8 +590,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
15357 extern __must_check long strlen_user(const char __user *str);
15358 extern __must_check long strnlen_user(const char __user *str, long n);
15359
15360 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
15361 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
15362 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15363 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15364
15365 /*
15366 * movsl can be slow when source and dest are not both 8-byte aligned
15367 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
15368 index 7f760a9..04b1c65 100644
15369 --- a/arch/x86/include/asm/uaccess_32.h
15370 +++ b/arch/x86/include/asm/uaccess_32.h
15371 @@ -11,15 +11,15 @@
15372 #include <asm/page.h>
15373
15374 unsigned long __must_check __copy_to_user_ll
15375 - (void __user *to, const void *from, unsigned long n);
15376 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
15377 unsigned long __must_check __copy_from_user_ll
15378 - (void *to, const void __user *from, unsigned long n);
15379 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15380 unsigned long __must_check __copy_from_user_ll_nozero
15381 - (void *to, const void __user *from, unsigned long n);
15382 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15383 unsigned long __must_check __copy_from_user_ll_nocache
15384 - (void *to, const void __user *from, unsigned long n);
15385 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15386 unsigned long __must_check __copy_from_user_ll_nocache_nozero
15387 - (void *to, const void __user *from, unsigned long n);
15388 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15389
15390 /**
15391 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
15392 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
15393 static __always_inline unsigned long __must_check
15394 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
15395 {
15396 + if ((long)n < 0)
15397 + return n;
15398 +
15399 + check_object_size(from, n, true);
15400 +
15401 if (__builtin_constant_p(n)) {
15402 unsigned long ret;
15403
15404 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
15405 __copy_to_user(void __user *to, const void *from, unsigned long n)
15406 {
15407 might_fault();
15408 +
15409 return __copy_to_user_inatomic(to, from, n);
15410 }
15411
15412 static __always_inline unsigned long
15413 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
15414 {
15415 + if ((long)n < 0)
15416 + return n;
15417 +
15418 /* Avoid zeroing the tail if the copy fails..
15419 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
15420 * but as the zeroing behaviour is only significant when n is not
15421 @@ -137,6 +146,12 @@ static __always_inline unsigned long
15422 __copy_from_user(void *to, const void __user *from, unsigned long n)
15423 {
15424 might_fault();
15425 +
15426 + if ((long)n < 0)
15427 + return n;
15428 +
15429 + check_object_size(to, n, false);
15430 +
15431 if (__builtin_constant_p(n)) {
15432 unsigned long ret;
15433
15434 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
15435 const void __user *from, unsigned long n)
15436 {
15437 might_fault();
15438 +
15439 + if ((long)n < 0)
15440 + return n;
15441 +
15442 if (__builtin_constant_p(n)) {
15443 unsigned long ret;
15444
15445 @@ -181,15 +200,19 @@ static __always_inline unsigned long
15446 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
15447 unsigned long n)
15448 {
15449 - return __copy_from_user_ll_nocache_nozero(to, from, n);
15450 + if ((long)n < 0)
15451 + return n;
15452 +
15453 + return __copy_from_user_ll_nocache_nozero(to, from, n);
15454 }
15455
15456 -unsigned long __must_check copy_to_user(void __user *to,
15457 - const void *from, unsigned long n);
15458 -unsigned long __must_check _copy_from_user(void *to,
15459 - const void __user *from,
15460 - unsigned long n);
15461 -
15462 +extern void copy_to_user_overflow(void)
15463 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15464 + __compiletime_error("copy_to_user() buffer size is not provably correct")
15465 +#else
15466 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
15467 +#endif
15468 +;
15469
15470 extern void copy_from_user_overflow(void)
15471 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15472 @@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
15473 #endif
15474 ;
15475
15476 -static inline unsigned long __must_check copy_from_user(void *to,
15477 - const void __user *from,
15478 - unsigned long n)
15479 +/**
15480 + * copy_to_user: - Copy a block of data into user space.
15481 + * @to: Destination address, in user space.
15482 + * @from: Source address, in kernel space.
15483 + * @n: Number of bytes to copy.
15484 + *
15485 + * Context: User context only. This function may sleep.
15486 + *
15487 + * Copy data from kernel space to user space.
15488 + *
15489 + * Returns number of bytes that could not be copied.
15490 + * On success, this will be zero.
15491 + */
15492 +static inline unsigned long __must_check
15493 +copy_to_user(void __user *to, const void *from, unsigned long n)
15494 {
15495 - int sz = __compiletime_object_size(to);
15496 + size_t sz = __compiletime_object_size(from);
15497
15498 - if (likely(sz == -1 || sz >= n))
15499 - n = _copy_from_user(to, from, n);
15500 - else
15501 + if (unlikely(sz != (size_t)-1 && sz < n))
15502 + copy_to_user_overflow();
15503 + else if (access_ok(VERIFY_WRITE, to, n))
15504 + n = __copy_to_user(to, from, n);
15505 + return n;
15506 +}
15507 +
15508 +/**
15509 + * copy_from_user: - Copy a block of data from user space.
15510 + * @to: Destination address, in kernel space.
15511 + * @from: Source address, in user space.
15512 + * @n: Number of bytes to copy.
15513 + *
15514 + * Context: User context only. This function may sleep.
15515 + *
15516 + * Copy data from user space to kernel space.
15517 + *
15518 + * Returns number of bytes that could not be copied.
15519 + * On success, this will be zero.
15520 + *
15521 + * If some data could not be copied, this function will pad the copied
15522 + * data to the requested size using zero bytes.
15523 + */
15524 +static inline unsigned long __must_check
15525 +copy_from_user(void *to, const void __user *from, unsigned long n)
15526 +{
15527 + size_t sz = __compiletime_object_size(to);
15528 +
15529 + check_object_size(to, n, false);
15530 +
15531 + if (unlikely(sz != (size_t)-1 && sz < n))
15532 copy_from_user_overflow();
15533 -
15534 + else if (access_ok(VERIFY_READ, from, n))
15535 + n = __copy_from_user(to, from, n);
15536 + else if ((long)n > 0)
15537 + memset(to, 0, n);
15538 return n;
15539 }
15540
15541 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
15542 index 142810c..747941a 100644
15543 --- a/arch/x86/include/asm/uaccess_64.h
15544 +++ b/arch/x86/include/asm/uaccess_64.h
15545 @@ -10,6 +10,9 @@
15546 #include <asm/alternative.h>
15547 #include <asm/cpufeature.h>
15548 #include <asm/page.h>
15549 +#include <asm/pgtable.h>
15550 +
15551 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
15552
15553 /*
15554 * Copy To/From Userspace
15555 @@ -17,13 +20,13 @@
15556
15557 /* Handles exceptions in both to and from, but doesn't do access_ok */
15558 __must_check unsigned long
15559 -copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
15560 +copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
15561 __must_check unsigned long
15562 -copy_user_generic_string(void *to, const void *from, unsigned len);
15563 +copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
15564 __must_check unsigned long
15565 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
15566 +copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
15567
15568 -static __always_inline __must_check unsigned long
15569 +static __always_inline __must_check __size_overflow(3) unsigned long
15570 copy_user_generic(void *to, const void *from, unsigned len)
15571 {
15572 unsigned ret;
15573 @@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
15574 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
15575 "=d" (len)),
15576 "1" (to), "2" (from), "3" (len)
15577 - : "memory", "rcx", "r8", "r9", "r10", "r11");
15578 + : "memory", "rcx", "r8", "r9", "r11");
15579 return ret;
15580 }
15581
15582 +static __always_inline __must_check unsigned long
15583 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
15584 +static __always_inline __must_check unsigned long
15585 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
15586 __must_check unsigned long
15587 -_copy_to_user(void __user *to, const void *from, unsigned len);
15588 -__must_check unsigned long
15589 -_copy_from_user(void *to, const void __user *from, unsigned len);
15590 -__must_check unsigned long
15591 -copy_in_user(void __user *to, const void __user *from, unsigned len);
15592 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
15593 +
15594 +extern void copy_to_user_overflow(void)
15595 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15596 + __compiletime_error("copy_to_user() buffer size is not provably correct")
15597 +#else
15598 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
15599 +#endif
15600 +;
15601 +
15602 +extern void copy_from_user_overflow(void)
15603 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15604 + __compiletime_error("copy_from_user() buffer size is not provably correct")
15605 +#else
15606 + __compiletime_warning("copy_from_user() buffer size is not provably correct")
15607 +#endif
15608 +;
15609
15610 static inline unsigned long __must_check copy_from_user(void *to,
15611 const void __user *from,
15612 unsigned long n)
15613 {
15614 - int sz = __compiletime_object_size(to);
15615 -
15616 might_fault();
15617 - if (likely(sz == -1 || sz >= n))
15618 - n = _copy_from_user(to, from, n);
15619 -#ifdef CONFIG_DEBUG_VM
15620 - else
15621 - WARN(1, "Buffer overflow detected!\n");
15622 -#endif
15623 +
15624 + check_object_size(to, n, false);
15625 +
15626 + if (access_ok(VERIFY_READ, from, n))
15627 + n = __copy_from_user(to, from, n);
15628 + else if (n < INT_MAX)
15629 + memset(to, 0, n);
15630 return n;
15631 }
15632
15633 static __always_inline __must_check
15634 -int copy_to_user(void __user *dst, const void *src, unsigned size)
15635 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
15636 {
15637 might_fault();
15638
15639 - return _copy_to_user(dst, src, size);
15640 + if (access_ok(VERIFY_WRITE, dst, size))
15641 + size = __copy_to_user(dst, src, size);
15642 + return size;
15643 }
15644
15645 static __always_inline __must_check
15646 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
15647 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
15648 {
15649 - int ret = 0;
15650 + size_t sz = __compiletime_object_size(dst);
15651 + unsigned ret = 0;
15652
15653 might_fault();
15654 +
15655 + if (size > INT_MAX)
15656 + return size;
15657 +
15658 + check_object_size(dst, size, false);
15659 +
15660 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15661 + if (!__access_ok(VERIFY_READ, src, size))
15662 + return size;
15663 +#endif
15664 +
15665 + if (unlikely(sz != (size_t)-1 && sz < size)) {
15666 + copy_from_user_overflow();
15667 + return size;
15668 + }
15669 +
15670 if (!__builtin_constant_p(size))
15671 - return copy_user_generic(dst, (__force void *)src, size);
15672 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
15673 switch (size) {
15674 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
15675 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
15676 ret, "b", "b", "=q", 1);
15677 return ret;
15678 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
15679 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
15680 ret, "w", "w", "=r", 2);
15681 return ret;
15682 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
15683 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
15684 ret, "l", "k", "=r", 4);
15685 return ret;
15686 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
15687 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
15688 ret, "q", "", "=r", 8);
15689 return ret;
15690 case 10:
15691 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
15692 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
15693 ret, "q", "", "=r", 10);
15694 if (unlikely(ret))
15695 return ret;
15696 __get_user_asm(*(u16 *)(8 + (char *)dst),
15697 - (u16 __user *)(8 + (char __user *)src),
15698 + (const u16 __user *)(8 + (const char __user *)src),
15699 ret, "w", "w", "=r", 2);
15700 return ret;
15701 case 16:
15702 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
15703 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
15704 ret, "q", "", "=r", 16);
15705 if (unlikely(ret))
15706 return ret;
15707 __get_user_asm(*(u64 *)(8 + (char *)dst),
15708 - (u64 __user *)(8 + (char __user *)src),
15709 + (const u64 __user *)(8 + (const char __user *)src),
15710 ret, "q", "", "=r", 8);
15711 return ret;
15712 default:
15713 - return copy_user_generic(dst, (__force void *)src, size);
15714 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
15715 }
15716 }
15717
15718 static __always_inline __must_check
15719 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
15720 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
15721 {
15722 - int ret = 0;
15723 + size_t sz = __compiletime_object_size(src);
15724 + unsigned ret = 0;
15725
15726 might_fault();
15727 +
15728 + if (size > INT_MAX)
15729 + return size;
15730 +
15731 + check_object_size(src, size, true);
15732 +
15733 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15734 + if (!__access_ok(VERIFY_WRITE, dst, size))
15735 + return size;
15736 +#endif
15737 +
15738 + if (unlikely(sz != (size_t)-1 && sz < size)) {
15739 + copy_to_user_overflow();
15740 + return size;
15741 + }
15742 +
15743 if (!__builtin_constant_p(size))
15744 - return copy_user_generic((__force void *)dst, src, size);
15745 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
15746 switch (size) {
15747 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
15748 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
15749 ret, "b", "b", "iq", 1);
15750 return ret;
15751 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
15752 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
15753 ret, "w", "w", "ir", 2);
15754 return ret;
15755 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
15756 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
15757 ret, "l", "k", "ir", 4);
15758 return ret;
15759 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
15760 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
15761 ret, "q", "", "er", 8);
15762 return ret;
15763 case 10:
15764 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
15765 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
15766 ret, "q", "", "er", 10);
15767 if (unlikely(ret))
15768 return ret;
15769 asm("":::"memory");
15770 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
15771 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
15772 ret, "w", "w", "ir", 2);
15773 return ret;
15774 case 16:
15775 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
15776 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
15777 ret, "q", "", "er", 16);
15778 if (unlikely(ret))
15779 return ret;
15780 asm("":::"memory");
15781 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
15782 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
15783 ret, "q", "", "er", 8);
15784 return ret;
15785 default:
15786 - return copy_user_generic((__force void *)dst, src, size);
15787 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
15788 }
15789 }
15790
15791 static __always_inline __must_check
15792 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15793 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
15794 {
15795 - int ret = 0;
15796 + unsigned ret = 0;
15797
15798 might_fault();
15799 +
15800 + if (size > INT_MAX)
15801 + return size;
15802 +
15803 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15804 + if (!__access_ok(VERIFY_READ, src, size))
15805 + return size;
15806 + if (!__access_ok(VERIFY_WRITE, dst, size))
15807 + return size;
15808 +#endif
15809 +
15810 if (!__builtin_constant_p(size))
15811 - return copy_user_generic((__force void *)dst,
15812 - (__force void *)src, size);
15813 + return copy_user_generic((__force_kernel void *)____m(dst),
15814 + (__force_kernel const void *)____m(src), size);
15815 switch (size) {
15816 case 1: {
15817 u8 tmp;
15818 - __get_user_asm(tmp, (u8 __user *)src,
15819 + __get_user_asm(tmp, (const u8 __user *)src,
15820 ret, "b", "b", "=q", 1);
15821 if (likely(!ret))
15822 __put_user_asm(tmp, (u8 __user *)dst,
15823 @@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15824 }
15825 case 2: {
15826 u16 tmp;
15827 - __get_user_asm(tmp, (u16 __user *)src,
15828 + __get_user_asm(tmp, (const u16 __user *)src,
15829 ret, "w", "w", "=r", 2);
15830 if (likely(!ret))
15831 __put_user_asm(tmp, (u16 __user *)dst,
15832 @@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15833
15834 case 4: {
15835 u32 tmp;
15836 - __get_user_asm(tmp, (u32 __user *)src,
15837 + __get_user_asm(tmp, (const u32 __user *)src,
15838 ret, "l", "k", "=r", 4);
15839 if (likely(!ret))
15840 __put_user_asm(tmp, (u32 __user *)dst,
15841 @@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15842 }
15843 case 8: {
15844 u64 tmp;
15845 - __get_user_asm(tmp, (u64 __user *)src,
15846 + __get_user_asm(tmp, (const u64 __user *)src,
15847 ret, "q", "", "=r", 8);
15848 if (likely(!ret))
15849 __put_user_asm(tmp, (u64 __user *)dst,
15850 @@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15851 return ret;
15852 }
15853 default:
15854 - return copy_user_generic((__force void *)dst,
15855 - (__force void *)src, size);
15856 + return copy_user_generic((__force_kernel void *)____m(dst),
15857 + (__force_kernel const void *)____m(src), size);
15858 }
15859 }
15860
15861 static __must_check __always_inline int
15862 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
15863 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
15864 {
15865 - return copy_user_generic(dst, (__force const void *)src, size);
15866 + if (size > INT_MAX)
15867 + return size;
15868 +
15869 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15870 + if (!__access_ok(VERIFY_READ, src, size))
15871 + return size;
15872 +#endif
15873 +
15874 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
15875 }
15876
15877 -static __must_check __always_inline int
15878 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
15879 +static __must_check __always_inline unsigned long
15880 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
15881 {
15882 - return copy_user_generic((__force void *)dst, src, size);
15883 + if (size > INT_MAX)
15884 + return size;
15885 +
15886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15887 + if (!__access_ok(VERIFY_WRITE, dst, size))
15888 + return size;
15889 +#endif
15890 +
15891 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
15892 }
15893
15894 -extern long __copy_user_nocache(void *dst, const void __user *src,
15895 - unsigned size, int zerorest);
15896 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
15897 + unsigned long size, int zerorest) __size_overflow(3);
15898
15899 -static inline int
15900 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
15901 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
15902 {
15903 might_sleep();
15904 +
15905 + if (size > INT_MAX)
15906 + return size;
15907 +
15908 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15909 + if (!__access_ok(VERIFY_READ, src, size))
15910 + return size;
15911 +#endif
15912 +
15913 return __copy_user_nocache(dst, src, size, 1);
15914 }
15915
15916 -static inline int
15917 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
15918 - unsigned size)
15919 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
15920 + unsigned long size)
15921 {
15922 + if (size > INT_MAX)
15923 + return size;
15924 +
15925 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15926 + if (!__access_ok(VERIFY_READ, src, size))
15927 + return size;
15928 +#endif
15929 +
15930 return __copy_user_nocache(dst, src, size, 0);
15931 }
15932
15933 -unsigned long
15934 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
15935 +extern unsigned long
15936 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
15937
15938 #endif /* _ASM_X86_UACCESS_64_H */
15939 diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
15940 index 5b238981..77fdd78 100644
15941 --- a/arch/x86/include/asm/word-at-a-time.h
15942 +++ b/arch/x86/include/asm/word-at-a-time.h
15943 @@ -11,7 +11,7 @@
15944 * and shift, for example.
15945 */
15946 struct word_at_a_time {
15947 - const unsigned long one_bits, high_bits;
15948 + unsigned long one_bits, high_bits;
15949 };
15950
15951 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
15952 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
15953 index 5769349..a3d3e2a 100644
15954 --- a/arch/x86/include/asm/x86_init.h
15955 +++ b/arch/x86/include/asm/x86_init.h
15956 @@ -141,7 +141,7 @@ struct x86_init_ops {
15957 struct x86_init_timers timers;
15958 struct x86_init_iommu iommu;
15959 struct x86_init_pci pci;
15960 -};
15961 +} __no_const;
15962
15963 /**
15964 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
15965 @@ -152,7 +152,7 @@ struct x86_cpuinit_ops {
15966 void (*setup_percpu_clockev)(void);
15967 void (*early_percpu_clock_init)(void);
15968 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
15969 -};
15970 +} __no_const;
15971
15972 /**
15973 * struct x86_platform_ops - platform specific runtime functions
15974 @@ -178,7 +178,7 @@ struct x86_platform_ops {
15975 void (*save_sched_clock_state)(void);
15976 void (*restore_sched_clock_state)(void);
15977 void (*apic_post_init)(void);
15978 -};
15979 +} __no_const;
15980
15981 struct pci_dev;
15982
15983 @@ -187,14 +187,14 @@ struct x86_msi_ops {
15984 void (*teardown_msi_irq)(unsigned int irq);
15985 void (*teardown_msi_irqs)(struct pci_dev *dev);
15986 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
15987 -};
15988 +} __no_const;
15989
15990 struct x86_io_apic_ops {
15991 void (*init) (void);
15992 unsigned int (*read) (unsigned int apic, unsigned int reg);
15993 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
15994 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
15995 -};
15996 +} __no_const;
15997
15998 extern struct x86_init_ops x86_init;
15999 extern struct x86_cpuinit_ops x86_cpuinit;
16000 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16001 index 0415cda..b43d877 100644
16002 --- a/arch/x86/include/asm/xsave.h
16003 +++ b/arch/x86/include/asm/xsave.h
16004 @@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16005 return -EFAULT;
16006
16007 __asm__ __volatile__(ASM_STAC "\n"
16008 - "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16009 + "1:"
16010 + __copyuser_seg
16011 + ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16012 "2: " ASM_CLAC "\n"
16013 ".section .fixup,\"ax\"\n"
16014 "3: movl $-1,%[err]\n"
16015 @@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16016 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16017 {
16018 int err;
16019 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16020 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16021 u32 lmask = mask;
16022 u32 hmask = mask >> 32;
16023
16024 __asm__ __volatile__(ASM_STAC "\n"
16025 - "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16026 + "1:"
16027 + __copyuser_seg
16028 + ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16029 "2: " ASM_CLAC "\n"
16030 ".section .fixup,\"ax\"\n"
16031 "3: movl $-1,%[err]\n"
16032 diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16033 index bbae024..e1528f9 100644
16034 --- a/arch/x86/include/uapi/asm/e820.h
16035 +++ b/arch/x86/include/uapi/asm/e820.h
16036 @@ -63,7 +63,7 @@ struct e820map {
16037 #define ISA_START_ADDRESS 0xa0000
16038 #define ISA_END_ADDRESS 0x100000
16039
16040 -#define BIOS_BEGIN 0x000a0000
16041 +#define BIOS_BEGIN 0x000c0000
16042 #define BIOS_END 0x00100000
16043
16044 #define BIOS_ROM_BASE 0xffe00000
16045 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16046 index 34e923a..0c6bb6e 100644
16047 --- a/arch/x86/kernel/Makefile
16048 +++ b/arch/x86/kernel/Makefile
16049 @@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16050 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16051 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16052 obj-y += probe_roms.o
16053 -obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16054 +obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16055 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16056 obj-y += syscall_$(BITS).o
16057 obj-$(CONFIG_X86_64) += vsyscall_64.o
16058 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16059 index d5e0d71..6533e08 100644
16060 --- a/arch/x86/kernel/acpi/sleep.c
16061 +++ b/arch/x86/kernel/acpi/sleep.c
16062 @@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16063 #else /* CONFIG_64BIT */
16064 #ifdef CONFIG_SMP
16065 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16066 +
16067 + pax_open_kernel();
16068 early_gdt_descr.address =
16069 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16070 + pax_close_kernel();
16071 +
16072 initial_gs = per_cpu_offset(smp_processor_id());
16073 #endif
16074 initial_code = (unsigned long)wakeup_long64;
16075 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16076 index 13ab720..95d5442 100644
16077 --- a/arch/x86/kernel/acpi/wakeup_32.S
16078 +++ b/arch/x86/kernel/acpi/wakeup_32.S
16079 @@ -30,13 +30,11 @@ wakeup_pmode_return:
16080 # and restore the stack ... but you need gdt for this to work
16081 movl saved_context_esp, %esp
16082
16083 - movl %cs:saved_magic, %eax
16084 - cmpl $0x12345678, %eax
16085 + cmpl $0x12345678, saved_magic
16086 jne bogus_magic
16087
16088 # jump to place where we left off
16089 - movl saved_eip, %eax
16090 - jmp *%eax
16091 + jmp *(saved_eip)
16092
16093 bogus_magic:
16094 jmp bogus_magic
16095 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16096 index ef5ccca..bd83949 100644
16097 --- a/arch/x86/kernel/alternative.c
16098 +++ b/arch/x86/kernel/alternative.c
16099 @@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16100 */
16101 for (a = start; a < end; a++) {
16102 instr = (u8 *)&a->instr_offset + a->instr_offset;
16103 +
16104 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16105 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16106 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16107 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16108 +#endif
16109 +
16110 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16111 BUG_ON(a->replacementlen > a->instrlen);
16112 BUG_ON(a->instrlen > sizeof(insnbuf));
16113 @@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16114 for (poff = start; poff < end; poff++) {
16115 u8 *ptr = (u8 *)poff + *poff;
16116
16117 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16118 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16119 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16120 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16121 +#endif
16122 +
16123 if (!*poff || ptr < text || ptr >= text_end)
16124 continue;
16125 /* turn DS segment override prefix into lock prefix */
16126 - if (*ptr == 0x3e)
16127 + if (*ktla_ktva(ptr) == 0x3e)
16128 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16129 }
16130 mutex_unlock(&text_mutex);
16131 @@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16132 for (poff = start; poff < end; poff++) {
16133 u8 *ptr = (u8 *)poff + *poff;
16134
16135 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16136 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16137 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16138 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16139 +#endif
16140 +
16141 if (!*poff || ptr < text || ptr >= text_end)
16142 continue;
16143 /* turn lock prefix into DS segment override prefix */
16144 - if (*ptr == 0xf0)
16145 + if (*ktla_ktva(ptr) == 0xf0)
16146 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16147 }
16148 mutex_unlock(&text_mutex);
16149 @@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16150
16151 BUG_ON(p->len > MAX_PATCH_LEN);
16152 /* prep the buffer with the original instructions */
16153 - memcpy(insnbuf, p->instr, p->len);
16154 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16155 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16156 (unsigned long)p->instr, p->len);
16157
16158 @@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16159 if (!uniproc_patched || num_possible_cpus() == 1)
16160 free_init_pages("SMP alternatives",
16161 (unsigned long)__smp_locks,
16162 - (unsigned long)__smp_locks_end);
16163 + PAGE_ALIGN((unsigned long)__smp_locks_end));
16164 #endif
16165
16166 apply_paravirt(__parainstructions, __parainstructions_end);
16167 @@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16168 * instructions. And on the local CPU you need to be protected again NMI or MCE
16169 * handlers seeing an inconsistent instruction while you patch.
16170 */
16171 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
16172 +void *__kprobes text_poke_early(void *addr, const void *opcode,
16173 size_t len)
16174 {
16175 unsigned long flags;
16176 local_irq_save(flags);
16177 - memcpy(addr, opcode, len);
16178 +
16179 + pax_open_kernel();
16180 + memcpy(ktla_ktva(addr), opcode, len);
16181 sync_core();
16182 + pax_close_kernel();
16183 +
16184 local_irq_restore(flags);
16185 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16186 that causes hangs on some VIA CPUs. */
16187 @@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16188 */
16189 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16190 {
16191 - unsigned long flags;
16192 - char *vaddr;
16193 + unsigned char *vaddr = ktla_ktva(addr);
16194 struct page *pages[2];
16195 - int i;
16196 + size_t i;
16197
16198 if (!core_kernel_text((unsigned long)addr)) {
16199 - pages[0] = vmalloc_to_page(addr);
16200 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16201 + pages[0] = vmalloc_to_page(vaddr);
16202 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16203 } else {
16204 - pages[0] = virt_to_page(addr);
16205 + pages[0] = virt_to_page(vaddr);
16206 WARN_ON(!PageReserved(pages[0]));
16207 - pages[1] = virt_to_page(addr + PAGE_SIZE);
16208 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
16209 }
16210 BUG_ON(!pages[0]);
16211 - local_irq_save(flags);
16212 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
16213 - if (pages[1])
16214 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
16215 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
16216 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
16217 - clear_fixmap(FIX_TEXT_POKE0);
16218 - if (pages[1])
16219 - clear_fixmap(FIX_TEXT_POKE1);
16220 - local_flush_tlb();
16221 - sync_core();
16222 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
16223 - that causes hangs on some VIA CPUs. */
16224 + text_poke_early(addr, opcode, len);
16225 for (i = 0; i < len; i++)
16226 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
16227 - local_irq_restore(flags);
16228 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
16229 return addr;
16230 }
16231
16232 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
16233 index cbf5121..812b537 100644
16234 --- a/arch/x86/kernel/apic/apic.c
16235 +++ b/arch/x86/kernel/apic/apic.c
16236 @@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
16237 /*
16238 * Debug level, exported for io_apic.c
16239 */
16240 -unsigned int apic_verbosity;
16241 +int apic_verbosity;
16242
16243 int pic_mode;
16244
16245 @@ -1956,7 +1956,7 @@ void smp_error_interrupt(struct pt_regs *regs)
16246 apic_write(APIC_ESR, 0);
16247 v1 = apic_read(APIC_ESR);
16248 ack_APIC_irq();
16249 - atomic_inc(&irq_err_count);
16250 + atomic_inc_unchecked(&irq_err_count);
16251
16252 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
16253 smp_processor_id(), v0 , v1);
16254 diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
16255 index 00c77cf..2dc6a2d 100644
16256 --- a/arch/x86/kernel/apic/apic_flat_64.c
16257 +++ b/arch/x86/kernel/apic/apic_flat_64.c
16258 @@ -157,7 +157,7 @@ static int flat_probe(void)
16259 return 1;
16260 }
16261
16262 -static struct apic apic_flat = {
16263 +static struct apic apic_flat __read_only = {
16264 .name = "flat",
16265 .probe = flat_probe,
16266 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
16267 @@ -271,7 +271,7 @@ static int physflat_probe(void)
16268 return 0;
16269 }
16270
16271 -static struct apic apic_physflat = {
16272 +static struct apic apic_physflat __read_only = {
16273
16274 .name = "physical flat",
16275 .probe = physflat_probe,
16276 diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
16277 index d50e364..543bee3 100644
16278 --- a/arch/x86/kernel/apic/bigsmp_32.c
16279 +++ b/arch/x86/kernel/apic/bigsmp_32.c
16280 @@ -152,7 +152,7 @@ static int probe_bigsmp(void)
16281 return dmi_bigsmp;
16282 }
16283
16284 -static struct apic apic_bigsmp = {
16285 +static struct apic apic_bigsmp __read_only = {
16286
16287 .name = "bigsmp",
16288 .probe = probe_bigsmp,
16289 diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
16290 index 0874799..24a836e 100644
16291 --- a/arch/x86/kernel/apic/es7000_32.c
16292 +++ b/arch/x86/kernel/apic/es7000_32.c
16293 @@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
16294 return ret && es7000_apic_is_cluster();
16295 }
16296
16297 -/* We've been warned by a false positive warning.Use __refdata to keep calm. */
16298 -static struct apic __refdata apic_es7000_cluster = {
16299 +static struct apic apic_es7000_cluster __read_only = {
16300
16301 .name = "es7000",
16302 .probe = probe_es7000,
16303 @@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
16304 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
16305 };
16306
16307 -static struct apic __refdata apic_es7000 = {
16308 +static struct apic __refdata apic_es7000 __read_only = {
16309
16310 .name = "es7000",
16311 .probe = probe_es7000,
16312 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
16313 index b739d39..6e4f1db 100644
16314 --- a/arch/x86/kernel/apic/io_apic.c
16315 +++ b/arch/x86/kernel/apic/io_apic.c
16316 @@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
16317 }
16318 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
16319
16320 -void lock_vector_lock(void)
16321 +void lock_vector_lock(void) __acquires(vector_lock)
16322 {
16323 /* Used to the online set of cpus does not change
16324 * during assign_irq_vector.
16325 @@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
16326 raw_spin_lock(&vector_lock);
16327 }
16328
16329 -void unlock_vector_lock(void)
16330 +void unlock_vector_lock(void) __releases(vector_lock)
16331 {
16332 raw_spin_unlock(&vector_lock);
16333 }
16334 @@ -2399,7 +2399,7 @@ static void ack_apic_edge(struct irq_data *data)
16335 ack_APIC_irq();
16336 }
16337
16338 -atomic_t irq_mis_count;
16339 +atomic_unchecked_t irq_mis_count;
16340
16341 #ifdef CONFIG_GENERIC_PENDING_IRQ
16342 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
16343 @@ -2540,7 +2540,7 @@ static void ack_apic_level(struct irq_data *data)
16344 * at the cpu.
16345 */
16346 if (!(v & (1 << (i & 0x1f)))) {
16347 - atomic_inc(&irq_mis_count);
16348 + atomic_inc_unchecked(&irq_mis_count);
16349
16350 eoi_ioapic_irq(irq, cfg);
16351 }
16352 diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
16353 index d661ee9..791fd33 100644
16354 --- a/arch/x86/kernel/apic/numaq_32.c
16355 +++ b/arch/x86/kernel/apic/numaq_32.c
16356 @@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
16357 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
16358 }
16359
16360 -/* Use __refdata to keep false positive warning calm. */
16361 -static struct apic __refdata apic_numaq = {
16362 +static struct apic apic_numaq __read_only = {
16363
16364 .name = "NUMAQ",
16365 .probe = probe_numaq,
16366 diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
16367 index eb35ef9..f184a21 100644
16368 --- a/arch/x86/kernel/apic/probe_32.c
16369 +++ b/arch/x86/kernel/apic/probe_32.c
16370 @@ -72,7 +72,7 @@ static int probe_default(void)
16371 return 1;
16372 }
16373
16374 -static struct apic apic_default = {
16375 +static struct apic apic_default __read_only = {
16376
16377 .name = "default",
16378 .probe = probe_default,
16379 diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
16380 index 77c95c0..434f8a4 100644
16381 --- a/arch/x86/kernel/apic/summit_32.c
16382 +++ b/arch/x86/kernel/apic/summit_32.c
16383 @@ -486,7 +486,7 @@ void setup_summit(void)
16384 }
16385 #endif
16386
16387 -static struct apic apic_summit = {
16388 +static struct apic apic_summit __read_only = {
16389
16390 .name = "summit",
16391 .probe = probe_summit,
16392 diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
16393 index c88baa4..757aee1 100644
16394 --- a/arch/x86/kernel/apic/x2apic_cluster.c
16395 +++ b/arch/x86/kernel/apic/x2apic_cluster.c
16396 @@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
16397 return notifier_from_errno(err);
16398 }
16399
16400 -static struct notifier_block __refdata x2apic_cpu_notifier = {
16401 +static struct notifier_block x2apic_cpu_notifier = {
16402 .notifier_call = update_clusterinfo,
16403 };
16404
16405 @@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
16406 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
16407 }
16408
16409 -static struct apic apic_x2apic_cluster = {
16410 +static struct apic apic_x2apic_cluster __read_only = {
16411
16412 .name = "cluster x2apic",
16413 .probe = x2apic_cluster_probe,
16414 diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
16415 index 562a76d..a003c0f 100644
16416 --- a/arch/x86/kernel/apic/x2apic_phys.c
16417 +++ b/arch/x86/kernel/apic/x2apic_phys.c
16418 @@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
16419 return apic == &apic_x2apic_phys;
16420 }
16421
16422 -static struct apic apic_x2apic_phys = {
16423 +static struct apic apic_x2apic_phys __read_only = {
16424
16425 .name = "physical x2apic",
16426 .probe = x2apic_phys_probe,
16427 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
16428 index 8cfade9..b9d04fc 100644
16429 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
16430 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
16431 @@ -333,7 +333,7 @@ static int uv_probe(void)
16432 return apic == &apic_x2apic_uv_x;
16433 }
16434
16435 -static struct apic __refdata apic_x2apic_uv_x = {
16436 +static struct apic apic_x2apic_uv_x __read_only = {
16437
16438 .name = "UV large system",
16439 .probe = uv_probe,
16440 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
16441 index d65464e..1035d31 100644
16442 --- a/arch/x86/kernel/apm_32.c
16443 +++ b/arch/x86/kernel/apm_32.c
16444 @@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
16445 * This is for buggy BIOS's that refer to (real mode) segment 0x40
16446 * even though they are called in protected mode.
16447 */
16448 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
16449 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
16450 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
16451
16452 static const char driver_version[] = "1.16ac"; /* no spaces */
16453 @@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
16454 BUG_ON(cpu != 0);
16455 gdt = get_cpu_gdt_table(cpu);
16456 save_desc_40 = gdt[0x40 / 8];
16457 +
16458 + pax_open_kernel();
16459 gdt[0x40 / 8] = bad_bios_desc;
16460 + pax_close_kernel();
16461
16462 apm_irq_save(flags);
16463 APM_DO_SAVE_SEGS;
16464 @@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
16465 &call->esi);
16466 APM_DO_RESTORE_SEGS;
16467 apm_irq_restore(flags);
16468 +
16469 + pax_open_kernel();
16470 gdt[0x40 / 8] = save_desc_40;
16471 + pax_close_kernel();
16472 +
16473 put_cpu();
16474
16475 return call->eax & 0xff;
16476 @@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
16477 BUG_ON(cpu != 0);
16478 gdt = get_cpu_gdt_table(cpu);
16479 save_desc_40 = gdt[0x40 / 8];
16480 +
16481 + pax_open_kernel();
16482 gdt[0x40 / 8] = bad_bios_desc;
16483 + pax_close_kernel();
16484
16485 apm_irq_save(flags);
16486 APM_DO_SAVE_SEGS;
16487 @@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
16488 &call->eax);
16489 APM_DO_RESTORE_SEGS;
16490 apm_irq_restore(flags);
16491 +
16492 + pax_open_kernel();
16493 gdt[0x40 / 8] = save_desc_40;
16494 + pax_close_kernel();
16495 +
16496 put_cpu();
16497 return error;
16498 }
16499 @@ -2345,12 +2359,15 @@ static int __init apm_init(void)
16500 * code to that CPU.
16501 */
16502 gdt = get_cpu_gdt_table(0);
16503 +
16504 + pax_open_kernel();
16505 set_desc_base(&gdt[APM_CS >> 3],
16506 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
16507 set_desc_base(&gdt[APM_CS_16 >> 3],
16508 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
16509 set_desc_base(&gdt[APM_DS >> 3],
16510 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
16511 + pax_close_kernel();
16512
16513 proc_create("apm", 0, NULL, &apm_file_ops);
16514
16515 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
16516 index 2861082..6d4718e 100644
16517 --- a/arch/x86/kernel/asm-offsets.c
16518 +++ b/arch/x86/kernel/asm-offsets.c
16519 @@ -33,6 +33,8 @@ void common(void) {
16520 OFFSET(TI_status, thread_info, status);
16521 OFFSET(TI_addr_limit, thread_info, addr_limit);
16522 OFFSET(TI_preempt_count, thread_info, preempt_count);
16523 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
16524 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
16525
16526 BLANK();
16527 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
16528 @@ -53,8 +55,26 @@ void common(void) {
16529 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
16530 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
16531 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
16532 +
16533 +#ifdef CONFIG_PAX_KERNEXEC
16534 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
16535 #endif
16536
16537 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16538 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
16539 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
16540 +#ifdef CONFIG_X86_64
16541 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
16542 +#endif
16543 +#endif
16544 +
16545 +#endif
16546 +
16547 + BLANK();
16548 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
16549 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
16550 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
16551 +
16552 #ifdef CONFIG_XEN
16553 BLANK();
16554 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
16555 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
16556 index 1b4754f..fbb4227 100644
16557 --- a/arch/x86/kernel/asm-offsets_64.c
16558 +++ b/arch/x86/kernel/asm-offsets_64.c
16559 @@ -76,6 +76,7 @@ int main(void)
16560 BLANK();
16561 #undef ENTRY
16562
16563 + DEFINE(TSS_size, sizeof(struct tss_struct));
16564 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
16565 BLANK();
16566
16567 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
16568 index a0e067d..9c7db16 100644
16569 --- a/arch/x86/kernel/cpu/Makefile
16570 +++ b/arch/x86/kernel/cpu/Makefile
16571 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
16572 CFLAGS_REMOVE_perf_event.o = -pg
16573 endif
16574
16575 -# Make sure load_percpu_segment has no stackprotector
16576 -nostackp := $(call cc-option, -fno-stack-protector)
16577 -CFLAGS_common.o := $(nostackp)
16578 -
16579 obj-y := intel_cacheinfo.o scattered.o topology.o
16580 obj-y += proc.o capflags.o powerflags.o common.o
16581 obj-y += vmware.o hypervisor.o mshyperv.o
16582 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
16583 index 15239ff..e23e04e 100644
16584 --- a/arch/x86/kernel/cpu/amd.c
16585 +++ b/arch/x86/kernel/cpu/amd.c
16586 @@ -733,7 +733,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
16587 unsigned int size)
16588 {
16589 /* AMD errata T13 (order #21922) */
16590 - if ((c->x86 == 6)) {
16591 + if (c->x86 == 6) {
16592 /* Duron Rev A0 */
16593 if (c->x86_model == 3 && c->x86_mask == 0)
16594 size = 64;
16595 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
16596 index 9c3ab43..51e6366 100644
16597 --- a/arch/x86/kernel/cpu/common.c
16598 +++ b/arch/x86/kernel/cpu/common.c
16599 @@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
16600
16601 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
16602
16603 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
16604 -#ifdef CONFIG_X86_64
16605 - /*
16606 - * We need valid kernel segments for data and code in long mode too
16607 - * IRET will check the segment types kkeil 2000/10/28
16608 - * Also sysret mandates a special GDT layout
16609 - *
16610 - * TLS descriptors are currently at a different place compared to i386.
16611 - * Hopefully nobody expects them at a fixed place (Wine?)
16612 - */
16613 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
16614 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
16615 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
16616 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
16617 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
16618 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
16619 -#else
16620 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
16621 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
16622 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
16623 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
16624 - /*
16625 - * Segments used for calling PnP BIOS have byte granularity.
16626 - * They code segments and data segments have fixed 64k limits,
16627 - * the transfer segment sizes are set at run time.
16628 - */
16629 - /* 32-bit code */
16630 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
16631 - /* 16-bit code */
16632 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
16633 - /* 16-bit data */
16634 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
16635 - /* 16-bit data */
16636 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
16637 - /* 16-bit data */
16638 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
16639 - /*
16640 - * The APM segments have byte granularity and their bases
16641 - * are set at run time. All have 64k limits.
16642 - */
16643 - /* 32-bit code */
16644 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
16645 - /* 16-bit code */
16646 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
16647 - /* data */
16648 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
16649 -
16650 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
16651 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
16652 - GDT_STACK_CANARY_INIT
16653 -#endif
16654 -} };
16655 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
16656 -
16657 static int __init x86_xsave_setup(char *s)
16658 {
16659 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
16660 @@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
16661 {
16662 struct desc_ptr gdt_descr;
16663
16664 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
16665 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16666 gdt_descr.size = GDT_SIZE - 1;
16667 load_gdt(&gdt_descr);
16668 /* Reload the per-cpu base */
16669 @@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
16670 /* Filter out anything that depends on CPUID levels we don't have */
16671 filter_cpuid_features(c, true);
16672
16673 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16674 + setup_clear_cpu_cap(X86_FEATURE_SEP);
16675 +#endif
16676 +
16677 /* If the model name is still unset, do table lookup. */
16678 if (!c->x86_model_id[0]) {
16679 const char *p;
16680 @@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
16681 }
16682 __setup("clearcpuid=", setup_disablecpuid);
16683
16684 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
16685 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
16686 +
16687 #ifdef CONFIG_X86_64
16688 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
16689 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
16690 - (unsigned long) nmi_idt_table };
16691 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
16692
16693 DEFINE_PER_CPU_FIRST(union irq_stack_union,
16694 irq_stack_union) __aligned(PAGE_SIZE);
16695 @@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
16696 EXPORT_PER_CPU_SYMBOL(current_task);
16697
16698 DEFINE_PER_CPU(unsigned long, kernel_stack) =
16699 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
16700 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
16701 EXPORT_PER_CPU_SYMBOL(kernel_stack);
16702
16703 DEFINE_PER_CPU(char *, irq_stack_ptr) =
16704 @@ -1224,7 +1176,7 @@ void __cpuinit cpu_init(void)
16705 int i;
16706
16707 cpu = stack_smp_processor_id();
16708 - t = &per_cpu(init_tss, cpu);
16709 + t = init_tss + cpu;
16710 oist = &per_cpu(orig_ist, cpu);
16711
16712 #ifdef CONFIG_NUMA
16713 @@ -1250,7 +1202,7 @@ void __cpuinit cpu_init(void)
16714 switch_to_new_gdt(cpu);
16715 loadsegment(fs, 0);
16716
16717 - load_idt((const struct desc_ptr *)&idt_descr);
16718 + load_idt(&idt_descr);
16719
16720 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
16721 syscall_init();
16722 @@ -1259,7 +1211,6 @@ void __cpuinit cpu_init(void)
16723 wrmsrl(MSR_KERNEL_GS_BASE, 0);
16724 barrier();
16725
16726 - x86_configure_nx();
16727 enable_x2apic();
16728
16729 /*
16730 @@ -1311,7 +1262,7 @@ void __cpuinit cpu_init(void)
16731 {
16732 int cpu = smp_processor_id();
16733 struct task_struct *curr = current;
16734 - struct tss_struct *t = &per_cpu(init_tss, cpu);
16735 + struct tss_struct *t = init_tss + cpu;
16736 struct thread_struct *thread = &curr->thread;
16737
16738 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
16739 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
16740 index fcaabd0..7b55a26 100644
16741 --- a/arch/x86/kernel/cpu/intel.c
16742 +++ b/arch/x86/kernel/cpu/intel.c
16743 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
16744 * Update the IDT descriptor and reload the IDT so that
16745 * it uses the read-only mapped virtual address.
16746 */
16747 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
16748 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
16749 load_idt(&idt_descr);
16750 }
16751 #endif
16752 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
16753 index 84c1309..39b7224 100644
16754 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
16755 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
16756 @@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
16757 };
16758
16759 #ifdef CONFIG_AMD_NB
16760 +static struct attribute *default_attrs_amd_nb[] = {
16761 + &type.attr,
16762 + &level.attr,
16763 + &coherency_line_size.attr,
16764 + &physical_line_partition.attr,
16765 + &ways_of_associativity.attr,
16766 + &number_of_sets.attr,
16767 + &size.attr,
16768 + &shared_cpu_map.attr,
16769 + &shared_cpu_list.attr,
16770 + NULL,
16771 + NULL,
16772 + NULL,
16773 + NULL
16774 +};
16775 +
16776 static struct attribute ** __cpuinit amd_l3_attrs(void)
16777 {
16778 static struct attribute **attrs;
16779 @@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
16780
16781 n = ARRAY_SIZE(default_attrs);
16782
16783 - if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
16784 - n += 2;
16785 -
16786 - if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
16787 - n += 1;
16788 -
16789 - attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
16790 - if (attrs == NULL)
16791 - return attrs = default_attrs;
16792 -
16793 - for (n = 0; default_attrs[n]; n++)
16794 - attrs[n] = default_attrs[n];
16795 + attrs = default_attrs_amd_nb;
16796
16797 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
16798 attrs[n++] = &cache_disable_0.attr;
16799 @@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
16800 .default_attrs = default_attrs,
16801 };
16802
16803 +#ifdef CONFIG_AMD_NB
16804 +static struct kobj_type ktype_cache_amd_nb = {
16805 + .sysfs_ops = &sysfs_ops,
16806 + .default_attrs = default_attrs_amd_nb,
16807 +};
16808 +#endif
16809 +
16810 static struct kobj_type ktype_percpu_entry = {
16811 .sysfs_ops = &sysfs_ops,
16812 };
16813 @@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
16814 return retval;
16815 }
16816
16817 +#ifdef CONFIG_AMD_NB
16818 + amd_l3_attrs();
16819 +#endif
16820 +
16821 for (i = 0; i < num_cache_leaves; i++) {
16822 + struct kobj_type *ktype;
16823 +
16824 this_object = INDEX_KOBJECT_PTR(cpu, i);
16825 this_object->cpu = cpu;
16826 this_object->index = i;
16827
16828 this_leaf = CPUID4_INFO_IDX(cpu, i);
16829
16830 - ktype_cache.default_attrs = default_attrs;
16831 + ktype = &ktype_cache;
16832 #ifdef CONFIG_AMD_NB
16833 if (this_leaf->base.nb)
16834 - ktype_cache.default_attrs = amd_l3_attrs();
16835 + ktype = &ktype_cache_amd_nb;
16836 #endif
16837 retval = kobject_init_and_add(&(this_object->kobj),
16838 - &ktype_cache,
16839 + ktype,
16840 per_cpu(ici_cache_kobject, cpu),
16841 "index%1lu", i);
16842 if (unlikely(retval)) {
16843 @@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
16844 return NOTIFY_OK;
16845 }
16846
16847 -static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
16848 +static struct notifier_block cacheinfo_cpu_notifier = {
16849 .notifier_call = cacheinfo_cpu_callback,
16850 };
16851
16852 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
16853 index 80dbda8..b45ebad 100644
16854 --- a/arch/x86/kernel/cpu/mcheck/mce.c
16855 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
16856 @@ -45,6 +45,7 @@
16857 #include <asm/processor.h>
16858 #include <asm/mce.h>
16859 #include <asm/msr.h>
16860 +#include <asm/local.h>
16861
16862 #include "mce-internal.h"
16863
16864 @@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
16865 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
16866 m->cs, m->ip);
16867
16868 - if (m->cs == __KERNEL_CS)
16869 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
16870 print_symbol("{%s}", m->ip);
16871 pr_cont("\n");
16872 }
16873 @@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
16874
16875 #define PANIC_TIMEOUT 5 /* 5 seconds */
16876
16877 -static atomic_t mce_paniced;
16878 +static atomic_unchecked_t mce_paniced;
16879
16880 static int fake_panic;
16881 -static atomic_t mce_fake_paniced;
16882 +static atomic_unchecked_t mce_fake_paniced;
16883
16884 /* Panic in progress. Enable interrupts and wait for final IPI */
16885 static void wait_for_panic(void)
16886 @@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
16887 /*
16888 * Make sure only one CPU runs in machine check panic
16889 */
16890 - if (atomic_inc_return(&mce_paniced) > 1)
16891 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
16892 wait_for_panic();
16893 barrier();
16894
16895 @@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
16896 console_verbose();
16897 } else {
16898 /* Don't log too much for fake panic */
16899 - if (atomic_inc_return(&mce_fake_paniced) > 1)
16900 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
16901 return;
16902 }
16903 /* First print corrected ones that are still unlogged */
16904 @@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
16905 * might have been modified by someone else.
16906 */
16907 rmb();
16908 - if (atomic_read(&mce_paniced))
16909 + if (atomic_read_unchecked(&mce_paniced))
16910 wait_for_panic();
16911 if (!mca_cfg.monarch_timeout)
16912 goto out;
16913 @@ -1662,7 +1663,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
16914 }
16915
16916 /* Call the installed machine check handler for this CPU setup. */
16917 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
16918 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
16919 unexpected_machine_check;
16920
16921 /*
16922 @@ -1685,7 +1686,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
16923 return;
16924 }
16925
16926 + pax_open_kernel();
16927 machine_check_vector = do_machine_check;
16928 + pax_close_kernel();
16929
16930 __mcheck_cpu_init_generic();
16931 __mcheck_cpu_init_vendor(c);
16932 @@ -1699,7 +1702,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
16933 */
16934
16935 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
16936 -static int mce_chrdev_open_count; /* #times opened */
16937 +static local_t mce_chrdev_open_count; /* #times opened */
16938 static int mce_chrdev_open_exclu; /* already open exclusive? */
16939
16940 static int mce_chrdev_open(struct inode *inode, struct file *file)
16941 @@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
16942 spin_lock(&mce_chrdev_state_lock);
16943
16944 if (mce_chrdev_open_exclu ||
16945 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
16946 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
16947 spin_unlock(&mce_chrdev_state_lock);
16948
16949 return -EBUSY;
16950 @@ -1715,7 +1718,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
16951
16952 if (file->f_flags & O_EXCL)
16953 mce_chrdev_open_exclu = 1;
16954 - mce_chrdev_open_count++;
16955 + local_inc(&mce_chrdev_open_count);
16956
16957 spin_unlock(&mce_chrdev_state_lock);
16958
16959 @@ -1726,7 +1729,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
16960 {
16961 spin_lock(&mce_chrdev_state_lock);
16962
16963 - mce_chrdev_open_count--;
16964 + local_dec(&mce_chrdev_open_count);
16965 mce_chrdev_open_exclu = 0;
16966
16967 spin_unlock(&mce_chrdev_state_lock);
16968 @@ -2372,7 +2375,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
16969 return NOTIFY_OK;
16970 }
16971
16972 -static struct notifier_block mce_cpu_notifier __cpuinitdata = {
16973 +static struct notifier_block mce_cpu_notifier = {
16974 .notifier_call = mce_cpu_callback,
16975 };
16976
16977 @@ -2450,7 +2453,7 @@ struct dentry *mce_get_debugfs_dir(void)
16978 static void mce_reset(void)
16979 {
16980 cpu_missing = 0;
16981 - atomic_set(&mce_fake_paniced, 0);
16982 + atomic_set_unchecked(&mce_fake_paniced, 0);
16983 atomic_set(&mce_executing, 0);
16984 atomic_set(&mce_callin, 0);
16985 atomic_set(&global_nwo, 0);
16986 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
16987 index 2d5454c..51987eb 100644
16988 --- a/arch/x86/kernel/cpu/mcheck/p5.c
16989 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
16990 @@ -11,6 +11,7 @@
16991 #include <asm/processor.h>
16992 #include <asm/mce.h>
16993 #include <asm/msr.h>
16994 +#include <asm/pgtable.h>
16995
16996 /* By default disabled */
16997 int mce_p5_enabled __read_mostly;
16998 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
16999 if (!cpu_has(c, X86_FEATURE_MCE))
17000 return;
17001
17002 + pax_open_kernel();
17003 machine_check_vector = pentium_machine_check;
17004 + pax_close_kernel();
17005 /* Make sure the vector pointer is visible before we enable MCEs: */
17006 wmb();
17007
17008 diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17009 index 47a1870..8c019a7 100644
17010 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17011 +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17012 @@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17013 return notifier_from_errno(err);
17014 }
17015
17016 -static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17017 +static struct notifier_block thermal_throttle_cpu_notifier =
17018 {
17019 .notifier_call = thermal_throttle_cpu_callback,
17020 };
17021 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17022 index 2d7998f..17c9de1 100644
17023 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
17024 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17025 @@ -10,6 +10,7 @@
17026 #include <asm/processor.h>
17027 #include <asm/mce.h>
17028 #include <asm/msr.h>
17029 +#include <asm/pgtable.h>
17030
17031 /* Machine check handler for WinChip C6: */
17032 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17033 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17034 {
17035 u32 lo, hi;
17036
17037 + pax_open_kernel();
17038 machine_check_vector = winchip_machine_check;
17039 + pax_close_kernel();
17040 /* Make sure the vector pointer is visible before we enable MCEs: */
17041 wmb();
17042
17043 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17044 index 726bf96..81f0526 100644
17045 --- a/arch/x86/kernel/cpu/mtrr/main.c
17046 +++ b/arch/x86/kernel/cpu/mtrr/main.c
17047 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17048 u64 size_or_mask, size_and_mask;
17049 static bool mtrr_aps_delayed_init;
17050
17051 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17052 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17053
17054 const struct mtrr_ops *mtrr_if;
17055
17056 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17057 index df5e41f..816c719 100644
17058 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17059 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17060 @@ -25,7 +25,7 @@ struct mtrr_ops {
17061 int (*validate_add_page)(unsigned long base, unsigned long size,
17062 unsigned int type);
17063 int (*have_wrcomb)(void);
17064 -};
17065 +} __do_const;
17066
17067 extern int generic_get_free_region(unsigned long base, unsigned long size,
17068 int replace_reg);
17069 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17070 index 6774c17..a691911 100644
17071 --- a/arch/x86/kernel/cpu/perf_event.c
17072 +++ b/arch/x86/kernel/cpu/perf_event.c
17073 @@ -1880,7 +1880,7 @@ static unsigned long get_segment_base(unsigned int segment)
17074 if (idx > GDT_ENTRIES)
17075 return 0;
17076
17077 - desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17078 + desc = get_cpu_gdt_table(smp_processor_id());
17079 }
17080
17081 return get_desc_base(desc + idx);
17082 @@ -1970,7 +1970,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17083 break;
17084
17085 perf_callchain_store(entry, frame.return_address);
17086 - fp = frame.next_frame;
17087 + fp = (const void __force_user *)frame.next_frame;
17088 }
17089 }
17090
17091 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17092 index 4914e94..60b06e3 100644
17093 --- a/arch/x86/kernel/cpu/perf_event_intel.c
17094 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
17095 @@ -1958,10 +1958,10 @@ __init int intel_pmu_init(void)
17096 * v2 and above have a perf capabilities MSR
17097 */
17098 if (version > 1) {
17099 - u64 capabilities;
17100 + u64 capabilities = x86_pmu.intel_cap.capabilities;
17101
17102 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17103 - x86_pmu.intel_cap.capabilities = capabilities;
17104 + if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17105 + x86_pmu.intel_cap.capabilities = capabilities;
17106 }
17107
17108 intel_ds_init();
17109 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17110 index b43200d..62cddfe 100644
17111 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17112 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17113 @@ -2826,7 +2826,7 @@ static int
17114 return NOTIFY_OK;
17115 }
17116
17117 -static struct notifier_block uncore_cpu_nb __cpuinitdata = {
17118 +static struct notifier_block uncore_cpu_nb = {
17119 .notifier_call = uncore_cpu_notifier,
17120 /*
17121 * to migrate uncore events, our notifier should be executed
17122 diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
17123 index 60c7891..9e911d3 100644
17124 --- a/arch/x86/kernel/cpuid.c
17125 +++ b/arch/x86/kernel/cpuid.c
17126 @@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
17127 return notifier_from_errno(err);
17128 }
17129
17130 -static struct notifier_block __refdata cpuid_class_cpu_notifier =
17131 +static struct notifier_block cpuid_class_cpu_notifier =
17132 {
17133 .notifier_call = cpuid_class_cpu_callback,
17134 };
17135 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
17136 index 74467fe..18793d5 100644
17137 --- a/arch/x86/kernel/crash.c
17138 +++ b/arch/x86/kernel/crash.c
17139 @@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
17140 {
17141 #ifdef CONFIG_X86_32
17142 struct pt_regs fixed_regs;
17143 -#endif
17144
17145 -#ifdef CONFIG_X86_32
17146 - if (!user_mode_vm(regs)) {
17147 + if (!user_mode(regs)) {
17148 crash_fixup_ss_esp(&fixed_regs, regs);
17149 regs = &fixed_regs;
17150 }
17151 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
17152 index 37250fe..bf2ec74 100644
17153 --- a/arch/x86/kernel/doublefault_32.c
17154 +++ b/arch/x86/kernel/doublefault_32.c
17155 @@ -11,7 +11,7 @@
17156
17157 #define DOUBLEFAULT_STACKSIZE (1024)
17158 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
17159 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
17160 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
17161
17162 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
17163
17164 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
17165 unsigned long gdt, tss;
17166
17167 store_gdt(&gdt_desc);
17168 - gdt = gdt_desc.address;
17169 + gdt = (unsigned long)gdt_desc.address;
17170
17171 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
17172
17173 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
17174 /* 0x2 bit is always set */
17175 .flags = X86_EFLAGS_SF | 0x2,
17176 .sp = STACK_START,
17177 - .es = __USER_DS,
17178 + .es = __KERNEL_DS,
17179 .cs = __KERNEL_CS,
17180 .ss = __KERNEL_DS,
17181 - .ds = __USER_DS,
17182 + .ds = __KERNEL_DS,
17183 .fs = __KERNEL_PERCPU,
17184
17185 .__cr3 = __pa_nodebug(swapper_pg_dir),
17186 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
17187 index ae42418b..787c16b 100644
17188 --- a/arch/x86/kernel/dumpstack.c
17189 +++ b/arch/x86/kernel/dumpstack.c
17190 @@ -2,6 +2,9 @@
17191 * Copyright (C) 1991, 1992 Linus Torvalds
17192 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
17193 */
17194 +#ifdef CONFIG_GRKERNSEC_HIDESYM
17195 +#define __INCLUDED_BY_HIDESYM 1
17196 +#endif
17197 #include <linux/kallsyms.h>
17198 #include <linux/kprobes.h>
17199 #include <linux/uaccess.h>
17200 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
17201 static void
17202 print_ftrace_graph_addr(unsigned long addr, void *data,
17203 const struct stacktrace_ops *ops,
17204 - struct thread_info *tinfo, int *graph)
17205 + struct task_struct *task, int *graph)
17206 {
17207 - struct task_struct *task;
17208 unsigned long ret_addr;
17209 int index;
17210
17211 if (addr != (unsigned long)return_to_handler)
17212 return;
17213
17214 - task = tinfo->task;
17215 index = task->curr_ret_stack;
17216
17217 if (!task->ret_stack || index < *graph)
17218 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17219 static inline void
17220 print_ftrace_graph_addr(unsigned long addr, void *data,
17221 const struct stacktrace_ops *ops,
17222 - struct thread_info *tinfo, int *graph)
17223 + struct task_struct *task, int *graph)
17224 { }
17225 #endif
17226
17227 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17228 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
17229 */
17230
17231 -static inline int valid_stack_ptr(struct thread_info *tinfo,
17232 - void *p, unsigned int size, void *end)
17233 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
17234 {
17235 - void *t = tinfo;
17236 if (end) {
17237 if (p < end && p >= (end-THREAD_SIZE))
17238 return 1;
17239 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
17240 }
17241
17242 unsigned long
17243 -print_context_stack(struct thread_info *tinfo,
17244 +print_context_stack(struct task_struct *task, void *stack_start,
17245 unsigned long *stack, unsigned long bp,
17246 const struct stacktrace_ops *ops, void *data,
17247 unsigned long *end, int *graph)
17248 {
17249 struct stack_frame *frame = (struct stack_frame *)bp;
17250
17251 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
17252 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
17253 unsigned long addr;
17254
17255 addr = *stack;
17256 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
17257 } else {
17258 ops->address(data, addr, 0);
17259 }
17260 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17261 + print_ftrace_graph_addr(addr, data, ops, task, graph);
17262 }
17263 stack++;
17264 }
17265 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
17266 EXPORT_SYMBOL_GPL(print_context_stack);
17267
17268 unsigned long
17269 -print_context_stack_bp(struct thread_info *tinfo,
17270 +print_context_stack_bp(struct task_struct *task, void *stack_start,
17271 unsigned long *stack, unsigned long bp,
17272 const struct stacktrace_ops *ops, void *data,
17273 unsigned long *end, int *graph)
17274 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17275 struct stack_frame *frame = (struct stack_frame *)bp;
17276 unsigned long *ret_addr = &frame->return_address;
17277
17278 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
17279 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
17280 unsigned long addr = *ret_addr;
17281
17282 if (!__kernel_text_address(addr))
17283 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17284 ops->address(data, addr, 1);
17285 frame = frame->next_frame;
17286 ret_addr = &frame->return_address;
17287 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17288 + print_ftrace_graph_addr(addr, data, ops, task, graph);
17289 }
17290
17291 return (unsigned long)frame;
17292 @@ -189,7 +188,7 @@ void dump_stack(void)
17293
17294 bp = stack_frame(current, NULL);
17295 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
17296 - current->pid, current->comm, print_tainted(),
17297 + task_pid_nr(current), current->comm, print_tainted(),
17298 init_utsname()->release,
17299 (int)strcspn(init_utsname()->version, " "),
17300 init_utsname()->version);
17301 @@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
17302 }
17303 EXPORT_SYMBOL_GPL(oops_begin);
17304
17305 +extern void gr_handle_kernel_exploit(void);
17306 +
17307 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17308 {
17309 if (regs && kexec_should_crash(current))
17310 @@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17311 panic("Fatal exception in interrupt");
17312 if (panic_on_oops)
17313 panic("Fatal exception");
17314 - do_exit(signr);
17315 +
17316 + gr_handle_kernel_exploit();
17317 +
17318 + do_group_exit(signr);
17319 }
17320
17321 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17322 @@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17323 print_modules();
17324 show_regs(regs);
17325 #ifdef CONFIG_X86_32
17326 - if (user_mode_vm(regs)) {
17327 + if (user_mode(regs)) {
17328 sp = regs->sp;
17329 ss = regs->ss & 0xffff;
17330 } else {
17331 @@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
17332 unsigned long flags = oops_begin();
17333 int sig = SIGSEGV;
17334
17335 - if (!user_mode_vm(regs))
17336 + if (!user_mode(regs))
17337 report_bug(regs->ip, regs);
17338
17339 if (__die(str, regs, err))
17340 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
17341 index 1038a41..db2c12b 100644
17342 --- a/arch/x86/kernel/dumpstack_32.c
17343 +++ b/arch/x86/kernel/dumpstack_32.c
17344 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17345 bp = stack_frame(task, regs);
17346
17347 for (;;) {
17348 - struct thread_info *context;
17349 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
17350
17351 - context = (struct thread_info *)
17352 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
17353 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
17354 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
17355
17356 - stack = (unsigned long *)context->previous_esp;
17357 - if (!stack)
17358 + if (stack_start == task_stack_page(task))
17359 break;
17360 + stack = *(unsigned long **)stack_start;
17361 if (ops->stack(data, "IRQ") < 0)
17362 break;
17363 touch_nmi_watchdog();
17364 @@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
17365 {
17366 int i;
17367
17368 - __show_regs(regs, !user_mode_vm(regs));
17369 + __show_regs(regs, !user_mode(regs));
17370
17371 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
17372 TASK_COMM_LEN, current->comm, task_pid_nr(current),
17373 @@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
17374 * When in-kernel, we also print out the stack and code at the
17375 * time of the fault..
17376 */
17377 - if (!user_mode_vm(regs)) {
17378 + if (!user_mode(regs)) {
17379 unsigned int code_prologue = code_bytes * 43 / 64;
17380 unsigned int code_len = code_bytes;
17381 unsigned char c;
17382 u8 *ip;
17383 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
17384
17385 pr_emerg("Stack:\n");
17386 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
17387
17388 pr_emerg("Code:");
17389
17390 - ip = (u8 *)regs->ip - code_prologue;
17391 + ip = (u8 *)regs->ip - code_prologue + cs_base;
17392 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
17393 /* try starting at IP */
17394 - ip = (u8 *)regs->ip;
17395 + ip = (u8 *)regs->ip + cs_base;
17396 code_len = code_len - code_prologue + 1;
17397 }
17398 for (i = 0; i < code_len; i++, ip++) {
17399 @@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
17400 pr_cont(" Bad EIP value.");
17401 break;
17402 }
17403 - if (ip == (u8 *)regs->ip)
17404 + if (ip == (u8 *)regs->ip + cs_base)
17405 pr_cont(" <%02x>", c);
17406 else
17407 pr_cont(" %02x", c);
17408 @@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
17409 {
17410 unsigned short ud2;
17411
17412 + ip = ktla_ktva(ip);
17413 if (ip < PAGE_OFFSET)
17414 return 0;
17415 if (probe_kernel_address((unsigned short *)ip, ud2))
17416 @@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
17417
17418 return ud2 == 0x0b0f;
17419 }
17420 +
17421 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17422 +void pax_check_alloca(unsigned long size)
17423 +{
17424 + unsigned long sp = (unsigned long)&sp, stack_left;
17425 +
17426 + /* all kernel stacks are of the same size */
17427 + stack_left = sp & (THREAD_SIZE - 1);
17428 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
17429 +}
17430 +EXPORT_SYMBOL(pax_check_alloca);
17431 +#endif
17432 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
17433 index b653675..51cc8c0 100644
17434 --- a/arch/x86/kernel/dumpstack_64.c
17435 +++ b/arch/x86/kernel/dumpstack_64.c
17436 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17437 unsigned long *irq_stack_end =
17438 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
17439 unsigned used = 0;
17440 - struct thread_info *tinfo;
17441 int graph = 0;
17442 unsigned long dummy;
17443 + void *stack_start;
17444
17445 if (!task)
17446 task = current;
17447 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17448 * current stack address. If the stacks consist of nested
17449 * exceptions
17450 */
17451 - tinfo = task_thread_info(task);
17452 for (;;) {
17453 char *id;
17454 unsigned long *estack_end;
17455 +
17456 estack_end = in_exception_stack(cpu, (unsigned long)stack,
17457 &used, &id);
17458
17459 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17460 if (ops->stack(data, id) < 0)
17461 break;
17462
17463 - bp = ops->walk_stack(tinfo, stack, bp, ops,
17464 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
17465 data, estack_end, &graph);
17466 ops->stack(data, "<EOE>");
17467 /*
17468 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17469 * second-to-last pointer (index -2 to end) in the
17470 * exception stack:
17471 */
17472 + if ((u16)estack_end[-1] != __KERNEL_DS)
17473 + goto out;
17474 stack = (unsigned long *) estack_end[-2];
17475 continue;
17476 }
17477 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17478 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
17479 if (ops->stack(data, "IRQ") < 0)
17480 break;
17481 - bp = ops->walk_stack(tinfo, stack, bp,
17482 + bp = ops->walk_stack(task, irq_stack, stack, bp,
17483 ops, data, irq_stack_end, &graph);
17484 /*
17485 * We link to the next stack (which would be
17486 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17487 /*
17488 * This handles the process stack:
17489 */
17490 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
17491 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
17492 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
17493 +out:
17494 put_cpu();
17495 }
17496 EXPORT_SYMBOL(dump_trace);
17497 @@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
17498 {
17499 int i;
17500 unsigned long sp;
17501 - const int cpu = smp_processor_id();
17502 + const int cpu = raw_smp_processor_id();
17503 struct task_struct *cur = current;
17504
17505 sp = regs->sp;
17506 @@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
17507
17508 return ud2 == 0x0b0f;
17509 }
17510 +
17511 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17512 +void pax_check_alloca(unsigned long size)
17513 +{
17514 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
17515 + unsigned cpu, used;
17516 + char *id;
17517 +
17518 + /* check the process stack first */
17519 + stack_start = (unsigned long)task_stack_page(current);
17520 + stack_end = stack_start + THREAD_SIZE;
17521 + if (likely(stack_start <= sp && sp < stack_end)) {
17522 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
17523 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
17524 + return;
17525 + }
17526 +
17527 + cpu = get_cpu();
17528 +
17529 + /* check the irq stacks */
17530 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
17531 + stack_start = stack_end - IRQ_STACK_SIZE;
17532 + if (stack_start <= sp && sp < stack_end) {
17533 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
17534 + put_cpu();
17535 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
17536 + return;
17537 + }
17538 +
17539 + /* check the exception stacks */
17540 + used = 0;
17541 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
17542 + stack_start = stack_end - EXCEPTION_STKSZ;
17543 + if (stack_end && stack_start <= sp && sp < stack_end) {
17544 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
17545 + put_cpu();
17546 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
17547 + return;
17548 + }
17549 +
17550 + put_cpu();
17551 +
17552 + /* unknown stack */
17553 + BUG();
17554 +}
17555 +EXPORT_SYMBOL(pax_check_alloca);
17556 +#endif
17557 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
17558 index 9b9f18b..9fcaa04 100644
17559 --- a/arch/x86/kernel/early_printk.c
17560 +++ b/arch/x86/kernel/early_printk.c
17561 @@ -7,6 +7,7 @@
17562 #include <linux/pci_regs.h>
17563 #include <linux/pci_ids.h>
17564 #include <linux/errno.h>
17565 +#include <linux/sched.h>
17566 #include <asm/io.h>
17567 #include <asm/processor.h>
17568 #include <asm/fcntl.h>
17569 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
17570 index 6ed91d9..6cc365b 100644
17571 --- a/arch/x86/kernel/entry_32.S
17572 +++ b/arch/x86/kernel/entry_32.S
17573 @@ -177,13 +177,153 @@
17574 /*CFI_REL_OFFSET gs, PT_GS*/
17575 .endm
17576 .macro SET_KERNEL_GS reg
17577 +
17578 +#ifdef CONFIG_CC_STACKPROTECTOR
17579 movl $(__KERNEL_STACK_CANARY), \reg
17580 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17581 + movl $(__USER_DS), \reg
17582 +#else
17583 + xorl \reg, \reg
17584 +#endif
17585 +
17586 movl \reg, %gs
17587 .endm
17588
17589 #endif /* CONFIG_X86_32_LAZY_GS */
17590
17591 -.macro SAVE_ALL
17592 +.macro pax_enter_kernel
17593 +#ifdef CONFIG_PAX_KERNEXEC
17594 + call pax_enter_kernel
17595 +#endif
17596 +.endm
17597 +
17598 +.macro pax_exit_kernel
17599 +#ifdef CONFIG_PAX_KERNEXEC
17600 + call pax_exit_kernel
17601 +#endif
17602 +.endm
17603 +
17604 +#ifdef CONFIG_PAX_KERNEXEC
17605 +ENTRY(pax_enter_kernel)
17606 +#ifdef CONFIG_PARAVIRT
17607 + pushl %eax
17608 + pushl %ecx
17609 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
17610 + mov %eax, %esi
17611 +#else
17612 + mov %cr0, %esi
17613 +#endif
17614 + bts $16, %esi
17615 + jnc 1f
17616 + mov %cs, %esi
17617 + cmp $__KERNEL_CS, %esi
17618 + jz 3f
17619 + ljmp $__KERNEL_CS, $3f
17620 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
17621 +2:
17622 +#ifdef CONFIG_PARAVIRT
17623 + mov %esi, %eax
17624 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17625 +#else
17626 + mov %esi, %cr0
17627 +#endif
17628 +3:
17629 +#ifdef CONFIG_PARAVIRT
17630 + popl %ecx
17631 + popl %eax
17632 +#endif
17633 + ret
17634 +ENDPROC(pax_enter_kernel)
17635 +
17636 +ENTRY(pax_exit_kernel)
17637 +#ifdef CONFIG_PARAVIRT
17638 + pushl %eax
17639 + pushl %ecx
17640 +#endif
17641 + mov %cs, %esi
17642 + cmp $__KERNEXEC_KERNEL_CS, %esi
17643 + jnz 2f
17644 +#ifdef CONFIG_PARAVIRT
17645 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
17646 + mov %eax, %esi
17647 +#else
17648 + mov %cr0, %esi
17649 +#endif
17650 + btr $16, %esi
17651 + ljmp $__KERNEL_CS, $1f
17652 +1:
17653 +#ifdef CONFIG_PARAVIRT
17654 + mov %esi, %eax
17655 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
17656 +#else
17657 + mov %esi, %cr0
17658 +#endif
17659 +2:
17660 +#ifdef CONFIG_PARAVIRT
17661 + popl %ecx
17662 + popl %eax
17663 +#endif
17664 + ret
17665 +ENDPROC(pax_exit_kernel)
17666 +#endif
17667 +
17668 +.macro pax_erase_kstack
17669 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17670 + call pax_erase_kstack
17671 +#endif
17672 +.endm
17673 +
17674 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17675 +/*
17676 + * ebp: thread_info
17677 + */
17678 +ENTRY(pax_erase_kstack)
17679 + pushl %edi
17680 + pushl %ecx
17681 + pushl %eax
17682 +
17683 + mov TI_lowest_stack(%ebp), %edi
17684 + mov $-0xBEEF, %eax
17685 + std
17686 +
17687 +1: mov %edi, %ecx
17688 + and $THREAD_SIZE_asm - 1, %ecx
17689 + shr $2, %ecx
17690 + repne scasl
17691 + jecxz 2f
17692 +
17693 + cmp $2*16, %ecx
17694 + jc 2f
17695 +
17696 + mov $2*16, %ecx
17697 + repe scasl
17698 + jecxz 2f
17699 + jne 1b
17700 +
17701 +2: cld
17702 + mov %esp, %ecx
17703 + sub %edi, %ecx
17704 +
17705 + cmp $THREAD_SIZE_asm, %ecx
17706 + jb 3f
17707 + ud2
17708 +3:
17709 +
17710 + shr $2, %ecx
17711 + rep stosl
17712 +
17713 + mov TI_task_thread_sp0(%ebp), %edi
17714 + sub $128, %edi
17715 + mov %edi, TI_lowest_stack(%ebp)
17716 +
17717 + popl %eax
17718 + popl %ecx
17719 + popl %edi
17720 + ret
17721 +ENDPROC(pax_erase_kstack)
17722 +#endif
17723 +
17724 +.macro __SAVE_ALL _DS
17725 cld
17726 PUSH_GS
17727 pushl_cfi %fs
17728 @@ -206,7 +346,7 @@
17729 CFI_REL_OFFSET ecx, 0
17730 pushl_cfi %ebx
17731 CFI_REL_OFFSET ebx, 0
17732 - movl $(__USER_DS), %edx
17733 + movl $\_DS, %edx
17734 movl %edx, %ds
17735 movl %edx, %es
17736 movl $(__KERNEL_PERCPU), %edx
17737 @@ -214,6 +354,15 @@
17738 SET_KERNEL_GS %edx
17739 .endm
17740
17741 +.macro SAVE_ALL
17742 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
17743 + __SAVE_ALL __KERNEL_DS
17744 + pax_enter_kernel
17745 +#else
17746 + __SAVE_ALL __USER_DS
17747 +#endif
17748 +.endm
17749 +
17750 .macro RESTORE_INT_REGS
17751 popl_cfi %ebx
17752 CFI_RESTORE ebx
17753 @@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
17754 popfl_cfi
17755 jmp syscall_exit
17756 CFI_ENDPROC
17757 -END(ret_from_fork)
17758 +ENDPROC(ret_from_fork)
17759
17760 ENTRY(ret_from_kernel_thread)
17761 CFI_STARTPROC
17762 @@ -344,7 +493,15 @@ ret_from_intr:
17763 andl $SEGMENT_RPL_MASK, %eax
17764 #endif
17765 cmpl $USER_RPL, %eax
17766 +
17767 +#ifdef CONFIG_PAX_KERNEXEC
17768 + jae resume_userspace
17769 +
17770 + pax_exit_kernel
17771 + jmp resume_kernel
17772 +#else
17773 jb resume_kernel # not returning to v8086 or userspace
17774 +#endif
17775
17776 ENTRY(resume_userspace)
17777 LOCKDEP_SYS_EXIT
17778 @@ -356,8 +513,8 @@ ENTRY(resume_userspace)
17779 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
17780 # int/exception return?
17781 jne work_pending
17782 - jmp restore_all
17783 -END(ret_from_exception)
17784 + jmp restore_all_pax
17785 +ENDPROC(ret_from_exception)
17786
17787 #ifdef CONFIG_PREEMPT
17788 ENTRY(resume_kernel)
17789 @@ -372,7 +529,7 @@ need_resched:
17790 jz restore_all
17791 call preempt_schedule_irq
17792 jmp need_resched
17793 -END(resume_kernel)
17794 +ENDPROC(resume_kernel)
17795 #endif
17796 CFI_ENDPROC
17797 /*
17798 @@ -406,30 +563,45 @@ sysenter_past_esp:
17799 /*CFI_REL_OFFSET cs, 0*/
17800 /*
17801 * Push current_thread_info()->sysenter_return to the stack.
17802 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
17803 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
17804 */
17805 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
17806 + pushl_cfi $0
17807 CFI_REL_OFFSET eip, 0
17808
17809 pushl_cfi %eax
17810 SAVE_ALL
17811 + GET_THREAD_INFO(%ebp)
17812 + movl TI_sysenter_return(%ebp),%ebp
17813 + movl %ebp,PT_EIP(%esp)
17814 ENABLE_INTERRUPTS(CLBR_NONE)
17815
17816 /*
17817 * Load the potential sixth argument from user stack.
17818 * Careful about security.
17819 */
17820 + movl PT_OLDESP(%esp),%ebp
17821 +
17822 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17823 + mov PT_OLDSS(%esp),%ds
17824 +1: movl %ds:(%ebp),%ebp
17825 + push %ss
17826 + pop %ds
17827 +#else
17828 cmpl $__PAGE_OFFSET-3,%ebp
17829 jae syscall_fault
17830 ASM_STAC
17831 1: movl (%ebp),%ebp
17832 ASM_CLAC
17833 +#endif
17834 +
17835 movl %ebp,PT_EBP(%esp)
17836 _ASM_EXTABLE(1b,syscall_fault)
17837
17838 GET_THREAD_INFO(%ebp)
17839
17840 +#ifdef CONFIG_PAX_RANDKSTACK
17841 + pax_erase_kstack
17842 +#endif
17843 +
17844 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
17845 jnz sysenter_audit
17846 sysenter_do_call:
17847 @@ -444,12 +616,24 @@ sysenter_do_call:
17848 testl $_TIF_ALLWORK_MASK, %ecx
17849 jne sysexit_audit
17850 sysenter_exit:
17851 +
17852 +#ifdef CONFIG_PAX_RANDKSTACK
17853 + pushl_cfi %eax
17854 + movl %esp, %eax
17855 + call pax_randomize_kstack
17856 + popl_cfi %eax
17857 +#endif
17858 +
17859 + pax_erase_kstack
17860 +
17861 /* if something modifies registers it must also disable sysexit */
17862 movl PT_EIP(%esp), %edx
17863 movl PT_OLDESP(%esp), %ecx
17864 xorl %ebp,%ebp
17865 TRACE_IRQS_ON
17866 1: mov PT_FS(%esp), %fs
17867 +2: mov PT_DS(%esp), %ds
17868 +3: mov PT_ES(%esp), %es
17869 PTGS_TO_GS
17870 ENABLE_INTERRUPTS_SYSEXIT
17871
17872 @@ -466,6 +650,9 @@ sysenter_audit:
17873 movl %eax,%edx /* 2nd arg: syscall number */
17874 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
17875 call __audit_syscall_entry
17876 +
17877 + pax_erase_kstack
17878 +
17879 pushl_cfi %ebx
17880 movl PT_EAX(%esp),%eax /* reload syscall number */
17881 jmp sysenter_do_call
17882 @@ -491,10 +678,16 @@ sysexit_audit:
17883
17884 CFI_ENDPROC
17885 .pushsection .fixup,"ax"
17886 -2: movl $0,PT_FS(%esp)
17887 +4: movl $0,PT_FS(%esp)
17888 + jmp 1b
17889 +5: movl $0,PT_DS(%esp)
17890 + jmp 1b
17891 +6: movl $0,PT_ES(%esp)
17892 jmp 1b
17893 .popsection
17894 - _ASM_EXTABLE(1b,2b)
17895 + _ASM_EXTABLE(1b,4b)
17896 + _ASM_EXTABLE(2b,5b)
17897 + _ASM_EXTABLE(3b,6b)
17898 PTGS_TO_GS_EX
17899 ENDPROC(ia32_sysenter_target)
17900
17901 @@ -509,6 +702,11 @@ ENTRY(system_call)
17902 pushl_cfi %eax # save orig_eax
17903 SAVE_ALL
17904 GET_THREAD_INFO(%ebp)
17905 +
17906 +#ifdef CONFIG_PAX_RANDKSTACK
17907 + pax_erase_kstack
17908 +#endif
17909 +
17910 # system call tracing in operation / emulation
17911 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
17912 jnz syscall_trace_entry
17913 @@ -527,6 +725,15 @@ syscall_exit:
17914 testl $_TIF_ALLWORK_MASK, %ecx # current->work
17915 jne syscall_exit_work
17916
17917 +restore_all_pax:
17918 +
17919 +#ifdef CONFIG_PAX_RANDKSTACK
17920 + movl %esp, %eax
17921 + call pax_randomize_kstack
17922 +#endif
17923 +
17924 + pax_erase_kstack
17925 +
17926 restore_all:
17927 TRACE_IRQS_IRET
17928 restore_all_notrace:
17929 @@ -583,14 +790,34 @@ ldt_ss:
17930 * compensating for the offset by changing to the ESPFIX segment with
17931 * a base address that matches for the difference.
17932 */
17933 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
17934 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
17935 mov %esp, %edx /* load kernel esp */
17936 mov PT_OLDESP(%esp), %eax /* load userspace esp */
17937 mov %dx, %ax /* eax: new kernel esp */
17938 sub %eax, %edx /* offset (low word is 0) */
17939 +#ifdef CONFIG_SMP
17940 + movl PER_CPU_VAR(cpu_number), %ebx
17941 + shll $PAGE_SHIFT_asm, %ebx
17942 + addl $cpu_gdt_table, %ebx
17943 +#else
17944 + movl $cpu_gdt_table, %ebx
17945 +#endif
17946 shr $16, %edx
17947 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
17948 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
17949 +
17950 +#ifdef CONFIG_PAX_KERNEXEC
17951 + mov %cr0, %esi
17952 + btr $16, %esi
17953 + mov %esi, %cr0
17954 +#endif
17955 +
17956 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
17957 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
17958 +
17959 +#ifdef CONFIG_PAX_KERNEXEC
17960 + bts $16, %esi
17961 + mov %esi, %cr0
17962 +#endif
17963 +
17964 pushl_cfi $__ESPFIX_SS
17965 pushl_cfi %eax /* new kernel esp */
17966 /* Disable interrupts, but do not irqtrace this section: we
17967 @@ -619,20 +846,18 @@ work_resched:
17968 movl TI_flags(%ebp), %ecx
17969 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
17970 # than syscall tracing?
17971 - jz restore_all
17972 + jz restore_all_pax
17973 testb $_TIF_NEED_RESCHED, %cl
17974 jnz work_resched
17975
17976 work_notifysig: # deal with pending signals and
17977 # notify-resume requests
17978 + movl %esp, %eax
17979 #ifdef CONFIG_VM86
17980 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
17981 - movl %esp, %eax
17982 jne work_notifysig_v86 # returning to kernel-space or
17983 # vm86-space
17984 1:
17985 -#else
17986 - movl %esp, %eax
17987 #endif
17988 TRACE_IRQS_ON
17989 ENABLE_INTERRUPTS(CLBR_NONE)
17990 @@ -653,7 +878,7 @@ work_notifysig_v86:
17991 movl %eax, %esp
17992 jmp 1b
17993 #endif
17994 -END(work_pending)
17995 +ENDPROC(work_pending)
17996
17997 # perform syscall exit tracing
17998 ALIGN
17999 @@ -661,11 +886,14 @@ syscall_trace_entry:
18000 movl $-ENOSYS,PT_EAX(%esp)
18001 movl %esp, %eax
18002 call syscall_trace_enter
18003 +
18004 + pax_erase_kstack
18005 +
18006 /* What it returned is what we'll actually use. */
18007 cmpl $(NR_syscalls), %eax
18008 jnae syscall_call
18009 jmp syscall_exit
18010 -END(syscall_trace_entry)
18011 +ENDPROC(syscall_trace_entry)
18012
18013 # perform syscall exit tracing
18014 ALIGN
18015 @@ -678,21 +906,25 @@ syscall_exit_work:
18016 movl %esp, %eax
18017 call syscall_trace_leave
18018 jmp resume_userspace
18019 -END(syscall_exit_work)
18020 +ENDPROC(syscall_exit_work)
18021 CFI_ENDPROC
18022
18023 RING0_INT_FRAME # can't unwind into user space anyway
18024 syscall_fault:
18025 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18026 + push %ss
18027 + pop %ds
18028 +#endif
18029 ASM_CLAC
18030 GET_THREAD_INFO(%ebp)
18031 movl $-EFAULT,PT_EAX(%esp)
18032 jmp resume_userspace
18033 -END(syscall_fault)
18034 +ENDPROC(syscall_fault)
18035
18036 syscall_badsys:
18037 movl $-ENOSYS,PT_EAX(%esp)
18038 jmp resume_userspace
18039 -END(syscall_badsys)
18040 +ENDPROC(syscall_badsys)
18041 CFI_ENDPROC
18042 /*
18043 * End of kprobes section
18044 @@ -753,8 +985,15 @@ PTREGSCALL1(vm86old)
18045 * normal stack and adjusts ESP with the matching offset.
18046 */
18047 /* fixup the stack */
18048 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18049 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18050 +#ifdef CONFIG_SMP
18051 + movl PER_CPU_VAR(cpu_number), %ebx
18052 + shll $PAGE_SHIFT_asm, %ebx
18053 + addl $cpu_gdt_table, %ebx
18054 +#else
18055 + movl $cpu_gdt_table, %ebx
18056 +#endif
18057 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18058 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18059 shl $16, %eax
18060 addl %esp, %eax /* the adjusted stack pointer */
18061 pushl_cfi $__KERNEL_DS
18062 @@ -807,7 +1046,7 @@ vector=vector+1
18063 .endr
18064 2: jmp common_interrupt
18065 .endr
18066 -END(irq_entries_start)
18067 +ENDPROC(irq_entries_start)
18068
18069 .previous
18070 END(interrupt)
18071 @@ -858,7 +1097,7 @@ ENTRY(coprocessor_error)
18072 pushl_cfi $do_coprocessor_error
18073 jmp error_code
18074 CFI_ENDPROC
18075 -END(coprocessor_error)
18076 +ENDPROC(coprocessor_error)
18077
18078 ENTRY(simd_coprocessor_error)
18079 RING0_INT_FRAME
18080 @@ -880,7 +1119,7 @@ ENTRY(simd_coprocessor_error)
18081 #endif
18082 jmp error_code
18083 CFI_ENDPROC
18084 -END(simd_coprocessor_error)
18085 +ENDPROC(simd_coprocessor_error)
18086
18087 ENTRY(device_not_available)
18088 RING0_INT_FRAME
18089 @@ -889,18 +1128,18 @@ ENTRY(device_not_available)
18090 pushl_cfi $do_device_not_available
18091 jmp error_code
18092 CFI_ENDPROC
18093 -END(device_not_available)
18094 +ENDPROC(device_not_available)
18095
18096 #ifdef CONFIG_PARAVIRT
18097 ENTRY(native_iret)
18098 iret
18099 _ASM_EXTABLE(native_iret, iret_exc)
18100 -END(native_iret)
18101 +ENDPROC(native_iret)
18102
18103 ENTRY(native_irq_enable_sysexit)
18104 sti
18105 sysexit
18106 -END(native_irq_enable_sysexit)
18107 +ENDPROC(native_irq_enable_sysexit)
18108 #endif
18109
18110 ENTRY(overflow)
18111 @@ -910,7 +1149,7 @@ ENTRY(overflow)
18112 pushl_cfi $do_overflow
18113 jmp error_code
18114 CFI_ENDPROC
18115 -END(overflow)
18116 +ENDPROC(overflow)
18117
18118 ENTRY(bounds)
18119 RING0_INT_FRAME
18120 @@ -919,7 +1158,7 @@ ENTRY(bounds)
18121 pushl_cfi $do_bounds
18122 jmp error_code
18123 CFI_ENDPROC
18124 -END(bounds)
18125 +ENDPROC(bounds)
18126
18127 ENTRY(invalid_op)
18128 RING0_INT_FRAME
18129 @@ -928,7 +1167,7 @@ ENTRY(invalid_op)
18130 pushl_cfi $do_invalid_op
18131 jmp error_code
18132 CFI_ENDPROC
18133 -END(invalid_op)
18134 +ENDPROC(invalid_op)
18135
18136 ENTRY(coprocessor_segment_overrun)
18137 RING0_INT_FRAME
18138 @@ -937,7 +1176,7 @@ ENTRY(coprocessor_segment_overrun)
18139 pushl_cfi $do_coprocessor_segment_overrun
18140 jmp error_code
18141 CFI_ENDPROC
18142 -END(coprocessor_segment_overrun)
18143 +ENDPROC(coprocessor_segment_overrun)
18144
18145 ENTRY(invalid_TSS)
18146 RING0_EC_FRAME
18147 @@ -945,7 +1184,7 @@ ENTRY(invalid_TSS)
18148 pushl_cfi $do_invalid_TSS
18149 jmp error_code
18150 CFI_ENDPROC
18151 -END(invalid_TSS)
18152 +ENDPROC(invalid_TSS)
18153
18154 ENTRY(segment_not_present)
18155 RING0_EC_FRAME
18156 @@ -953,7 +1192,7 @@ ENTRY(segment_not_present)
18157 pushl_cfi $do_segment_not_present
18158 jmp error_code
18159 CFI_ENDPROC
18160 -END(segment_not_present)
18161 +ENDPROC(segment_not_present)
18162
18163 ENTRY(stack_segment)
18164 RING0_EC_FRAME
18165 @@ -961,7 +1200,7 @@ ENTRY(stack_segment)
18166 pushl_cfi $do_stack_segment
18167 jmp error_code
18168 CFI_ENDPROC
18169 -END(stack_segment)
18170 +ENDPROC(stack_segment)
18171
18172 ENTRY(alignment_check)
18173 RING0_EC_FRAME
18174 @@ -969,7 +1208,7 @@ ENTRY(alignment_check)
18175 pushl_cfi $do_alignment_check
18176 jmp error_code
18177 CFI_ENDPROC
18178 -END(alignment_check)
18179 +ENDPROC(alignment_check)
18180
18181 ENTRY(divide_error)
18182 RING0_INT_FRAME
18183 @@ -978,7 +1217,7 @@ ENTRY(divide_error)
18184 pushl_cfi $do_divide_error
18185 jmp error_code
18186 CFI_ENDPROC
18187 -END(divide_error)
18188 +ENDPROC(divide_error)
18189
18190 #ifdef CONFIG_X86_MCE
18191 ENTRY(machine_check)
18192 @@ -988,7 +1227,7 @@ ENTRY(machine_check)
18193 pushl_cfi machine_check_vector
18194 jmp error_code
18195 CFI_ENDPROC
18196 -END(machine_check)
18197 +ENDPROC(machine_check)
18198 #endif
18199
18200 ENTRY(spurious_interrupt_bug)
18201 @@ -998,7 +1237,7 @@ ENTRY(spurious_interrupt_bug)
18202 pushl_cfi $do_spurious_interrupt_bug
18203 jmp error_code
18204 CFI_ENDPROC
18205 -END(spurious_interrupt_bug)
18206 +ENDPROC(spurious_interrupt_bug)
18207 /*
18208 * End of kprobes section
18209 */
18210 @@ -1101,7 +1340,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
18211
18212 ENTRY(mcount)
18213 ret
18214 -END(mcount)
18215 +ENDPROC(mcount)
18216
18217 ENTRY(ftrace_caller)
18218 cmpl $0, function_trace_stop
18219 @@ -1134,7 +1373,7 @@ ftrace_graph_call:
18220 .globl ftrace_stub
18221 ftrace_stub:
18222 ret
18223 -END(ftrace_caller)
18224 +ENDPROC(ftrace_caller)
18225
18226 ENTRY(ftrace_regs_caller)
18227 pushf /* push flags before compare (in cs location) */
18228 @@ -1235,7 +1474,7 @@ trace:
18229 popl %ecx
18230 popl %eax
18231 jmp ftrace_stub
18232 -END(mcount)
18233 +ENDPROC(mcount)
18234 #endif /* CONFIG_DYNAMIC_FTRACE */
18235 #endif /* CONFIG_FUNCTION_TRACER */
18236
18237 @@ -1253,7 +1492,7 @@ ENTRY(ftrace_graph_caller)
18238 popl %ecx
18239 popl %eax
18240 ret
18241 -END(ftrace_graph_caller)
18242 +ENDPROC(ftrace_graph_caller)
18243
18244 .globl return_to_handler
18245 return_to_handler:
18246 @@ -1309,15 +1548,18 @@ error_code:
18247 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
18248 REG_TO_PTGS %ecx
18249 SET_KERNEL_GS %ecx
18250 - movl $(__USER_DS), %ecx
18251 + movl $(__KERNEL_DS), %ecx
18252 movl %ecx, %ds
18253 movl %ecx, %es
18254 +
18255 + pax_enter_kernel
18256 +
18257 TRACE_IRQS_OFF
18258 movl %esp,%eax # pt_regs pointer
18259 call *%edi
18260 jmp ret_from_exception
18261 CFI_ENDPROC
18262 -END(page_fault)
18263 +ENDPROC(page_fault)
18264
18265 /*
18266 * Debug traps and NMI can happen at the one SYSENTER instruction
18267 @@ -1360,7 +1602,7 @@ debug_stack_correct:
18268 call do_debug
18269 jmp ret_from_exception
18270 CFI_ENDPROC
18271 -END(debug)
18272 +ENDPROC(debug)
18273
18274 /*
18275 * NMI is doubly nasty. It can happen _while_ we're handling
18276 @@ -1398,6 +1640,9 @@ nmi_stack_correct:
18277 xorl %edx,%edx # zero error code
18278 movl %esp,%eax # pt_regs pointer
18279 call do_nmi
18280 +
18281 + pax_exit_kernel
18282 +
18283 jmp restore_all_notrace
18284 CFI_ENDPROC
18285
18286 @@ -1434,12 +1679,15 @@ nmi_espfix_stack:
18287 FIXUP_ESPFIX_STACK # %eax == %esp
18288 xorl %edx,%edx # zero error code
18289 call do_nmi
18290 +
18291 + pax_exit_kernel
18292 +
18293 RESTORE_REGS
18294 lss 12+4(%esp), %esp # back to espfix stack
18295 CFI_ADJUST_CFA_OFFSET -24
18296 jmp irq_return
18297 CFI_ENDPROC
18298 -END(nmi)
18299 +ENDPROC(nmi)
18300
18301 ENTRY(int3)
18302 RING0_INT_FRAME
18303 @@ -1452,14 +1700,14 @@ ENTRY(int3)
18304 call do_int3
18305 jmp ret_from_exception
18306 CFI_ENDPROC
18307 -END(int3)
18308 +ENDPROC(int3)
18309
18310 ENTRY(general_protection)
18311 RING0_EC_FRAME
18312 pushl_cfi $do_general_protection
18313 jmp error_code
18314 CFI_ENDPROC
18315 -END(general_protection)
18316 +ENDPROC(general_protection)
18317
18318 #ifdef CONFIG_KVM_GUEST
18319 ENTRY(async_page_fault)
18320 @@ -1468,7 +1716,7 @@ ENTRY(async_page_fault)
18321 pushl_cfi $do_async_page_fault
18322 jmp error_code
18323 CFI_ENDPROC
18324 -END(async_page_fault)
18325 +ENDPROC(async_page_fault)
18326 #endif
18327
18328 /*
18329 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
18330 index cb3c591..bc63707 100644
18331 --- a/arch/x86/kernel/entry_64.S
18332 +++ b/arch/x86/kernel/entry_64.S
18333 @@ -59,6 +59,8 @@
18334 #include <asm/context_tracking.h>
18335 #include <asm/smap.h>
18336 #include <linux/err.h>
18337 +#include <asm/pgtable.h>
18338 +#include <asm/alternative-asm.h>
18339
18340 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
18341 #include <linux/elf-em.h>
18342 @@ -80,8 +82,9 @@
18343 #ifdef CONFIG_DYNAMIC_FTRACE
18344
18345 ENTRY(function_hook)
18346 + pax_force_retaddr
18347 retq
18348 -END(function_hook)
18349 +ENDPROC(function_hook)
18350
18351 /* skip is set if stack has been adjusted */
18352 .macro ftrace_caller_setup skip=0
18353 @@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
18354 #endif
18355
18356 GLOBAL(ftrace_stub)
18357 + pax_force_retaddr
18358 retq
18359 -END(ftrace_caller)
18360 +ENDPROC(ftrace_caller)
18361
18362 ENTRY(ftrace_regs_caller)
18363 /* Save the current flags before compare (in SS location)*/
18364 @@ -191,7 +195,7 @@ ftrace_restore_flags:
18365 popfq
18366 jmp ftrace_stub
18367
18368 -END(ftrace_regs_caller)
18369 +ENDPROC(ftrace_regs_caller)
18370
18371
18372 #else /* ! CONFIG_DYNAMIC_FTRACE */
18373 @@ -212,6 +216,7 @@ ENTRY(function_hook)
18374 #endif
18375
18376 GLOBAL(ftrace_stub)
18377 + pax_force_retaddr
18378 retq
18379
18380 trace:
18381 @@ -225,12 +230,13 @@ trace:
18382 #endif
18383 subq $MCOUNT_INSN_SIZE, %rdi
18384
18385 + pax_force_fptr ftrace_trace_function
18386 call *ftrace_trace_function
18387
18388 MCOUNT_RESTORE_FRAME
18389
18390 jmp ftrace_stub
18391 -END(function_hook)
18392 +ENDPROC(function_hook)
18393 #endif /* CONFIG_DYNAMIC_FTRACE */
18394 #endif /* CONFIG_FUNCTION_TRACER */
18395
18396 @@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
18397
18398 MCOUNT_RESTORE_FRAME
18399
18400 + pax_force_retaddr
18401 retq
18402 -END(ftrace_graph_caller)
18403 +ENDPROC(ftrace_graph_caller)
18404
18405 GLOBAL(return_to_handler)
18406 subq $24, %rsp
18407 @@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
18408 movq 8(%rsp), %rdx
18409 movq (%rsp), %rax
18410 addq $24, %rsp
18411 + pax_force_fptr %rdi
18412 jmp *%rdi
18413 +ENDPROC(return_to_handler)
18414 #endif
18415
18416
18417 @@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
18418 ENDPROC(native_usergs_sysret64)
18419 #endif /* CONFIG_PARAVIRT */
18420
18421 + .macro ljmpq sel, off
18422 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
18423 + .byte 0x48; ljmp *1234f(%rip)
18424 + .pushsection .rodata
18425 + .align 16
18426 + 1234: .quad \off; .word \sel
18427 + .popsection
18428 +#else
18429 + pushq $\sel
18430 + pushq $\off
18431 + lretq
18432 +#endif
18433 + .endm
18434 +
18435 + .macro pax_enter_kernel
18436 + pax_set_fptr_mask
18437 +#ifdef CONFIG_PAX_KERNEXEC
18438 + call pax_enter_kernel
18439 +#endif
18440 + .endm
18441 +
18442 + .macro pax_exit_kernel
18443 +#ifdef CONFIG_PAX_KERNEXEC
18444 + call pax_exit_kernel
18445 +#endif
18446 + .endm
18447 +
18448 +#ifdef CONFIG_PAX_KERNEXEC
18449 +ENTRY(pax_enter_kernel)
18450 + pushq %rdi
18451 +
18452 +#ifdef CONFIG_PARAVIRT
18453 + PV_SAVE_REGS(CLBR_RDI)
18454 +#endif
18455 +
18456 + GET_CR0_INTO_RDI
18457 + bts $16,%rdi
18458 + jnc 3f
18459 + mov %cs,%edi
18460 + cmp $__KERNEL_CS,%edi
18461 + jnz 2f
18462 +1:
18463 +
18464 +#ifdef CONFIG_PARAVIRT
18465 + PV_RESTORE_REGS(CLBR_RDI)
18466 +#endif
18467 +
18468 + popq %rdi
18469 + pax_force_retaddr
18470 + retq
18471 +
18472 +2: ljmpq __KERNEL_CS,1f
18473 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
18474 +4: SET_RDI_INTO_CR0
18475 + jmp 1b
18476 +ENDPROC(pax_enter_kernel)
18477 +
18478 +ENTRY(pax_exit_kernel)
18479 + pushq %rdi
18480 +
18481 +#ifdef CONFIG_PARAVIRT
18482 + PV_SAVE_REGS(CLBR_RDI)
18483 +#endif
18484 +
18485 + mov %cs,%rdi
18486 + cmp $__KERNEXEC_KERNEL_CS,%edi
18487 + jz 2f
18488 +1:
18489 +
18490 +#ifdef CONFIG_PARAVIRT
18491 + PV_RESTORE_REGS(CLBR_RDI);
18492 +#endif
18493 +
18494 + popq %rdi
18495 + pax_force_retaddr
18496 + retq
18497 +
18498 +2: GET_CR0_INTO_RDI
18499 + btr $16,%rdi
18500 + ljmpq __KERNEL_CS,3f
18501 +3: SET_RDI_INTO_CR0
18502 + jmp 1b
18503 +ENDPROC(pax_exit_kernel)
18504 +#endif
18505 +
18506 + .macro pax_enter_kernel_user
18507 + pax_set_fptr_mask
18508 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18509 + call pax_enter_kernel_user
18510 +#endif
18511 + .endm
18512 +
18513 + .macro pax_exit_kernel_user
18514 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18515 + call pax_exit_kernel_user
18516 +#endif
18517 +#ifdef CONFIG_PAX_RANDKSTACK
18518 + pushq %rax
18519 + call pax_randomize_kstack
18520 + popq %rax
18521 +#endif
18522 + .endm
18523 +
18524 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18525 +ENTRY(pax_enter_kernel_user)
18526 + pushq %rdi
18527 + pushq %rbx
18528 +
18529 +#ifdef CONFIG_PARAVIRT
18530 + PV_SAVE_REGS(CLBR_RDI)
18531 +#endif
18532 +
18533 + GET_CR3_INTO_RDI
18534 + mov %rdi,%rbx
18535 + add $__START_KERNEL_map,%rbx
18536 + sub phys_base(%rip),%rbx
18537 +
18538 +#ifdef CONFIG_PARAVIRT
18539 + pushq %rdi
18540 + cmpl $0, pv_info+PARAVIRT_enabled
18541 + jz 1f
18542 + i = 0
18543 + .rept USER_PGD_PTRS
18544 + mov i*8(%rbx),%rsi
18545 + mov $0,%sil
18546 + lea i*8(%rbx),%rdi
18547 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
18548 + i = i + 1
18549 + .endr
18550 + jmp 2f
18551 +1:
18552 +#endif
18553 +
18554 + i = 0
18555 + .rept USER_PGD_PTRS
18556 + movb $0,i*8(%rbx)
18557 + i = i + 1
18558 + .endr
18559 +
18560 +#ifdef CONFIG_PARAVIRT
18561 +2: popq %rdi
18562 +#endif
18563 + SET_RDI_INTO_CR3
18564 +
18565 +#ifdef CONFIG_PAX_KERNEXEC
18566 + GET_CR0_INTO_RDI
18567 + bts $16,%rdi
18568 + SET_RDI_INTO_CR0
18569 +#endif
18570 +
18571 +#ifdef CONFIG_PARAVIRT
18572 + PV_RESTORE_REGS(CLBR_RDI)
18573 +#endif
18574 +
18575 + popq %rbx
18576 + popq %rdi
18577 + pax_force_retaddr
18578 + retq
18579 +ENDPROC(pax_enter_kernel_user)
18580 +
18581 +ENTRY(pax_exit_kernel_user)
18582 + push %rdi
18583 +
18584 +#ifdef CONFIG_PARAVIRT
18585 + pushq %rbx
18586 + PV_SAVE_REGS(CLBR_RDI)
18587 +#endif
18588 +
18589 +#ifdef CONFIG_PAX_KERNEXEC
18590 + GET_CR0_INTO_RDI
18591 + btr $16,%rdi
18592 + SET_RDI_INTO_CR0
18593 +#endif
18594 +
18595 + GET_CR3_INTO_RDI
18596 + add $__START_KERNEL_map,%rdi
18597 + sub phys_base(%rip),%rdi
18598 +
18599 +#ifdef CONFIG_PARAVIRT
18600 + cmpl $0, pv_info+PARAVIRT_enabled
18601 + jz 1f
18602 + mov %rdi,%rbx
18603 + i = 0
18604 + .rept USER_PGD_PTRS
18605 + mov i*8(%rbx),%rsi
18606 + mov $0x67,%sil
18607 + lea i*8(%rbx),%rdi
18608 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
18609 + i = i + 1
18610 + .endr
18611 + jmp 2f
18612 +1:
18613 +#endif
18614 +
18615 + i = 0
18616 + .rept USER_PGD_PTRS
18617 + movb $0x67,i*8(%rdi)
18618 + i = i + 1
18619 + .endr
18620 +
18621 +#ifdef CONFIG_PARAVIRT
18622 +2: PV_RESTORE_REGS(CLBR_RDI)
18623 + popq %rbx
18624 +#endif
18625 +
18626 + popq %rdi
18627 + pax_force_retaddr
18628 + retq
18629 +ENDPROC(pax_exit_kernel_user)
18630 +#endif
18631 +
18632 +.macro pax_erase_kstack
18633 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18634 + call pax_erase_kstack
18635 +#endif
18636 +.endm
18637 +
18638 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18639 +ENTRY(pax_erase_kstack)
18640 + pushq %rdi
18641 + pushq %rcx
18642 + pushq %rax
18643 + pushq %r11
18644 +
18645 + GET_THREAD_INFO(%r11)
18646 + mov TI_lowest_stack(%r11), %rdi
18647 + mov $-0xBEEF, %rax
18648 + std
18649 +
18650 +1: mov %edi, %ecx
18651 + and $THREAD_SIZE_asm - 1, %ecx
18652 + shr $3, %ecx
18653 + repne scasq
18654 + jecxz 2f
18655 +
18656 + cmp $2*8, %ecx
18657 + jc 2f
18658 +
18659 + mov $2*8, %ecx
18660 + repe scasq
18661 + jecxz 2f
18662 + jne 1b
18663 +
18664 +2: cld
18665 + mov %esp, %ecx
18666 + sub %edi, %ecx
18667 +
18668 + cmp $THREAD_SIZE_asm, %rcx
18669 + jb 3f
18670 + ud2
18671 +3:
18672 +
18673 + shr $3, %ecx
18674 + rep stosq
18675 +
18676 + mov TI_task_thread_sp0(%r11), %rdi
18677 + sub $256, %rdi
18678 + mov %rdi, TI_lowest_stack(%r11)
18679 +
18680 + popq %r11
18681 + popq %rax
18682 + popq %rcx
18683 + popq %rdi
18684 + pax_force_retaddr
18685 + ret
18686 +ENDPROC(pax_erase_kstack)
18687 +#endif
18688
18689 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
18690 #ifdef CONFIG_TRACE_IRQFLAGS
18691 @@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
18692 .endm
18693
18694 .macro UNFAKE_STACK_FRAME
18695 - addq $8*6, %rsp
18696 - CFI_ADJUST_CFA_OFFSET -(6*8)
18697 + addq $8*6 + ARG_SKIP, %rsp
18698 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
18699 .endm
18700
18701 /*
18702 @@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
18703 movq %rsp, %rsi
18704
18705 leaq -RBP(%rsp),%rdi /* arg1 for handler */
18706 - testl $3, CS-RBP(%rsi)
18707 + testb $3, CS-RBP(%rsi)
18708 je 1f
18709 SWAPGS
18710 /*
18711 @@ -498,9 +774,10 @@ ENTRY(save_rest)
18712 movq_cfi r15, R15+16
18713 movq %r11, 8(%rsp) /* return address */
18714 FIXUP_TOP_OF_STACK %r11, 16
18715 + pax_force_retaddr
18716 ret
18717 CFI_ENDPROC
18718 -END(save_rest)
18719 +ENDPROC(save_rest)
18720
18721 /* save complete stack frame */
18722 .pushsection .kprobes.text, "ax"
18723 @@ -529,9 +806,10 @@ ENTRY(save_paranoid)
18724 js 1f /* negative -> in kernel */
18725 SWAPGS
18726 xorl %ebx,%ebx
18727 -1: ret
18728 +1: pax_force_retaddr_bts
18729 + ret
18730 CFI_ENDPROC
18731 -END(save_paranoid)
18732 +ENDPROC(save_paranoid)
18733 .popsection
18734
18735 /*
18736 @@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
18737
18738 RESTORE_REST
18739
18740 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
18741 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
18742 jz 1f
18743
18744 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
18745 @@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
18746 RESTORE_REST
18747 jmp int_ret_from_sys_call
18748 CFI_ENDPROC
18749 -END(ret_from_fork)
18750 +ENDPROC(ret_from_fork)
18751
18752 /*
18753 * System call entry. Up to 6 arguments in registers are supported.
18754 @@ -608,7 +886,7 @@ END(ret_from_fork)
18755 ENTRY(system_call)
18756 CFI_STARTPROC simple
18757 CFI_SIGNAL_FRAME
18758 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
18759 + CFI_DEF_CFA rsp,0
18760 CFI_REGISTER rip,rcx
18761 /*CFI_REGISTER rflags,r11*/
18762 SWAPGS_UNSAFE_STACK
18763 @@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
18764
18765 movq %rsp,PER_CPU_VAR(old_rsp)
18766 movq PER_CPU_VAR(kernel_stack),%rsp
18767 + SAVE_ARGS 8*6,0
18768 + pax_enter_kernel_user
18769 +
18770 +#ifdef CONFIG_PAX_RANDKSTACK
18771 + pax_erase_kstack
18772 +#endif
18773 +
18774 /*
18775 * No need to follow this irqs off/on section - it's straight
18776 * and short:
18777 */
18778 ENABLE_INTERRUPTS(CLBR_NONE)
18779 - SAVE_ARGS 8,0
18780 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
18781 movq %rcx,RIP-ARGOFFSET(%rsp)
18782 CFI_REL_OFFSET rip,RIP-ARGOFFSET
18783 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
18784 + GET_THREAD_INFO(%rcx)
18785 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
18786 jnz tracesys
18787 system_call_fastpath:
18788 #if __SYSCALL_MASK == ~0
18789 @@ -640,7 +925,7 @@ system_call_fastpath:
18790 cmpl $__NR_syscall_max,%eax
18791 #endif
18792 ja badsys
18793 - movq %r10,%rcx
18794 + movq R10-ARGOFFSET(%rsp),%rcx
18795 call *sys_call_table(,%rax,8) # XXX: rip relative
18796 movq %rax,RAX-ARGOFFSET(%rsp)
18797 /*
18798 @@ -654,10 +939,13 @@ sysret_check:
18799 LOCKDEP_SYS_EXIT
18800 DISABLE_INTERRUPTS(CLBR_NONE)
18801 TRACE_IRQS_OFF
18802 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
18803 + GET_THREAD_INFO(%rcx)
18804 + movl TI_flags(%rcx),%edx
18805 andl %edi,%edx
18806 jnz sysret_careful
18807 CFI_REMEMBER_STATE
18808 + pax_exit_kernel_user
18809 + pax_erase_kstack
18810 /*
18811 * sysretq will re-enable interrupts:
18812 */
18813 @@ -709,14 +997,18 @@ badsys:
18814 * jump back to the normal fast path.
18815 */
18816 auditsys:
18817 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
18818 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
18819 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
18820 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
18821 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
18822 movq %rax,%rsi /* 2nd arg: syscall number */
18823 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
18824 call __audit_syscall_entry
18825 +
18826 + pax_erase_kstack
18827 +
18828 LOAD_ARGS 0 /* reload call-clobbered registers */
18829 + pax_set_fptr_mask
18830 jmp system_call_fastpath
18831
18832 /*
18833 @@ -737,7 +1029,7 @@ sysret_audit:
18834 /* Do syscall tracing */
18835 tracesys:
18836 #ifdef CONFIG_AUDITSYSCALL
18837 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
18838 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
18839 jz auditsys
18840 #endif
18841 SAVE_REST
18842 @@ -745,12 +1037,16 @@ tracesys:
18843 FIXUP_TOP_OF_STACK %rdi
18844 movq %rsp,%rdi
18845 call syscall_trace_enter
18846 +
18847 + pax_erase_kstack
18848 +
18849 /*
18850 * Reload arg registers from stack in case ptrace changed them.
18851 * We don't reload %rax because syscall_trace_enter() returned
18852 * the value it wants us to use in the table lookup.
18853 */
18854 LOAD_ARGS ARGOFFSET, 1
18855 + pax_set_fptr_mask
18856 RESTORE_REST
18857 #if __SYSCALL_MASK == ~0
18858 cmpq $__NR_syscall_max,%rax
18859 @@ -759,7 +1055,7 @@ tracesys:
18860 cmpl $__NR_syscall_max,%eax
18861 #endif
18862 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
18863 - movq %r10,%rcx /* fixup for C */
18864 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
18865 call *sys_call_table(,%rax,8)
18866 movq %rax,RAX-ARGOFFSET(%rsp)
18867 /* Use IRET because user could have changed frame */
18868 @@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
18869 andl %edi,%edx
18870 jnz int_careful
18871 andl $~TS_COMPAT,TI_status(%rcx)
18872 - jmp retint_swapgs
18873 + pax_exit_kernel_user
18874 + pax_erase_kstack
18875 + jmp retint_swapgs_pax
18876
18877 /* Either reschedule or signal or syscall exit tracking needed. */
18878 /* First do a reschedule test. */
18879 @@ -826,7 +1124,7 @@ int_restore_rest:
18880 TRACE_IRQS_OFF
18881 jmp int_with_check
18882 CFI_ENDPROC
18883 -END(system_call)
18884 +ENDPROC(system_call)
18885
18886 /*
18887 * Certain special system calls that need to save a complete full stack frame.
18888 @@ -842,7 +1140,7 @@ ENTRY(\label)
18889 call \func
18890 jmp ptregscall_common
18891 CFI_ENDPROC
18892 -END(\label)
18893 +ENDPROC(\label)
18894 .endm
18895
18896 .macro FORK_LIKE func
18897 @@ -856,9 +1154,10 @@ ENTRY(stub_\func)
18898 DEFAULT_FRAME 0 8 /* offset 8: return address */
18899 call sys_\func
18900 RESTORE_TOP_OF_STACK %r11, 8
18901 + pax_force_retaddr
18902 ret $REST_SKIP /* pop extended registers */
18903 CFI_ENDPROC
18904 -END(stub_\func)
18905 +ENDPROC(stub_\func)
18906 .endm
18907
18908 FORK_LIKE clone
18909 @@ -875,9 +1174,10 @@ ENTRY(ptregscall_common)
18910 movq_cfi_restore R12+8, r12
18911 movq_cfi_restore RBP+8, rbp
18912 movq_cfi_restore RBX+8, rbx
18913 + pax_force_retaddr
18914 ret $REST_SKIP /* pop extended registers */
18915 CFI_ENDPROC
18916 -END(ptregscall_common)
18917 +ENDPROC(ptregscall_common)
18918
18919 ENTRY(stub_execve)
18920 CFI_STARTPROC
18921 @@ -891,7 +1191,7 @@ ENTRY(stub_execve)
18922 RESTORE_REST
18923 jmp int_ret_from_sys_call
18924 CFI_ENDPROC
18925 -END(stub_execve)
18926 +ENDPROC(stub_execve)
18927
18928 /*
18929 * sigreturn is special because it needs to restore all registers on return.
18930 @@ -909,7 +1209,7 @@ ENTRY(stub_rt_sigreturn)
18931 RESTORE_REST
18932 jmp int_ret_from_sys_call
18933 CFI_ENDPROC
18934 -END(stub_rt_sigreturn)
18935 +ENDPROC(stub_rt_sigreturn)
18936
18937 #ifdef CONFIG_X86_X32_ABI
18938 ENTRY(stub_x32_rt_sigreturn)
18939 @@ -975,7 +1275,7 @@ vector=vector+1
18940 2: jmp common_interrupt
18941 .endr
18942 CFI_ENDPROC
18943 -END(irq_entries_start)
18944 +ENDPROC(irq_entries_start)
18945
18946 .previous
18947 END(interrupt)
18948 @@ -995,6 +1295,16 @@ END(interrupt)
18949 subq $ORIG_RAX-RBP, %rsp
18950 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
18951 SAVE_ARGS_IRQ
18952 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18953 + testb $3, CS(%rdi)
18954 + jnz 1f
18955 + pax_enter_kernel
18956 + jmp 2f
18957 +1: pax_enter_kernel_user
18958 +2:
18959 +#else
18960 + pax_enter_kernel
18961 +#endif
18962 call \func
18963 .endm
18964
18965 @@ -1027,7 +1337,7 @@ ret_from_intr:
18966
18967 exit_intr:
18968 GET_THREAD_INFO(%rcx)
18969 - testl $3,CS-ARGOFFSET(%rsp)
18970 + testb $3,CS-ARGOFFSET(%rsp)
18971 je retint_kernel
18972
18973 /* Interrupt came from user space */
18974 @@ -1049,12 +1359,16 @@ retint_swapgs: /* return to user-space */
18975 * The iretq could re-enable interrupts:
18976 */
18977 DISABLE_INTERRUPTS(CLBR_ANY)
18978 + pax_exit_kernel_user
18979 +retint_swapgs_pax:
18980 TRACE_IRQS_IRETQ
18981 SWAPGS
18982 jmp restore_args
18983
18984 retint_restore_args: /* return to kernel space */
18985 DISABLE_INTERRUPTS(CLBR_ANY)
18986 + pax_exit_kernel
18987 + pax_force_retaddr (RIP-ARGOFFSET)
18988 /*
18989 * The iretq could re-enable interrupts:
18990 */
18991 @@ -1137,7 +1451,7 @@ ENTRY(retint_kernel)
18992 #endif
18993
18994 CFI_ENDPROC
18995 -END(common_interrupt)
18996 +ENDPROC(common_interrupt)
18997 /*
18998 * End of kprobes section
18999 */
19000 @@ -1155,7 +1469,7 @@ ENTRY(\sym)
19001 interrupt \do_sym
19002 jmp ret_from_intr
19003 CFI_ENDPROC
19004 -END(\sym)
19005 +ENDPROC(\sym)
19006 .endm
19007
19008 #ifdef CONFIG_SMP
19009 @@ -1211,12 +1525,22 @@ ENTRY(\sym)
19010 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19011 call error_entry
19012 DEFAULT_FRAME 0
19013 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19014 + testb $3, CS(%rsp)
19015 + jnz 1f
19016 + pax_enter_kernel
19017 + jmp 2f
19018 +1: pax_enter_kernel_user
19019 +2:
19020 +#else
19021 + pax_enter_kernel
19022 +#endif
19023 movq %rsp,%rdi /* pt_regs pointer */
19024 xorl %esi,%esi /* no error code */
19025 call \do_sym
19026 jmp error_exit /* %ebx: no swapgs flag */
19027 CFI_ENDPROC
19028 -END(\sym)
19029 +ENDPROC(\sym)
19030 .endm
19031
19032 .macro paranoidzeroentry sym do_sym
19033 @@ -1229,15 +1553,25 @@ ENTRY(\sym)
19034 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19035 call save_paranoid
19036 TRACE_IRQS_OFF
19037 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19038 + testb $3, CS(%rsp)
19039 + jnz 1f
19040 + pax_enter_kernel
19041 + jmp 2f
19042 +1: pax_enter_kernel_user
19043 +2:
19044 +#else
19045 + pax_enter_kernel
19046 +#endif
19047 movq %rsp,%rdi /* pt_regs pointer */
19048 xorl %esi,%esi /* no error code */
19049 call \do_sym
19050 jmp paranoid_exit /* %ebx: no swapgs flag */
19051 CFI_ENDPROC
19052 -END(\sym)
19053 +ENDPROC(\sym)
19054 .endm
19055
19056 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19057 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19058 .macro paranoidzeroentry_ist sym do_sym ist
19059 ENTRY(\sym)
19060 INTR_FRAME
19061 @@ -1248,14 +1582,30 @@ ENTRY(\sym)
19062 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19063 call save_paranoid
19064 TRACE_IRQS_OFF_DEBUG
19065 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19066 + testb $3, CS(%rsp)
19067 + jnz 1f
19068 + pax_enter_kernel
19069 + jmp 2f
19070 +1: pax_enter_kernel_user
19071 +2:
19072 +#else
19073 + pax_enter_kernel
19074 +#endif
19075 movq %rsp,%rdi /* pt_regs pointer */
19076 xorl %esi,%esi /* no error code */
19077 +#ifdef CONFIG_SMP
19078 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19079 + lea init_tss(%r12), %r12
19080 +#else
19081 + lea init_tss(%rip), %r12
19082 +#endif
19083 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19084 call \do_sym
19085 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19086 jmp paranoid_exit /* %ebx: no swapgs flag */
19087 CFI_ENDPROC
19088 -END(\sym)
19089 +ENDPROC(\sym)
19090 .endm
19091
19092 .macro errorentry sym do_sym
19093 @@ -1267,13 +1617,23 @@ ENTRY(\sym)
19094 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19095 call error_entry
19096 DEFAULT_FRAME 0
19097 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19098 + testb $3, CS(%rsp)
19099 + jnz 1f
19100 + pax_enter_kernel
19101 + jmp 2f
19102 +1: pax_enter_kernel_user
19103 +2:
19104 +#else
19105 + pax_enter_kernel
19106 +#endif
19107 movq %rsp,%rdi /* pt_regs pointer */
19108 movq ORIG_RAX(%rsp),%rsi /* get error code */
19109 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19110 call \do_sym
19111 jmp error_exit /* %ebx: no swapgs flag */
19112 CFI_ENDPROC
19113 -END(\sym)
19114 +ENDPROC(\sym)
19115 .endm
19116
19117 /* error code is on the stack already */
19118 @@ -1287,13 +1647,23 @@ ENTRY(\sym)
19119 call save_paranoid
19120 DEFAULT_FRAME 0
19121 TRACE_IRQS_OFF
19122 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19123 + testb $3, CS(%rsp)
19124 + jnz 1f
19125 + pax_enter_kernel
19126 + jmp 2f
19127 +1: pax_enter_kernel_user
19128 +2:
19129 +#else
19130 + pax_enter_kernel
19131 +#endif
19132 movq %rsp,%rdi /* pt_regs pointer */
19133 movq ORIG_RAX(%rsp),%rsi /* get error code */
19134 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19135 call \do_sym
19136 jmp paranoid_exit /* %ebx: no swapgs flag */
19137 CFI_ENDPROC
19138 -END(\sym)
19139 +ENDPROC(\sym)
19140 .endm
19141
19142 zeroentry divide_error do_divide_error
19143 @@ -1323,9 +1693,10 @@ gs_change:
19144 2: mfence /* workaround */
19145 SWAPGS
19146 popfq_cfi
19147 + pax_force_retaddr
19148 ret
19149 CFI_ENDPROC
19150 -END(native_load_gs_index)
19151 +ENDPROC(native_load_gs_index)
19152
19153 _ASM_EXTABLE(gs_change,bad_gs)
19154 .section .fixup,"ax"
19155 @@ -1353,9 +1724,10 @@ ENTRY(call_softirq)
19156 CFI_DEF_CFA_REGISTER rsp
19157 CFI_ADJUST_CFA_OFFSET -8
19158 decl PER_CPU_VAR(irq_count)
19159 + pax_force_retaddr
19160 ret
19161 CFI_ENDPROC
19162 -END(call_softirq)
19163 +ENDPROC(call_softirq)
19164
19165 #ifdef CONFIG_XEN
19166 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
19167 @@ -1393,7 +1765,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
19168 decl PER_CPU_VAR(irq_count)
19169 jmp error_exit
19170 CFI_ENDPROC
19171 -END(xen_do_hypervisor_callback)
19172 +ENDPROC(xen_do_hypervisor_callback)
19173
19174 /*
19175 * Hypervisor uses this for application faults while it executes.
19176 @@ -1452,7 +1824,7 @@ ENTRY(xen_failsafe_callback)
19177 SAVE_ALL
19178 jmp error_exit
19179 CFI_ENDPROC
19180 -END(xen_failsafe_callback)
19181 +ENDPROC(xen_failsafe_callback)
19182
19183 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
19184 xen_hvm_callback_vector xen_evtchn_do_upcall
19185 @@ -1501,16 +1873,31 @@ ENTRY(paranoid_exit)
19186 TRACE_IRQS_OFF_DEBUG
19187 testl %ebx,%ebx /* swapgs needed? */
19188 jnz paranoid_restore
19189 - testl $3,CS(%rsp)
19190 + testb $3,CS(%rsp)
19191 jnz paranoid_userspace
19192 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19193 + pax_exit_kernel
19194 + TRACE_IRQS_IRETQ 0
19195 + SWAPGS_UNSAFE_STACK
19196 + RESTORE_ALL 8
19197 + pax_force_retaddr_bts
19198 + jmp irq_return
19199 +#endif
19200 paranoid_swapgs:
19201 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19202 + pax_exit_kernel_user
19203 +#else
19204 + pax_exit_kernel
19205 +#endif
19206 TRACE_IRQS_IRETQ 0
19207 SWAPGS_UNSAFE_STACK
19208 RESTORE_ALL 8
19209 jmp irq_return
19210 paranoid_restore:
19211 + pax_exit_kernel
19212 TRACE_IRQS_IRETQ_DEBUG 0
19213 RESTORE_ALL 8
19214 + pax_force_retaddr_bts
19215 jmp irq_return
19216 paranoid_userspace:
19217 GET_THREAD_INFO(%rcx)
19218 @@ -1539,7 +1926,7 @@ paranoid_schedule:
19219 TRACE_IRQS_OFF
19220 jmp paranoid_userspace
19221 CFI_ENDPROC
19222 -END(paranoid_exit)
19223 +ENDPROC(paranoid_exit)
19224
19225 /*
19226 * Exception entry point. This expects an error code/orig_rax on the stack.
19227 @@ -1566,12 +1953,13 @@ ENTRY(error_entry)
19228 movq_cfi r14, R14+8
19229 movq_cfi r15, R15+8
19230 xorl %ebx,%ebx
19231 - testl $3,CS+8(%rsp)
19232 + testb $3,CS+8(%rsp)
19233 je error_kernelspace
19234 error_swapgs:
19235 SWAPGS
19236 error_sti:
19237 TRACE_IRQS_OFF
19238 + pax_force_retaddr_bts
19239 ret
19240
19241 /*
19242 @@ -1598,7 +1986,7 @@ bstep_iret:
19243 movq %rcx,RIP+8(%rsp)
19244 jmp error_swapgs
19245 CFI_ENDPROC
19246 -END(error_entry)
19247 +ENDPROC(error_entry)
19248
19249
19250 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
19251 @@ -1618,7 +2006,7 @@ ENTRY(error_exit)
19252 jnz retint_careful
19253 jmp retint_swapgs
19254 CFI_ENDPROC
19255 -END(error_exit)
19256 +ENDPROC(error_exit)
19257
19258 /*
19259 * Test if a given stack is an NMI stack or not.
19260 @@ -1676,9 +2064,11 @@ ENTRY(nmi)
19261 * If %cs was not the kernel segment, then the NMI triggered in user
19262 * space, which means it is definitely not nested.
19263 */
19264 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
19265 + je 1f
19266 cmpl $__KERNEL_CS, 16(%rsp)
19267 jne first_nmi
19268 -
19269 +1:
19270 /*
19271 * Check the special variable on the stack to see if NMIs are
19272 * executing.
19273 @@ -1847,6 +2237,17 @@ end_repeat_nmi:
19274 */
19275 movq %cr2, %r12
19276
19277 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19278 + testb $3, CS(%rsp)
19279 + jnz 1f
19280 + pax_enter_kernel
19281 + jmp 2f
19282 +1: pax_enter_kernel_user
19283 +2:
19284 +#else
19285 + pax_enter_kernel
19286 +#endif
19287 +
19288 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
19289 movq %rsp,%rdi
19290 movq $-1,%rsi
19291 @@ -1862,23 +2263,34 @@ end_repeat_nmi:
19292 testl %ebx,%ebx /* swapgs needed? */
19293 jnz nmi_restore
19294 nmi_swapgs:
19295 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19296 + pax_exit_kernel_user
19297 +#else
19298 + pax_exit_kernel
19299 +#endif
19300 SWAPGS_UNSAFE_STACK
19301 + RESTORE_ALL 6*8
19302 + /* Clear the NMI executing stack variable */
19303 + movq $0, 5*8(%rsp)
19304 + jmp irq_return
19305 nmi_restore:
19306 + pax_exit_kernel
19307 /* Pop the extra iret frame at once */
19308 RESTORE_ALL 6*8
19309 + pax_force_retaddr_bts
19310
19311 /* Clear the NMI executing stack variable */
19312 movq $0, 5*8(%rsp)
19313 jmp irq_return
19314 CFI_ENDPROC
19315 -END(nmi)
19316 +ENDPROC(nmi)
19317
19318 ENTRY(ignore_sysret)
19319 CFI_STARTPROC
19320 mov $-ENOSYS,%eax
19321 sysret
19322 CFI_ENDPROC
19323 -END(ignore_sysret)
19324 +ENDPROC(ignore_sysret)
19325
19326 /*
19327 * End of kprobes section
19328 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
19329 index 1d41402..af9a46a 100644
19330 --- a/arch/x86/kernel/ftrace.c
19331 +++ b/arch/x86/kernel/ftrace.c
19332 @@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
19333 {
19334 unsigned char replaced[MCOUNT_INSN_SIZE];
19335
19336 + ip = ktla_ktva(ip);
19337 +
19338 /*
19339 * Note: Due to modules and __init, code can
19340 * disappear and change, we need to protect against faulting
19341 @@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
19342 unsigned char old[MCOUNT_INSN_SIZE], *new;
19343 int ret;
19344
19345 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
19346 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
19347 new = ftrace_call_replace(ip, (unsigned long)func);
19348
19349 /* See comment above by declaration of modifying_ftrace_code */
19350 @@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
19351 /* Also update the regs callback function */
19352 if (!ret) {
19353 ip = (unsigned long)(&ftrace_regs_call);
19354 - memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
19355 + memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
19356 new = ftrace_call_replace(ip, (unsigned long)func);
19357 ret = ftrace_modify_code(ip, old, new);
19358 }
19359 @@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
19360 * kernel identity mapping to modify code.
19361 */
19362 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
19363 - ip = (unsigned long)__va(__pa(ip));
19364 + ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
19365
19366 return probe_kernel_write((void *)ip, val, size);
19367 }
19368 @@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
19369 unsigned char replaced[MCOUNT_INSN_SIZE];
19370 unsigned char brk = BREAKPOINT_INSTRUCTION;
19371
19372 - if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
19373 + if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
19374 return -EFAULT;
19375
19376 /* Make sure it is what we expect it to be */
19377 @@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
19378 return ret;
19379
19380 fail_update:
19381 - probe_kernel_write((void *)ip, &old_code[0], 1);
19382 + probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
19383 goto out;
19384 }
19385
19386 @@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
19387 {
19388 unsigned char code[MCOUNT_INSN_SIZE];
19389
19390 + ip = ktla_ktva(ip);
19391 +
19392 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
19393 return -EFAULT;
19394
19395 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
19396 index c18f59d..9c0c9f6 100644
19397 --- a/arch/x86/kernel/head32.c
19398 +++ b/arch/x86/kernel/head32.c
19399 @@ -18,6 +18,7 @@
19400 #include <asm/io_apic.h>
19401 #include <asm/bios_ebda.h>
19402 #include <asm/tlbflush.h>
19403 +#include <asm/boot.h>
19404
19405 static void __init i386_default_early_setup(void)
19406 {
19407 @@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
19408
19409 void __init i386_start_kernel(void)
19410 {
19411 - memblock_reserve(__pa_symbol(&_text),
19412 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
19413 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
19414
19415 #ifdef CONFIG_BLK_DEV_INITRD
19416 /* Reserve INITRD */
19417 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
19418 index c8932c7..d56b622 100644
19419 --- a/arch/x86/kernel/head_32.S
19420 +++ b/arch/x86/kernel/head_32.S
19421 @@ -26,6 +26,12 @@
19422 /* Physical address */
19423 #define pa(X) ((X) - __PAGE_OFFSET)
19424
19425 +#ifdef CONFIG_PAX_KERNEXEC
19426 +#define ta(X) (X)
19427 +#else
19428 +#define ta(X) ((X) - __PAGE_OFFSET)
19429 +#endif
19430 +
19431 /*
19432 * References to members of the new_cpu_data structure.
19433 */
19434 @@ -55,11 +61,7 @@
19435 * and small than max_low_pfn, otherwise will waste some page table entries
19436 */
19437
19438 -#if PTRS_PER_PMD > 1
19439 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
19440 -#else
19441 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
19442 -#endif
19443 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
19444
19445 /* Number of possible pages in the lowmem region */
19446 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
19447 @@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
19448 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
19449
19450 /*
19451 + * Real beginning of normal "text" segment
19452 + */
19453 +ENTRY(stext)
19454 +ENTRY(_stext)
19455 +
19456 +/*
19457 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
19458 * %esi points to the real-mode code as a 32-bit pointer.
19459 * CS and DS must be 4 GB flat segments, but we don't depend on
19460 @@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
19461 * can.
19462 */
19463 __HEAD
19464 +
19465 +#ifdef CONFIG_PAX_KERNEXEC
19466 + jmp startup_32
19467 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
19468 +.fill PAGE_SIZE-5,1,0xcc
19469 +#endif
19470 +
19471 ENTRY(startup_32)
19472 movl pa(stack_start),%ecx
19473
19474 @@ -106,6 +121,59 @@ ENTRY(startup_32)
19475 2:
19476 leal -__PAGE_OFFSET(%ecx),%esp
19477
19478 +#ifdef CONFIG_SMP
19479 + movl $pa(cpu_gdt_table),%edi
19480 + movl $__per_cpu_load,%eax
19481 + movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
19482 + rorl $16,%eax
19483 + movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
19484 + movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
19485 + movl $__per_cpu_end - 1,%eax
19486 + subl $__per_cpu_start,%eax
19487 + movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
19488 +#endif
19489 +
19490 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19491 + movl $NR_CPUS,%ecx
19492 + movl $pa(cpu_gdt_table),%edi
19493 +1:
19494 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
19495 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
19496 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
19497 + addl $PAGE_SIZE_asm,%edi
19498 + loop 1b
19499 +#endif
19500 +
19501 +#ifdef CONFIG_PAX_KERNEXEC
19502 + movl $pa(boot_gdt),%edi
19503 + movl $__LOAD_PHYSICAL_ADDR,%eax
19504 + movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
19505 + rorl $16,%eax
19506 + movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
19507 + movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
19508 + rorl $16,%eax
19509 +
19510 + ljmp $(__BOOT_CS),$1f
19511 +1:
19512 +
19513 + movl $NR_CPUS,%ecx
19514 + movl $pa(cpu_gdt_table),%edi
19515 + addl $__PAGE_OFFSET,%eax
19516 +1:
19517 + movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
19518 + movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
19519 + movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
19520 + movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
19521 + rorl $16,%eax
19522 + movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
19523 + movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
19524 + movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
19525 + movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
19526 + rorl $16,%eax
19527 + addl $PAGE_SIZE_asm,%edi
19528 + loop 1b
19529 +#endif
19530 +
19531 /*
19532 * Clear BSS first so that there are no surprises...
19533 */
19534 @@ -196,8 +264,11 @@ ENTRY(startup_32)
19535 movl %eax, pa(max_pfn_mapped)
19536
19537 /* Do early initialization of the fixmap area */
19538 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
19539 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
19540 +#ifdef CONFIG_COMPAT_VDSO
19541 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
19542 +#else
19543 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
19544 +#endif
19545 #else /* Not PAE */
19546
19547 page_pde_offset = (__PAGE_OFFSET >> 20);
19548 @@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
19549 movl %eax, pa(max_pfn_mapped)
19550
19551 /* Do early initialization of the fixmap area */
19552 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
19553 - movl %eax,pa(initial_page_table+0xffc)
19554 +#ifdef CONFIG_COMPAT_VDSO
19555 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
19556 +#else
19557 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
19558 +#endif
19559 #endif
19560
19561 #ifdef CONFIG_PARAVIRT
19562 @@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
19563 cmpl $num_subarch_entries, %eax
19564 jae bad_subarch
19565
19566 - movl pa(subarch_entries)(,%eax,4), %eax
19567 - subl $__PAGE_OFFSET, %eax
19568 - jmp *%eax
19569 + jmp *pa(subarch_entries)(,%eax,4)
19570
19571 bad_subarch:
19572 WEAK(lguest_entry)
19573 @@ -256,10 +328,10 @@ WEAK(xen_entry)
19574 __INITDATA
19575
19576 subarch_entries:
19577 - .long default_entry /* normal x86/PC */
19578 - .long lguest_entry /* lguest hypervisor */
19579 - .long xen_entry /* Xen hypervisor */
19580 - .long default_entry /* Moorestown MID */
19581 + .long ta(default_entry) /* normal x86/PC */
19582 + .long ta(lguest_entry) /* lguest hypervisor */
19583 + .long ta(xen_entry) /* Xen hypervisor */
19584 + .long ta(default_entry) /* Moorestown MID */
19585 num_subarch_entries = (. - subarch_entries) / 4
19586 .previous
19587 #else
19588 @@ -335,6 +407,7 @@ default_entry:
19589 movl pa(mmu_cr4_features),%eax
19590 movl %eax,%cr4
19591
19592 +#ifdef CONFIG_X86_PAE
19593 testb $X86_CR4_PAE, %al # check if PAE is enabled
19594 jz 6f
19595
19596 @@ -363,6 +436,9 @@ default_entry:
19597 /* Make changes effective */
19598 wrmsr
19599
19600 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
19601 +#endif
19602 +
19603 6:
19604
19605 /*
19606 @@ -460,14 +536,20 @@ is386: movl $2,%ecx # set MP
19607 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
19608 movl %eax,%ss # after changing gdt.
19609
19610 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
19611 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
19612 movl %eax,%ds
19613 movl %eax,%es
19614
19615 movl $(__KERNEL_PERCPU), %eax
19616 movl %eax,%fs # set this cpu's percpu
19617
19618 +#ifdef CONFIG_CC_STACKPROTECTOR
19619 movl $(__KERNEL_STACK_CANARY),%eax
19620 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
19621 + movl $(__USER_DS),%eax
19622 +#else
19623 + xorl %eax,%eax
19624 +#endif
19625 movl %eax,%gs
19626
19627 xorl %eax,%eax # Clear LDT
19628 @@ -544,8 +626,11 @@ setup_once:
19629 * relocation. Manually set base address in stack canary
19630 * segment descriptor.
19631 */
19632 - movl $gdt_page,%eax
19633 + movl $cpu_gdt_table,%eax
19634 movl $stack_canary,%ecx
19635 +#ifdef CONFIG_SMP
19636 + addl $__per_cpu_load,%ecx
19637 +#endif
19638 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
19639 shrl $16, %ecx
19640 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
19641 @@ -576,7 +661,7 @@ ENDPROC(early_idt_handlers)
19642 /* This is global to keep gas from relaxing the jumps */
19643 ENTRY(early_idt_handler)
19644 cld
19645 - cmpl $2,%ss:early_recursion_flag
19646 + cmpl $1,%ss:early_recursion_flag
19647 je hlt_loop
19648 incl %ss:early_recursion_flag
19649
19650 @@ -614,8 +699,8 @@ ENTRY(early_idt_handler)
19651 pushl (20+6*4)(%esp) /* trapno */
19652 pushl $fault_msg
19653 call printk
19654 -#endif
19655 call dump_stack
19656 +#endif
19657 hlt_loop:
19658 hlt
19659 jmp hlt_loop
19660 @@ -634,8 +719,11 @@ ENDPROC(early_idt_handler)
19661 /* This is the default interrupt "handler" :-) */
19662 ALIGN
19663 ignore_int:
19664 - cld
19665 #ifdef CONFIG_PRINTK
19666 + cmpl $2,%ss:early_recursion_flag
19667 + je hlt_loop
19668 + incl %ss:early_recursion_flag
19669 + cld
19670 pushl %eax
19671 pushl %ecx
19672 pushl %edx
19673 @@ -644,9 +732,6 @@ ignore_int:
19674 movl $(__KERNEL_DS),%eax
19675 movl %eax,%ds
19676 movl %eax,%es
19677 - cmpl $2,early_recursion_flag
19678 - je hlt_loop
19679 - incl early_recursion_flag
19680 pushl 16(%esp)
19681 pushl 24(%esp)
19682 pushl 32(%esp)
19683 @@ -680,29 +765,43 @@ ENTRY(setup_once_ref)
19684 /*
19685 * BSS section
19686 */
19687 -__PAGE_ALIGNED_BSS
19688 - .align PAGE_SIZE
19689 #ifdef CONFIG_X86_PAE
19690 +.section .initial_pg_pmd,"a",@progbits
19691 initial_pg_pmd:
19692 .fill 1024*KPMDS,4,0
19693 #else
19694 +.section .initial_page_table,"a",@progbits
19695 ENTRY(initial_page_table)
19696 .fill 1024,4,0
19697 #endif
19698 +.section .initial_pg_fixmap,"a",@progbits
19699 initial_pg_fixmap:
19700 .fill 1024,4,0
19701 +.section .empty_zero_page,"a",@progbits
19702 ENTRY(empty_zero_page)
19703 .fill 4096,1,0
19704 +.section .swapper_pg_dir,"a",@progbits
19705 ENTRY(swapper_pg_dir)
19706 +#ifdef CONFIG_X86_PAE
19707 + .fill 4,8,0
19708 +#else
19709 .fill 1024,4,0
19710 +#endif
19711 +
19712 +/*
19713 + * The IDT has to be page-aligned to simplify the Pentium
19714 + * F0 0F bug workaround.. We have a special link segment
19715 + * for this.
19716 + */
19717 +.section .idt,"a",@progbits
19718 +ENTRY(idt_table)
19719 + .fill 256,8,0
19720
19721 /*
19722 * This starts the data section.
19723 */
19724 #ifdef CONFIG_X86_PAE
19725 -__PAGE_ALIGNED_DATA
19726 - /* Page-aligned for the benefit of paravirt? */
19727 - .align PAGE_SIZE
19728 +.section .initial_page_table,"a",@progbits
19729 ENTRY(initial_page_table)
19730 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
19731 # if KPMDS == 3
19732 @@ -721,12 +820,20 @@ ENTRY(initial_page_table)
19733 # error "Kernel PMDs should be 1, 2 or 3"
19734 # endif
19735 .align PAGE_SIZE /* needs to be page-sized too */
19736 +
19737 +#ifdef CONFIG_PAX_PER_CPU_PGD
19738 +ENTRY(cpu_pgd)
19739 + .rept NR_CPUS
19740 + .fill 4,8,0
19741 + .endr
19742 +#endif
19743 +
19744 #endif
19745
19746 .data
19747 .balign 4
19748 ENTRY(stack_start)
19749 - .long init_thread_union+THREAD_SIZE
19750 + .long init_thread_union+THREAD_SIZE-8
19751
19752 __INITRODATA
19753 int_msg:
19754 @@ -754,7 +861,7 @@ fault_msg:
19755 * segment size, and 32-bit linear address value:
19756 */
19757
19758 - .data
19759 +.section .rodata,"a",@progbits
19760 .globl boot_gdt_descr
19761 .globl idt_descr
19762
19763 @@ -763,7 +870,7 @@ fault_msg:
19764 .word 0 # 32 bit align gdt_desc.address
19765 boot_gdt_descr:
19766 .word __BOOT_DS+7
19767 - .long boot_gdt - __PAGE_OFFSET
19768 + .long pa(boot_gdt)
19769
19770 .word 0 # 32-bit align idt_desc.address
19771 idt_descr:
19772 @@ -774,7 +881,7 @@ idt_descr:
19773 .word 0 # 32 bit align gdt_desc.address
19774 ENTRY(early_gdt_descr)
19775 .word GDT_ENTRIES*8-1
19776 - .long gdt_page /* Overwritten for secondary CPUs */
19777 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
19778
19779 /*
19780 * The boot_gdt must mirror the equivalent in setup.S and is
19781 @@ -783,5 +890,65 @@ ENTRY(early_gdt_descr)
19782 .align L1_CACHE_BYTES
19783 ENTRY(boot_gdt)
19784 .fill GDT_ENTRY_BOOT_CS,8,0
19785 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
19786 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
19787 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
19788 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
19789 +
19790 + .align PAGE_SIZE_asm
19791 +ENTRY(cpu_gdt_table)
19792 + .rept NR_CPUS
19793 + .quad 0x0000000000000000 /* NULL descriptor */
19794 + .quad 0x0000000000000000 /* 0x0b reserved */
19795 + .quad 0x0000000000000000 /* 0x13 reserved */
19796 + .quad 0x0000000000000000 /* 0x1b reserved */
19797 +
19798 +#ifdef CONFIG_PAX_KERNEXEC
19799 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
19800 +#else
19801 + .quad 0x0000000000000000 /* 0x20 unused */
19802 +#endif
19803 +
19804 + .quad 0x0000000000000000 /* 0x28 unused */
19805 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
19806 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
19807 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
19808 + .quad 0x0000000000000000 /* 0x4b reserved */
19809 + .quad 0x0000000000000000 /* 0x53 reserved */
19810 + .quad 0x0000000000000000 /* 0x5b reserved */
19811 +
19812 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
19813 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
19814 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
19815 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
19816 +
19817 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
19818 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
19819 +
19820 + /*
19821 + * Segments used for calling PnP BIOS have byte granularity.
19822 + * The code segments and data segments have fixed 64k limits,
19823 + * the transfer segment sizes are set at run time.
19824 + */
19825 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
19826 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
19827 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
19828 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
19829 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
19830 +
19831 + /*
19832 + * The APM segments have byte granularity and their bases
19833 + * are set at run time. All have 64k limits.
19834 + */
19835 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
19836 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
19837 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
19838 +
19839 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
19840 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
19841 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
19842 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
19843 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
19844 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
19845 +
19846 + /* Be sure this is zeroed to avoid false validations in Xen */
19847 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
19848 + .endr
19849 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
19850 index 980053c..74d3b44 100644
19851 --- a/arch/x86/kernel/head_64.S
19852 +++ b/arch/x86/kernel/head_64.S
19853 @@ -20,6 +20,8 @@
19854 #include <asm/processor-flags.h>
19855 #include <asm/percpu.h>
19856 #include <asm/nops.h>
19857 +#include <asm/cpufeature.h>
19858 +#include <asm/alternative-asm.h>
19859
19860 #ifdef CONFIG_PARAVIRT
19861 #include <asm/asm-offsets.h>
19862 @@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
19863 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
19864 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
19865 L3_START_KERNEL = pud_index(__START_KERNEL_map)
19866 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
19867 +L3_VMALLOC_START = pud_index(VMALLOC_START)
19868 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
19869 +L3_VMALLOC_END = pud_index(VMALLOC_END)
19870 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
19871 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
19872
19873 .text
19874 __HEAD
19875 @@ -88,35 +96,23 @@ startup_64:
19876 */
19877 addq %rbp, init_level4_pgt + 0(%rip)
19878 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
19879 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
19880 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
19881 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
19882 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
19883
19884 addq %rbp, level3_ident_pgt + 0(%rip)
19885 +#ifndef CONFIG_XEN
19886 + addq %rbp, level3_ident_pgt + 8(%rip)
19887 +#endif
19888
19889 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
19890 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
19891 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
19892 +
19893 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
19894 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
19895
19896 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
19897 -
19898 - /* Add an Identity mapping if I am above 1G */
19899 - leaq _text(%rip), %rdi
19900 - andq $PMD_PAGE_MASK, %rdi
19901 -
19902 - movq %rdi, %rax
19903 - shrq $PUD_SHIFT, %rax
19904 - andq $(PTRS_PER_PUD - 1), %rax
19905 - jz ident_complete
19906 -
19907 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
19908 - leaq level3_ident_pgt(%rip), %rbx
19909 - movq %rdx, 0(%rbx, %rax, 8)
19910 -
19911 - movq %rdi, %rax
19912 - shrq $PMD_SHIFT, %rax
19913 - andq $(PTRS_PER_PMD - 1), %rax
19914 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
19915 - leaq level2_spare_pgt(%rip), %rbx
19916 - movq %rdx, 0(%rbx, %rax, 8)
19917 -ident_complete:
19918 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
19919
19920 /*
19921 * Fixup the kernel text+data virtual addresses. Note that
19922 @@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
19923 * after the boot processor executes this code.
19924 */
19925
19926 - /* Enable PAE mode and PGE */
19927 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
19928 + /* Enable PAE mode and PSE/PGE */
19929 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19930 movq %rax, %cr4
19931
19932 /* Setup early boot stage 4 level pagetables. */
19933 @@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
19934 movl $MSR_EFER, %ecx
19935 rdmsr
19936 btsl $_EFER_SCE, %eax /* Enable System Call */
19937 - btl $20,%edi /* No Execute supported? */
19938 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
19939 jnc 1f
19940 btsl $_EFER_NX, %eax
19941 + leaq init_level4_pgt(%rip), %rdi
19942 +#ifndef CONFIG_EFI
19943 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
19944 +#endif
19945 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
19946 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
19947 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
19948 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
19949 1: wrmsr /* Make changes effective */
19950
19951 /* Setup cr0 */
19952 @@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
19953 * jump. In addition we need to ensure %cs is set so we make this
19954 * a far return.
19955 */
19956 + pax_set_fptr_mask
19957 movq initial_code(%rip),%rax
19958 pushq $0 # fake return address to stop unwinder
19959 pushq $__KERNEL_CS # set correct cs
19960 @@ -284,7 +289,7 @@ ENDPROC(start_cpu0)
19961 bad_address:
19962 jmp bad_address
19963
19964 - .section ".init.text","ax"
19965 + __INIT
19966 .globl early_idt_handlers
19967 early_idt_handlers:
19968 # 104(%rsp) %rflags
19969 @@ -343,7 +348,7 @@ ENTRY(early_idt_handler)
19970 call dump_stack
19971 #ifdef CONFIG_KALLSYMS
19972 leaq early_idt_ripmsg(%rip),%rdi
19973 - movq 40(%rsp),%rsi # %rip again
19974 + movq 88(%rsp),%rsi # %rip again
19975 call __print_symbol
19976 #endif
19977 #endif /* EARLY_PRINTK */
19978 @@ -363,11 +368,15 @@ ENTRY(early_idt_handler)
19979 addq $16,%rsp # drop vector number and error code
19980 decl early_recursion_flag(%rip)
19981 INTERRUPT_RETURN
19982 + .previous
19983
19984 + __INITDATA
19985 .balign 4
19986 early_recursion_flag:
19987 .long 0
19988 + .previous
19989
19990 + .section .rodata,"a",@progbits
19991 #ifdef CONFIG_EARLY_PRINTK
19992 early_idt_msg:
19993 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
19994 @@ -376,6 +385,7 @@ early_idt_ripmsg:
19995 #endif /* CONFIG_EARLY_PRINTK */
19996 .previous
19997
19998 + .section .rodata,"a",@progbits
19999 #define NEXT_PAGE(name) \
20000 .balign PAGE_SIZE; \
20001 ENTRY(name)
20002 @@ -388,7 +398,6 @@ ENTRY(name)
20003 i = i + 1 ; \
20004 .endr
20005
20006 - .data
20007 /*
20008 * This default setting generates an ident mapping at address 0x100000
20009 * and a mapping for the kernel that precisely maps virtual address
20010 @@ -399,13 +408,41 @@ NEXT_PAGE(init_level4_pgt)
20011 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20012 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20013 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20014 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
20015 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20016 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
20017 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20018 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20019 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20020 .org init_level4_pgt + L4_START_KERNEL*8, 0
20021 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20022 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20023
20024 +#ifdef CONFIG_PAX_PER_CPU_PGD
20025 +NEXT_PAGE(cpu_pgd)
20026 + .rept NR_CPUS
20027 + .fill 512,8,0
20028 + .endr
20029 +#endif
20030 +
20031 NEXT_PAGE(level3_ident_pgt)
20032 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20033 +#ifdef CONFIG_XEN
20034 .fill 511,8,0
20035 +#else
20036 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20037 + .fill 510,8,0
20038 +#endif
20039 +
20040 +NEXT_PAGE(level3_vmalloc_start_pgt)
20041 + .fill 512,8,0
20042 +
20043 +NEXT_PAGE(level3_vmalloc_end_pgt)
20044 + .fill 512,8,0
20045 +
20046 +NEXT_PAGE(level3_vmemmap_pgt)
20047 + .fill L3_VMEMMAP_START,8,0
20048 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20049
20050 NEXT_PAGE(level3_kernel_pgt)
20051 .fill L3_START_KERNEL,8,0
20052 @@ -413,20 +450,23 @@ NEXT_PAGE(level3_kernel_pgt)
20053 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20054 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20055
20056 +NEXT_PAGE(level2_vmemmap_pgt)
20057 + .fill 512,8,0
20058 +
20059 NEXT_PAGE(level2_fixmap_pgt)
20060 - .fill 506,8,0
20061 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20062 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20063 - .fill 5,8,0
20064 + .fill 507,8,0
20065 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20066 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20067 + .fill 4,8,0
20068
20069 -NEXT_PAGE(level1_fixmap_pgt)
20070 +NEXT_PAGE(level1_vsyscall_pgt)
20071 .fill 512,8,0
20072
20073 -NEXT_PAGE(level2_ident_pgt)
20074 - /* Since I easily can, map the first 1G.
20075 + /* Since I easily can, map the first 2G.
20076 * Don't set NX because code runs from these pages.
20077 */
20078 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20079 +NEXT_PAGE(level2_ident_pgt)
20080 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20081
20082 NEXT_PAGE(level2_kernel_pgt)
20083 /*
20084 @@ -439,37 +479,59 @@ NEXT_PAGE(level2_kernel_pgt)
20085 * If you want to increase this then increase MODULES_VADDR
20086 * too.)
20087 */
20088 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
20089 - KERNEL_IMAGE_SIZE/PMD_SIZE)
20090 -
20091 -NEXT_PAGE(level2_spare_pgt)
20092 - .fill 512, 8, 0
20093 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
20094
20095 #undef PMDS
20096 #undef NEXT_PAGE
20097
20098 - .data
20099 + .align PAGE_SIZE
20100 +ENTRY(cpu_gdt_table)
20101 + .rept NR_CPUS
20102 + .quad 0x0000000000000000 /* NULL descriptor */
20103 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
20104 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
20105 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
20106 + .quad 0x00cffb000000ffff /* __USER32_CS */
20107 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
20108 + .quad 0x00affb000000ffff /* __USER_CS */
20109 +
20110 +#ifdef CONFIG_PAX_KERNEXEC
20111 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
20112 +#else
20113 + .quad 0x0 /* unused */
20114 +#endif
20115 +
20116 + .quad 0,0 /* TSS */
20117 + .quad 0,0 /* LDT */
20118 + .quad 0,0,0 /* three TLS descriptors */
20119 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
20120 + /* asm/segment.h:GDT_ENTRIES must match this */
20121 +
20122 + /* zero the remaining page */
20123 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
20124 + .endr
20125 +
20126 .align 16
20127 .globl early_gdt_descr
20128 early_gdt_descr:
20129 .word GDT_ENTRIES*8-1
20130 early_gdt_descr_base:
20131 - .quad INIT_PER_CPU_VAR(gdt_page)
20132 + .quad cpu_gdt_table
20133
20134 ENTRY(phys_base)
20135 /* This must match the first entry in level2_kernel_pgt */
20136 .quad 0x0000000000000000
20137
20138 #include "../../x86/xen/xen-head.S"
20139 -
20140 - .section .bss, "aw", @nobits
20141 +
20142 + .section .rodata,"a",@progbits
20143 .align L1_CACHE_BYTES
20144 ENTRY(idt_table)
20145 - .skip IDT_ENTRIES * 16
20146 + .fill 512,8,0
20147
20148 .align L1_CACHE_BYTES
20149 ENTRY(nmi_idt_table)
20150 - .skip IDT_ENTRIES * 16
20151 + .fill 512,8,0
20152
20153 __PAGE_ALIGNED_BSS
20154 .align PAGE_SIZE
20155 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
20156 index 9c3bd4a..e1d9b35 100644
20157 --- a/arch/x86/kernel/i386_ksyms_32.c
20158 +++ b/arch/x86/kernel/i386_ksyms_32.c
20159 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
20160 EXPORT_SYMBOL(cmpxchg8b_emu);
20161 #endif
20162
20163 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
20164 +
20165 /* Networking helper routines. */
20166 EXPORT_SYMBOL(csum_partial_copy_generic);
20167 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
20168 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
20169
20170 EXPORT_SYMBOL(__get_user_1);
20171 EXPORT_SYMBOL(__get_user_2);
20172 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
20173
20174 EXPORT_SYMBOL(csum_partial);
20175 EXPORT_SYMBOL(empty_zero_page);
20176 +
20177 +#ifdef CONFIG_PAX_KERNEXEC
20178 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
20179 +#endif
20180 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
20181 index 245a71d..89d9ce4 100644
20182 --- a/arch/x86/kernel/i387.c
20183 +++ b/arch/x86/kernel/i387.c
20184 @@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
20185 static inline bool interrupted_user_mode(void)
20186 {
20187 struct pt_regs *regs = get_irq_regs();
20188 - return regs && user_mode_vm(regs);
20189 + return regs && user_mode(regs);
20190 }
20191
20192 /*
20193 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
20194 index 9a5c460..dc4374d 100644
20195 --- a/arch/x86/kernel/i8259.c
20196 +++ b/arch/x86/kernel/i8259.c
20197 @@ -209,7 +209,7 @@ spurious_8259A_irq:
20198 "spurious 8259A interrupt: IRQ%d.\n", irq);
20199 spurious_irq_mask |= irqmask;
20200 }
20201 - atomic_inc(&irq_err_count);
20202 + atomic_inc_unchecked(&irq_err_count);
20203 /*
20204 * Theoretically we do not have to handle this IRQ,
20205 * but in Linux this does not cause problems and is
20206 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
20207 index 8c96897..be66bfa 100644
20208 --- a/arch/x86/kernel/ioport.c
20209 +++ b/arch/x86/kernel/ioport.c
20210 @@ -6,6 +6,7 @@
20211 #include <linux/sched.h>
20212 #include <linux/kernel.h>
20213 #include <linux/capability.h>
20214 +#include <linux/security.h>
20215 #include <linux/errno.h>
20216 #include <linux/types.h>
20217 #include <linux/ioport.h>
20218 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20219
20220 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
20221 return -EINVAL;
20222 +#ifdef CONFIG_GRKERNSEC_IO
20223 + if (turn_on && grsec_disable_privio) {
20224 + gr_handle_ioperm();
20225 + return -EPERM;
20226 + }
20227 +#endif
20228 if (turn_on && !capable(CAP_SYS_RAWIO))
20229 return -EPERM;
20230
20231 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20232 * because the ->io_bitmap_max value must match the bitmap
20233 * contents:
20234 */
20235 - tss = &per_cpu(init_tss, get_cpu());
20236 + tss = init_tss + get_cpu();
20237
20238 if (turn_on)
20239 bitmap_clear(t->io_bitmap_ptr, from, num);
20240 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
20241 return -EINVAL;
20242 /* Trying to gain more privileges? */
20243 if (level > old) {
20244 +#ifdef CONFIG_GRKERNSEC_IO
20245 + if (grsec_disable_privio) {
20246 + gr_handle_iopl();
20247 + return -EPERM;
20248 + }
20249 +#endif
20250 if (!capable(CAP_SYS_RAWIO))
20251 return -EPERM;
20252 }
20253 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
20254 index e4595f1..ee3bfb8 100644
20255 --- a/arch/x86/kernel/irq.c
20256 +++ b/arch/x86/kernel/irq.c
20257 @@ -18,7 +18,7 @@
20258 #include <asm/mce.h>
20259 #include <asm/hw_irq.h>
20260
20261 -atomic_t irq_err_count;
20262 +atomic_unchecked_t irq_err_count;
20263
20264 /* Function pointer for generic interrupt vector handling */
20265 void (*x86_platform_ipi_callback)(void) = NULL;
20266 @@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
20267 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
20268 seq_printf(p, " Machine check polls\n");
20269 #endif
20270 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
20271 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
20272 #if defined(CONFIG_X86_IO_APIC)
20273 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
20274 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
20275 #endif
20276 return 0;
20277 }
20278 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
20279
20280 u64 arch_irq_stat(void)
20281 {
20282 - u64 sum = atomic_read(&irq_err_count);
20283 + u64 sum = atomic_read_unchecked(&irq_err_count);
20284
20285 #ifdef CONFIG_X86_IO_APIC
20286 - sum += atomic_read(&irq_mis_count);
20287 + sum += atomic_read_unchecked(&irq_mis_count);
20288 #endif
20289 return sum;
20290 }
20291 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
20292 index 344faf8..355f60d 100644
20293 --- a/arch/x86/kernel/irq_32.c
20294 +++ b/arch/x86/kernel/irq_32.c
20295 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
20296 __asm__ __volatile__("andl %%esp,%0" :
20297 "=r" (sp) : "0" (THREAD_SIZE - 1));
20298
20299 - return sp < (sizeof(struct thread_info) + STACK_WARN);
20300 + return sp < STACK_WARN;
20301 }
20302
20303 static void print_stack_overflow(void)
20304 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
20305 * per-CPU IRQ handling contexts (thread information and stack)
20306 */
20307 union irq_ctx {
20308 - struct thread_info tinfo;
20309 - u32 stack[THREAD_SIZE/sizeof(u32)];
20310 + unsigned long previous_esp;
20311 + u32 stack[THREAD_SIZE/sizeof(u32)];
20312 } __attribute__((aligned(THREAD_SIZE)));
20313
20314 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
20315 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
20316 static inline int
20317 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20318 {
20319 - union irq_ctx *curctx, *irqctx;
20320 + union irq_ctx *irqctx;
20321 u32 *isp, arg1, arg2;
20322
20323 - curctx = (union irq_ctx *) current_thread_info();
20324 irqctx = __this_cpu_read(hardirq_ctx);
20325
20326 /*
20327 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20328 * handler) we can't do that and just have to keep using the
20329 * current stack (which is the irq stack already after all)
20330 */
20331 - if (unlikely(curctx == irqctx))
20332 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
20333 return 0;
20334
20335 /* build the stack frame on the IRQ stack */
20336 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
20337 - irqctx->tinfo.task = curctx->tinfo.task;
20338 - irqctx->tinfo.previous_esp = current_stack_pointer;
20339 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
20340 + irqctx->previous_esp = current_stack_pointer;
20341
20342 - /* Copy the preempt_count so that the [soft]irq checks work. */
20343 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
20344 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20345 + __set_fs(MAKE_MM_SEG(0));
20346 +#endif
20347
20348 if (unlikely(overflow))
20349 call_on_stack(print_stack_overflow, isp);
20350 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20351 : "0" (irq), "1" (desc), "2" (isp),
20352 "D" (desc->handle_irq)
20353 : "memory", "cc", "ecx");
20354 +
20355 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20356 + __set_fs(current_thread_info()->addr_limit);
20357 +#endif
20358 +
20359 return 1;
20360 }
20361
20362 @@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20363 */
20364 void __cpuinit irq_ctx_init(int cpu)
20365 {
20366 - union irq_ctx *irqctx;
20367 -
20368 if (per_cpu(hardirq_ctx, cpu))
20369 return;
20370
20371 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
20372 - THREADINFO_GFP,
20373 - THREAD_SIZE_ORDER));
20374 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
20375 - irqctx->tinfo.cpu = cpu;
20376 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
20377 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
20378 -
20379 - per_cpu(hardirq_ctx, cpu) = irqctx;
20380 -
20381 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
20382 - THREADINFO_GFP,
20383 - THREAD_SIZE_ORDER));
20384 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
20385 - irqctx->tinfo.cpu = cpu;
20386 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
20387 -
20388 - per_cpu(softirq_ctx, cpu) = irqctx;
20389 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
20390 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
20391 +
20392 + printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
20393 + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
20394
20395 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
20396 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
20397 @@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
20398 asmlinkage void do_softirq(void)
20399 {
20400 unsigned long flags;
20401 - struct thread_info *curctx;
20402 union irq_ctx *irqctx;
20403 u32 *isp;
20404
20405 @@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
20406 local_irq_save(flags);
20407
20408 if (local_softirq_pending()) {
20409 - curctx = current_thread_info();
20410 irqctx = __this_cpu_read(softirq_ctx);
20411 - irqctx->tinfo.task = curctx->task;
20412 - irqctx->tinfo.previous_esp = current_stack_pointer;
20413 + irqctx->previous_esp = current_stack_pointer;
20414
20415 /* build the stack frame on the softirq stack */
20416 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
20417 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
20418 +
20419 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20420 + __set_fs(MAKE_MM_SEG(0));
20421 +#endif
20422
20423 call_on_stack(__do_softirq, isp);
20424 +
20425 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20426 + __set_fs(current_thread_info()->addr_limit);
20427 +#endif
20428 +
20429 /*
20430 * Shouldn't happen, we returned above if in_interrupt():
20431 */
20432 @@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
20433 if (unlikely(!desc))
20434 return false;
20435
20436 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
20437 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
20438 if (unlikely(overflow))
20439 print_stack_overflow();
20440 desc->handle_irq(irq, desc);
20441 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
20442 index d04d3ec..ea4b374 100644
20443 --- a/arch/x86/kernel/irq_64.c
20444 +++ b/arch/x86/kernel/irq_64.c
20445 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
20446 u64 estack_top, estack_bottom;
20447 u64 curbase = (u64)task_stack_page(current);
20448
20449 - if (user_mode_vm(regs))
20450 + if (user_mode(regs))
20451 return;
20452
20453 if (regs->sp >= curbase + sizeof(struct thread_info) +
20454 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
20455 index dc1404b..bbc43e7 100644
20456 --- a/arch/x86/kernel/kdebugfs.c
20457 +++ b/arch/x86/kernel/kdebugfs.c
20458 @@ -27,7 +27,7 @@ struct setup_data_node {
20459 u32 len;
20460 };
20461
20462 -static ssize_t setup_data_read(struct file *file, char __user *user_buf,
20463 +static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
20464 size_t count, loff_t *ppos)
20465 {
20466 struct setup_data_node *node = file->private_data;
20467 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
20468 index 836f832..a8bda67 100644
20469 --- a/arch/x86/kernel/kgdb.c
20470 +++ b/arch/x86/kernel/kgdb.c
20471 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
20472 #ifdef CONFIG_X86_32
20473 switch (regno) {
20474 case GDB_SS:
20475 - if (!user_mode_vm(regs))
20476 + if (!user_mode(regs))
20477 *(unsigned long *)mem = __KERNEL_DS;
20478 break;
20479 case GDB_SP:
20480 - if (!user_mode_vm(regs))
20481 + if (!user_mode(regs))
20482 *(unsigned long *)mem = kernel_stack_pointer(regs);
20483 break;
20484 case GDB_GS:
20485 @@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
20486 bp->attr.bp_addr = breakinfo[breakno].addr;
20487 bp->attr.bp_len = breakinfo[breakno].len;
20488 bp->attr.bp_type = breakinfo[breakno].type;
20489 - info->address = breakinfo[breakno].addr;
20490 + if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
20491 + info->address = ktla_ktva(breakinfo[breakno].addr);
20492 + else
20493 + info->address = breakinfo[breakno].addr;
20494 info->len = breakinfo[breakno].len;
20495 info->type = breakinfo[breakno].type;
20496 val = arch_install_hw_breakpoint(bp);
20497 @@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
20498 case 'k':
20499 /* clear the trace bit */
20500 linux_regs->flags &= ~X86_EFLAGS_TF;
20501 - atomic_set(&kgdb_cpu_doing_single_step, -1);
20502 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
20503
20504 /* set the trace bit if we're stepping */
20505 if (remcomInBuffer[0] == 's') {
20506 linux_regs->flags |= X86_EFLAGS_TF;
20507 - atomic_set(&kgdb_cpu_doing_single_step,
20508 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
20509 raw_smp_processor_id());
20510 }
20511
20512 @@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
20513
20514 switch (cmd) {
20515 case DIE_DEBUG:
20516 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
20517 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
20518 if (user_mode(regs))
20519 return single_step_cont(regs, args);
20520 break;
20521 @@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
20522 #endif /* CONFIG_DEBUG_RODATA */
20523
20524 bpt->type = BP_BREAKPOINT;
20525 - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
20526 + err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
20527 BREAK_INSTR_SIZE);
20528 if (err)
20529 return err;
20530 - err = probe_kernel_write((char *)bpt->bpt_addr,
20531 + err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
20532 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
20533 #ifdef CONFIG_DEBUG_RODATA
20534 if (!err)
20535 @@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
20536 return -EBUSY;
20537 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
20538 BREAK_INSTR_SIZE);
20539 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
20540 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
20541 if (err)
20542 return err;
20543 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
20544 @@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
20545 if (mutex_is_locked(&text_mutex))
20546 goto knl_write;
20547 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
20548 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
20549 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
20550 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
20551 goto knl_write;
20552 return err;
20553 knl_write:
20554 #endif /* CONFIG_DEBUG_RODATA */
20555 - return probe_kernel_write((char *)bpt->bpt_addr,
20556 + return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
20557 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
20558 }
20559
20560 diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
20561 index c5e410e..ed5a7f0 100644
20562 --- a/arch/x86/kernel/kprobes-opt.c
20563 +++ b/arch/x86/kernel/kprobes-opt.c
20564 @@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
20565 * Verify if the address gap is in 2GB range, because this uses
20566 * a relative jump.
20567 */
20568 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
20569 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
20570 if (abs(rel) > 0x7fffffff)
20571 return -ERANGE;
20572
20573 @@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
20574 op->optinsn.size = ret;
20575
20576 /* Copy arch-dep-instance from template */
20577 - memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
20578 + memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
20579
20580 /* Set probe information */
20581 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
20582
20583 /* Set probe function call */
20584 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
20585 + synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
20586
20587 /* Set returning jmp instruction at the tail of out-of-line buffer */
20588 - synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
20589 + synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
20590 (u8 *)op->kp.addr + op->optinsn.size);
20591
20592 flush_icache_range((unsigned long) buf,
20593 @@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
20594 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
20595
20596 /* Backup instructions which will be replaced by jump address */
20597 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
20598 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
20599 RELATIVE_ADDR_SIZE);
20600
20601 insn_buf[0] = RELATIVEJUMP_OPCODE;
20602 @@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
20603 /* This kprobe is really able to run optimized path. */
20604 op = container_of(p, struct optimized_kprobe, kp);
20605 /* Detour through copied instructions */
20606 - regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
20607 + regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
20608 if (!reenter)
20609 reset_current_kprobe();
20610 preempt_enable_no_resched();
20611 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
20612 index 57916c0..9e0b9d0 100644
20613 --- a/arch/x86/kernel/kprobes.c
20614 +++ b/arch/x86/kernel/kprobes.c
20615 @@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
20616 s32 raddr;
20617 } __attribute__((packed)) *insn;
20618
20619 - insn = (struct __arch_relative_insn *)from;
20620 + insn = (struct __arch_relative_insn *)ktla_ktva(from);
20621 +
20622 + pax_open_kernel();
20623 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
20624 insn->op = op;
20625 + pax_close_kernel();
20626 }
20627
20628 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
20629 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
20630 kprobe_opcode_t opcode;
20631 kprobe_opcode_t *orig_opcodes = opcodes;
20632
20633 - if (search_exception_tables((unsigned long)opcodes))
20634 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
20635 return 0; /* Page fault may occur on this address. */
20636
20637 retry:
20638 @@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
20639 * for the first byte, we can recover the original instruction
20640 * from it and kp->opcode.
20641 */
20642 - memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
20643 + memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
20644 buf[0] = kp->opcode;
20645 - return (unsigned long)buf;
20646 + return ktva_ktla((unsigned long)buf);
20647 }
20648
20649 /*
20650 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
20651 /* Another subsystem puts a breakpoint, failed to recover */
20652 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
20653 return 0;
20654 + pax_open_kernel();
20655 memcpy(dest, insn.kaddr, insn.length);
20656 + pax_close_kernel();
20657
20658 #ifdef CONFIG_X86_64
20659 if (insn_rip_relative(&insn)) {
20660 @@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
20661 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
20662 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
20663 disp = (u8 *) dest + insn_offset_displacement(&insn);
20664 + pax_open_kernel();
20665 *(s32 *) disp = (s32) newdisp;
20666 + pax_close_kernel();
20667 }
20668 #endif
20669 return insn.length;
20670 @@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
20671 * nor set current_kprobe, because it doesn't use single
20672 * stepping.
20673 */
20674 - regs->ip = (unsigned long)p->ainsn.insn;
20675 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
20676 preempt_enable_no_resched();
20677 return;
20678 }
20679 @@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
20680 regs->flags &= ~X86_EFLAGS_IF;
20681 /* single step inline if the instruction is an int3 */
20682 if (p->opcode == BREAKPOINT_INSTRUCTION)
20683 - regs->ip = (unsigned long)p->addr;
20684 + regs->ip = ktla_ktva((unsigned long)p->addr);
20685 else
20686 - regs->ip = (unsigned long)p->ainsn.insn;
20687 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
20688 }
20689
20690 /*
20691 @@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
20692 setup_singlestep(p, regs, kcb, 0);
20693 return 1;
20694 }
20695 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
20696 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
20697 /*
20698 * The breakpoint instruction was removed right
20699 * after we hit it. Another cpu has removed
20700 @@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
20701 " movq %rax, 152(%rsp)\n"
20702 RESTORE_REGS_STRING
20703 " popfq\n"
20704 +#ifdef KERNEXEC_PLUGIN
20705 + " btsq $63,(%rsp)\n"
20706 +#endif
20707 #else
20708 " pushf\n"
20709 SAVE_REGS_STRING
20710 @@ -788,7 +798,7 @@ static void __kprobes
20711 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
20712 {
20713 unsigned long *tos = stack_addr(regs);
20714 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
20715 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
20716 unsigned long orig_ip = (unsigned long)p->addr;
20717 kprobe_opcode_t *insn = p->ainsn.insn;
20718
20719 @@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
20720 struct die_args *args = data;
20721 int ret = NOTIFY_DONE;
20722
20723 - if (args->regs && user_mode_vm(args->regs))
20724 + if (args->regs && user_mode(args->regs))
20725 return ret;
20726
20727 switch (val) {
20728 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
20729 index 9c2bd8b..bb1131c 100644
20730 --- a/arch/x86/kernel/kvm.c
20731 +++ b/arch/x86/kernel/kvm.c
20732 @@ -452,7 +452,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
20733 return NOTIFY_OK;
20734 }
20735
20736 -static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
20737 +static struct notifier_block kvm_cpu_notifier = {
20738 .notifier_call = kvm_cpu_notify,
20739 };
20740 #endif
20741 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
20742 index ebc9873..1b9724b 100644
20743 --- a/arch/x86/kernel/ldt.c
20744 +++ b/arch/x86/kernel/ldt.c
20745 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
20746 if (reload) {
20747 #ifdef CONFIG_SMP
20748 preempt_disable();
20749 - load_LDT(pc);
20750 + load_LDT_nolock(pc);
20751 if (!cpumask_equal(mm_cpumask(current->mm),
20752 cpumask_of(smp_processor_id())))
20753 smp_call_function(flush_ldt, current->mm, 1);
20754 preempt_enable();
20755 #else
20756 - load_LDT(pc);
20757 + load_LDT_nolock(pc);
20758 #endif
20759 }
20760 if (oldsize) {
20761 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
20762 return err;
20763
20764 for (i = 0; i < old->size; i++)
20765 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
20766 + write_ldt_entry(new->ldt, i, old->ldt + i);
20767 return 0;
20768 }
20769
20770 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
20771 retval = copy_ldt(&mm->context, &old_mm->context);
20772 mutex_unlock(&old_mm->context.lock);
20773 }
20774 +
20775 + if (tsk == current) {
20776 + mm->context.vdso = 0;
20777 +
20778 +#ifdef CONFIG_X86_32
20779 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20780 + mm->context.user_cs_base = 0UL;
20781 + mm->context.user_cs_limit = ~0UL;
20782 +
20783 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
20784 + cpus_clear(mm->context.cpu_user_cs_mask);
20785 +#endif
20786 +
20787 +#endif
20788 +#endif
20789 +
20790 + }
20791 +
20792 return retval;
20793 }
20794
20795 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
20796 }
20797 }
20798
20799 +#ifdef CONFIG_PAX_SEGMEXEC
20800 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
20801 + error = -EINVAL;
20802 + goto out_unlock;
20803 + }
20804 +#endif
20805 +
20806 fill_ldt(&ldt, &ldt_info);
20807 if (oldmode)
20808 ldt.avl = 0;
20809 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
20810 index 5b19e4d..6476a76 100644
20811 --- a/arch/x86/kernel/machine_kexec_32.c
20812 +++ b/arch/x86/kernel/machine_kexec_32.c
20813 @@ -26,7 +26,7 @@
20814 #include <asm/cacheflush.h>
20815 #include <asm/debugreg.h>
20816
20817 -static void set_idt(void *newidt, __u16 limit)
20818 +static void set_idt(struct desc_struct *newidt, __u16 limit)
20819 {
20820 struct desc_ptr curidt;
20821
20822 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
20823 }
20824
20825
20826 -static void set_gdt(void *newgdt, __u16 limit)
20827 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
20828 {
20829 struct desc_ptr curgdt;
20830
20831 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
20832 }
20833
20834 control_page = page_address(image->control_code_page);
20835 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
20836 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
20837
20838 relocate_kernel_ptr = control_page;
20839 page_list[PA_CONTROL_PAGE] = __pa(control_page);
20840 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
20841 index 3a04b22..1d2eb09 100644
20842 --- a/arch/x86/kernel/microcode_core.c
20843 +++ b/arch/x86/kernel/microcode_core.c
20844 @@ -512,7 +512,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
20845 return NOTIFY_OK;
20846 }
20847
20848 -static struct notifier_block __refdata mc_cpu_notifier = {
20849 +static struct notifier_block mc_cpu_notifier = {
20850 .notifier_call = mc_cpu_callback,
20851 };
20852
20853 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
20854 index 3544aed..01ddc1c 100644
20855 --- a/arch/x86/kernel/microcode_intel.c
20856 +++ b/arch/x86/kernel/microcode_intel.c
20857 @@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
20858
20859 static int get_ucode_user(void *to, const void *from, size_t n)
20860 {
20861 - return copy_from_user(to, from, n);
20862 + return copy_from_user(to, (const void __force_user *)from, n);
20863 }
20864
20865 static enum ucode_state
20866 request_microcode_user(int cpu, const void __user *buf, size_t size)
20867 {
20868 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
20869 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
20870 }
20871
20872 static void microcode_fini_cpu(int cpu)
20873 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
20874 index 216a4d7..228255a 100644
20875 --- a/arch/x86/kernel/module.c
20876 +++ b/arch/x86/kernel/module.c
20877 @@ -43,15 +43,60 @@ do { \
20878 } while (0)
20879 #endif
20880
20881 -void *module_alloc(unsigned long size)
20882 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
20883 {
20884 - if (PAGE_ALIGN(size) > MODULES_LEN)
20885 + if (!size || PAGE_ALIGN(size) > MODULES_LEN)
20886 return NULL;
20887 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
20888 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
20889 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
20890 -1, __builtin_return_address(0));
20891 }
20892
20893 +void *module_alloc(unsigned long size)
20894 +{
20895 +
20896 +#ifdef CONFIG_PAX_KERNEXEC
20897 + return __module_alloc(size, PAGE_KERNEL);
20898 +#else
20899 + return __module_alloc(size, PAGE_KERNEL_EXEC);
20900 +#endif
20901 +
20902 +}
20903 +
20904 +#ifdef CONFIG_PAX_KERNEXEC
20905 +#ifdef CONFIG_X86_32
20906 +void *module_alloc_exec(unsigned long size)
20907 +{
20908 + struct vm_struct *area;
20909 +
20910 + if (size == 0)
20911 + return NULL;
20912 +
20913 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
20914 + return area ? area->addr : NULL;
20915 +}
20916 +EXPORT_SYMBOL(module_alloc_exec);
20917 +
20918 +void module_free_exec(struct module *mod, void *module_region)
20919 +{
20920 + vunmap(module_region);
20921 +}
20922 +EXPORT_SYMBOL(module_free_exec);
20923 +#else
20924 +void module_free_exec(struct module *mod, void *module_region)
20925 +{
20926 + module_free(mod, module_region);
20927 +}
20928 +EXPORT_SYMBOL(module_free_exec);
20929 +
20930 +void *module_alloc_exec(unsigned long size)
20931 +{
20932 + return __module_alloc(size, PAGE_KERNEL_RX);
20933 +}
20934 +EXPORT_SYMBOL(module_alloc_exec);
20935 +#endif
20936 +#endif
20937 +
20938 #ifdef CONFIG_X86_32
20939 int apply_relocate(Elf32_Shdr *sechdrs,
20940 const char *strtab,
20941 @@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
20942 unsigned int i;
20943 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
20944 Elf32_Sym *sym;
20945 - uint32_t *location;
20946 + uint32_t *plocation, location;
20947
20948 DEBUGP("Applying relocate section %u to %u\n",
20949 relsec, sechdrs[relsec].sh_info);
20950 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
20951 /* This is where to make the change */
20952 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
20953 - + rel[i].r_offset;
20954 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
20955 + location = (uint32_t)plocation;
20956 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
20957 + plocation = ktla_ktva((void *)plocation);
20958 /* This is the symbol it is referring to. Note that all
20959 undefined symbols have been resolved. */
20960 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
20961 @@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
20962 switch (ELF32_R_TYPE(rel[i].r_info)) {
20963 case R_386_32:
20964 /* We add the value into the location given */
20965 - *location += sym->st_value;
20966 + pax_open_kernel();
20967 + *plocation += sym->st_value;
20968 + pax_close_kernel();
20969 break;
20970 case R_386_PC32:
20971 /* Add the value, subtract its position */
20972 - *location += sym->st_value - (uint32_t)location;
20973 + pax_open_kernel();
20974 + *plocation += sym->st_value - location;
20975 + pax_close_kernel();
20976 break;
20977 default:
20978 pr_err("%s: Unknown relocation: %u\n",
20979 @@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
20980 case R_X86_64_NONE:
20981 break;
20982 case R_X86_64_64:
20983 + pax_open_kernel();
20984 *(u64 *)loc = val;
20985 + pax_close_kernel();
20986 break;
20987 case R_X86_64_32:
20988 + pax_open_kernel();
20989 *(u32 *)loc = val;
20990 + pax_close_kernel();
20991 if (val != *(u32 *)loc)
20992 goto overflow;
20993 break;
20994 case R_X86_64_32S:
20995 + pax_open_kernel();
20996 *(s32 *)loc = val;
20997 + pax_close_kernel();
20998 if ((s64)val != *(s32 *)loc)
20999 goto overflow;
21000 break;
21001 case R_X86_64_PC32:
21002 val -= (u64)loc;
21003 + pax_open_kernel();
21004 *(u32 *)loc = val;
21005 + pax_close_kernel();
21006 +
21007 #if 0
21008 if ((s64)val != *(s32 *)loc)
21009 goto overflow;
21010 diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21011 index 4929502..686c291 100644
21012 --- a/arch/x86/kernel/msr.c
21013 +++ b/arch/x86/kernel/msr.c
21014 @@ -234,7 +234,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21015 return notifier_from_errno(err);
21016 }
21017
21018 -static struct notifier_block __refdata msr_class_cpu_notifier = {
21019 +static struct notifier_block msr_class_cpu_notifier = {
21020 .notifier_call = msr_class_cpu_callback,
21021 };
21022
21023 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21024 index f84f5c5..e27e54b 100644
21025 --- a/arch/x86/kernel/nmi.c
21026 +++ b/arch/x86/kernel/nmi.c
21027 @@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
21028 dotraplinkage notrace __kprobes void
21029 do_nmi(struct pt_regs *regs, long error_code)
21030 {
21031 +
21032 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21033 + if (!user_mode(regs)) {
21034 + unsigned long cs = regs->cs & 0xFFFF;
21035 + unsigned long ip = ktva_ktla(regs->ip);
21036 +
21037 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21038 + regs->ip = ip;
21039 + }
21040 +#endif
21041 +
21042 nmi_nesting_preprocess(regs);
21043
21044 nmi_enter();
21045 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
21046 index 676b8c7..870ba04 100644
21047 --- a/arch/x86/kernel/paravirt-spinlocks.c
21048 +++ b/arch/x86/kernel/paravirt-spinlocks.c
21049 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
21050 arch_spin_lock(lock);
21051 }
21052
21053 -struct pv_lock_ops pv_lock_ops = {
21054 +struct pv_lock_ops pv_lock_ops __read_only = {
21055 #ifdef CONFIG_SMP
21056 .spin_is_locked = __ticket_spin_is_locked,
21057 .spin_is_contended = __ticket_spin_is_contended,
21058 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
21059 index 17fff18..5cfa0f4 100644
21060 --- a/arch/x86/kernel/paravirt.c
21061 +++ b/arch/x86/kernel/paravirt.c
21062 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
21063 {
21064 return x;
21065 }
21066 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21067 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
21068 +#endif
21069
21070 void __init default_banner(void)
21071 {
21072 @@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
21073 if (opfunc == NULL)
21074 /* If there's no function, patch it with a ud2a (BUG) */
21075 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
21076 - else if (opfunc == _paravirt_nop)
21077 + else if (opfunc == (void *)_paravirt_nop)
21078 /* If the operation is a nop, then nop the callsite */
21079 ret = paravirt_patch_nop();
21080
21081 /* identity functions just return their single argument */
21082 - else if (opfunc == _paravirt_ident_32)
21083 + else if (opfunc == (void *)_paravirt_ident_32)
21084 ret = paravirt_patch_ident_32(insnbuf, len);
21085 - else if (opfunc == _paravirt_ident_64)
21086 + else if (opfunc == (void *)_paravirt_ident_64)
21087 ret = paravirt_patch_ident_64(insnbuf, len);
21088 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21089 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
21090 + ret = paravirt_patch_ident_64(insnbuf, len);
21091 +#endif
21092
21093 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
21094 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
21095 @@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
21096 if (insn_len > len || start == NULL)
21097 insn_len = len;
21098 else
21099 - memcpy(insnbuf, start, insn_len);
21100 + memcpy(insnbuf, ktla_ktva(start), insn_len);
21101
21102 return insn_len;
21103 }
21104 @@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
21105 preempt_enable();
21106 }
21107
21108 -struct pv_info pv_info = {
21109 +struct pv_info pv_info __read_only = {
21110 .name = "bare hardware",
21111 .paravirt_enabled = 0,
21112 .kernel_rpl = 0,
21113 @@ -315,16 +322,16 @@ struct pv_info pv_info = {
21114 #endif
21115 };
21116
21117 -struct pv_init_ops pv_init_ops = {
21118 +struct pv_init_ops pv_init_ops __read_only = {
21119 .patch = native_patch,
21120 };
21121
21122 -struct pv_time_ops pv_time_ops = {
21123 +struct pv_time_ops pv_time_ops __read_only = {
21124 .sched_clock = native_sched_clock,
21125 .steal_clock = native_steal_clock,
21126 };
21127
21128 -struct pv_irq_ops pv_irq_ops = {
21129 +struct pv_irq_ops pv_irq_ops __read_only = {
21130 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
21131 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
21132 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
21133 @@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
21134 #endif
21135 };
21136
21137 -struct pv_cpu_ops pv_cpu_ops = {
21138 +struct pv_cpu_ops pv_cpu_ops __read_only = {
21139 .cpuid = native_cpuid,
21140 .get_debugreg = native_get_debugreg,
21141 .set_debugreg = native_set_debugreg,
21142 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
21143 .end_context_switch = paravirt_nop,
21144 };
21145
21146 -struct pv_apic_ops pv_apic_ops = {
21147 +struct pv_apic_ops pv_apic_ops __read_only= {
21148 #ifdef CONFIG_X86_LOCAL_APIC
21149 .startup_ipi_hook = paravirt_nop,
21150 #endif
21151 };
21152
21153 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
21154 +#ifdef CONFIG_X86_32
21155 +#ifdef CONFIG_X86_PAE
21156 +/* 64-bit pagetable entries */
21157 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
21158 +#else
21159 /* 32-bit pagetable entries */
21160 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
21161 +#endif
21162 #else
21163 /* 64-bit pagetable entries */
21164 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
21165 #endif
21166
21167 -struct pv_mmu_ops pv_mmu_ops = {
21168 +struct pv_mmu_ops pv_mmu_ops __read_only = {
21169
21170 .read_cr2 = native_read_cr2,
21171 .write_cr2 = native_write_cr2,
21172 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
21173 .make_pud = PTE_IDENT,
21174
21175 .set_pgd = native_set_pgd,
21176 + .set_pgd_batched = native_set_pgd_batched,
21177 #endif
21178 #endif /* PAGETABLE_LEVELS >= 3 */
21179
21180 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
21181 },
21182
21183 .set_fixmap = native_set_fixmap,
21184 +
21185 +#ifdef CONFIG_PAX_KERNEXEC
21186 + .pax_open_kernel = native_pax_open_kernel,
21187 + .pax_close_kernel = native_pax_close_kernel,
21188 +#endif
21189 +
21190 };
21191
21192 EXPORT_SYMBOL_GPL(pv_time_ops);
21193 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
21194 index 35ccf75..7a15747 100644
21195 --- a/arch/x86/kernel/pci-iommu_table.c
21196 +++ b/arch/x86/kernel/pci-iommu_table.c
21197 @@ -2,7 +2,7 @@
21198 #include <asm/iommu_table.h>
21199 #include <linux/string.h>
21200 #include <linux/kallsyms.h>
21201 -
21202 +#include <linux/sched.h>
21203
21204 #define DEBUG 1
21205
21206 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
21207 index 2ed787f..f70c9f6 100644
21208 --- a/arch/x86/kernel/process.c
21209 +++ b/arch/x86/kernel/process.c
21210 @@ -36,7 +36,8 @@
21211 * section. Since TSS's are completely CPU-local, we want them
21212 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
21213 */
21214 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
21215 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
21216 +EXPORT_SYMBOL(init_tss);
21217
21218 #ifdef CONFIG_X86_64
21219 static DEFINE_PER_CPU(unsigned char, is_idle);
21220 @@ -92,7 +93,7 @@ void arch_task_cache_init(void)
21221 task_xstate_cachep =
21222 kmem_cache_create("task_xstate", xstate_size,
21223 __alignof__(union thread_xstate),
21224 - SLAB_PANIC | SLAB_NOTRACK, NULL);
21225 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
21226 }
21227
21228 /*
21229 @@ -105,7 +106,7 @@ void exit_thread(void)
21230 unsigned long *bp = t->io_bitmap_ptr;
21231
21232 if (bp) {
21233 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
21234 + struct tss_struct *tss = init_tss + get_cpu();
21235
21236 t->io_bitmap_ptr = NULL;
21237 clear_thread_flag(TIF_IO_BITMAP);
21238 @@ -136,7 +137,7 @@ void show_regs_common(void)
21239 board = dmi_get_system_info(DMI_BOARD_NAME);
21240
21241 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
21242 - current->pid, current->comm, print_tainted(),
21243 + task_pid_nr(current), current->comm, print_tainted(),
21244 init_utsname()->release,
21245 (int)strcspn(init_utsname()->version, " "),
21246 init_utsname()->version,
21247 @@ -149,6 +150,9 @@ void flush_thread(void)
21248 {
21249 struct task_struct *tsk = current;
21250
21251 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
21252 + loadsegment(gs, 0);
21253 +#endif
21254 flush_ptrace_hw_breakpoint(tsk);
21255 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
21256 drop_init_fpu(tsk);
21257 @@ -301,7 +305,7 @@ static void __exit_idle(void)
21258 void exit_idle(void)
21259 {
21260 /* idle loop has pid 0 */
21261 - if (current->pid)
21262 + if (task_pid_nr(current))
21263 return;
21264 __exit_idle();
21265 }
21266 @@ -404,7 +408,7 @@ bool set_pm_idle_to_default(void)
21267
21268 return ret;
21269 }
21270 -void stop_this_cpu(void *dummy)
21271 +__noreturn void stop_this_cpu(void *dummy)
21272 {
21273 local_irq_disable();
21274 /*
21275 @@ -632,16 +636,37 @@ static int __init idle_setup(char *str)
21276 }
21277 early_param("idle", idle_setup);
21278
21279 -unsigned long arch_align_stack(unsigned long sp)
21280 +#ifdef CONFIG_PAX_RANDKSTACK
21281 +void pax_randomize_kstack(struct pt_regs *regs)
21282 {
21283 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
21284 - sp -= get_random_int() % 8192;
21285 - return sp & ~0xf;
21286 -}
21287 + struct thread_struct *thread = &current->thread;
21288 + unsigned long time;
21289
21290 -unsigned long arch_randomize_brk(struct mm_struct *mm)
21291 -{
21292 - unsigned long range_end = mm->brk + 0x02000000;
21293 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
21294 -}
21295 + if (!randomize_va_space)
21296 + return;
21297 +
21298 + if (v8086_mode(regs))
21299 + return;
21300
21301 + rdtscl(time);
21302 +
21303 + /* P4 seems to return a 0 LSB, ignore it */
21304 +#ifdef CONFIG_MPENTIUM4
21305 + time &= 0x3EUL;
21306 + time <<= 2;
21307 +#elif defined(CONFIG_X86_64)
21308 + time &= 0xFUL;
21309 + time <<= 4;
21310 +#else
21311 + time &= 0x1FUL;
21312 + time <<= 3;
21313 +#endif
21314 +
21315 + thread->sp0 ^= time;
21316 + load_sp0(init_tss + smp_processor_id(), thread);
21317 +
21318 +#ifdef CONFIG_X86_64
21319 + this_cpu_write(kernel_stack, thread->sp0);
21320 +#endif
21321 +}
21322 +#endif
21323 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
21324 index b5a8905..d9cacac 100644
21325 --- a/arch/x86/kernel/process_32.c
21326 +++ b/arch/x86/kernel/process_32.c
21327 @@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
21328 unsigned long thread_saved_pc(struct task_struct *tsk)
21329 {
21330 return ((unsigned long *)tsk->thread.sp)[3];
21331 +//XXX return tsk->thread.eip;
21332 }
21333
21334 void __show_regs(struct pt_regs *regs, int all)
21335 @@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
21336 unsigned long sp;
21337 unsigned short ss, gs;
21338
21339 - if (user_mode_vm(regs)) {
21340 + if (user_mode(regs)) {
21341 sp = regs->sp;
21342 ss = regs->ss & 0xffff;
21343 - gs = get_user_gs(regs);
21344 } else {
21345 sp = kernel_stack_pointer(regs);
21346 savesegment(ss, ss);
21347 - savesegment(gs, gs);
21348 }
21349 + gs = get_user_gs(regs);
21350
21351 show_regs_common();
21352
21353 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
21354 (u16)regs->cs, regs->ip, regs->flags,
21355 - smp_processor_id());
21356 + raw_smp_processor_id());
21357 print_symbol("EIP is at %s\n", regs->ip);
21358
21359 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
21360 @@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
21361 int copy_thread(unsigned long clone_flags, unsigned long sp,
21362 unsigned long arg, struct task_struct *p)
21363 {
21364 - struct pt_regs *childregs = task_pt_regs(p);
21365 + struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
21366 struct task_struct *tsk;
21367 int err;
21368
21369 p->thread.sp = (unsigned long) childregs;
21370 p->thread.sp0 = (unsigned long) (childregs+1);
21371 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
21372
21373 if (unlikely(p->flags & PF_KTHREAD)) {
21374 /* kernel thread */
21375 memset(childregs, 0, sizeof(struct pt_regs));
21376 p->thread.ip = (unsigned long) ret_from_kernel_thread;
21377 - task_user_gs(p) = __KERNEL_STACK_CANARY;
21378 - childregs->ds = __USER_DS;
21379 - childregs->es = __USER_DS;
21380 + savesegment(gs, childregs->gs);
21381 + childregs->ds = __KERNEL_DS;
21382 + childregs->es = __KERNEL_DS;
21383 childregs->fs = __KERNEL_PERCPU;
21384 childregs->bx = sp; /* function */
21385 childregs->bp = arg;
21386 @@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21387 struct thread_struct *prev = &prev_p->thread,
21388 *next = &next_p->thread;
21389 int cpu = smp_processor_id();
21390 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21391 + struct tss_struct *tss = init_tss + cpu;
21392 fpu_switch_t fpu;
21393
21394 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
21395 @@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21396 */
21397 lazy_save_gs(prev->gs);
21398
21399 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21400 + __set_fs(task_thread_info(next_p)->addr_limit);
21401 +#endif
21402 +
21403 /*
21404 * Load the per-thread Thread-Local Storage descriptor.
21405 */
21406 @@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21407 */
21408 arch_end_context_switch(next_p);
21409
21410 + this_cpu_write(current_task, next_p);
21411 + this_cpu_write(current_tinfo, &next_p->tinfo);
21412 +
21413 /*
21414 * Restore %gs if needed (which is common)
21415 */
21416 @@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21417
21418 switch_fpu_finish(next_p, fpu);
21419
21420 - this_cpu_write(current_task, next_p);
21421 -
21422 return prev_p;
21423 }
21424
21425 @@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
21426 } while (count++ < 16);
21427 return 0;
21428 }
21429 -
21430 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
21431 index 6e68a61..955a9a5 100644
21432 --- a/arch/x86/kernel/process_64.c
21433 +++ b/arch/x86/kernel/process_64.c
21434 @@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
21435 struct pt_regs *childregs;
21436 struct task_struct *me = current;
21437
21438 - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
21439 + p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
21440 childregs = task_pt_regs(p);
21441 p->thread.sp = (unsigned long) childregs;
21442 p->thread.usersp = me->thread.usersp;
21443 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
21444 set_tsk_thread_flag(p, TIF_FORK);
21445 p->fpu_counter = 0;
21446 p->thread.io_bitmap_ptr = NULL;
21447 @@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21448 struct thread_struct *prev = &prev_p->thread;
21449 struct thread_struct *next = &next_p->thread;
21450 int cpu = smp_processor_id();
21451 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21452 + struct tss_struct *tss = init_tss + cpu;
21453 unsigned fsindex, gsindex;
21454 fpu_switch_t fpu;
21455
21456 @@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21457 prev->usersp = this_cpu_read(old_rsp);
21458 this_cpu_write(old_rsp, next->usersp);
21459 this_cpu_write(current_task, next_p);
21460 + this_cpu_write(current_tinfo, &next_p->tinfo);
21461
21462 - this_cpu_write(kernel_stack,
21463 - (unsigned long)task_stack_page(next_p) +
21464 - THREAD_SIZE - KERNEL_STACK_OFFSET);
21465 + this_cpu_write(kernel_stack, next->sp0);
21466
21467 /*
21468 * Now maybe reload the debug registers and handle I/O bitmaps
21469 @@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
21470 if (!p || p == current || p->state == TASK_RUNNING)
21471 return 0;
21472 stack = (unsigned long)task_stack_page(p);
21473 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
21474 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
21475 return 0;
21476 fp = *(u64 *)(p->thread.sp);
21477 do {
21478 - if (fp < (unsigned long)stack ||
21479 - fp >= (unsigned long)stack+THREAD_SIZE)
21480 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
21481 return 0;
21482 ip = *(u64 *)(fp+8);
21483 if (!in_sched_functions(ip))
21484 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
21485 index b629bbe..0fa615a 100644
21486 --- a/arch/x86/kernel/ptrace.c
21487 +++ b/arch/x86/kernel/ptrace.c
21488 @@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
21489 {
21490 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
21491 unsigned long sp = (unsigned long)&regs->sp;
21492 - struct thread_info *tinfo;
21493
21494 - if (context == (sp & ~(THREAD_SIZE - 1)))
21495 + if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
21496 return sp;
21497
21498 - tinfo = (struct thread_info *)context;
21499 - if (tinfo->previous_esp)
21500 - return tinfo->previous_esp;
21501 + sp = *(unsigned long *)context;
21502 + if (sp)
21503 + return sp;
21504
21505 return (unsigned long)regs;
21506 }
21507 @@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
21508 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
21509 {
21510 int i;
21511 - int dr7 = 0;
21512 + unsigned long dr7 = 0;
21513 struct arch_hw_breakpoint *info;
21514
21515 for (i = 0; i < HBP_NUM; i++) {
21516 @@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
21517 unsigned long addr, unsigned long data)
21518 {
21519 int ret;
21520 - unsigned long __user *datap = (unsigned long __user *)data;
21521 + unsigned long __user *datap = (__force unsigned long __user *)data;
21522
21523 switch (request) {
21524 /* read the word at location addr in the USER area. */
21525 @@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
21526 if ((int) addr < 0)
21527 return -EIO;
21528 ret = do_get_thread_area(child, addr,
21529 - (struct user_desc __user *)data);
21530 + (__force struct user_desc __user *) data);
21531 break;
21532
21533 case PTRACE_SET_THREAD_AREA:
21534 if ((int) addr < 0)
21535 return -EIO;
21536 ret = do_set_thread_area(child, addr,
21537 - (struct user_desc __user *)data, 0);
21538 + (__force struct user_desc __user *) data, 0);
21539 break;
21540 #endif
21541
21542 @@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
21543
21544 #ifdef CONFIG_X86_64
21545
21546 -static struct user_regset x86_64_regsets[] __read_mostly = {
21547 +static user_regset_no_const x86_64_regsets[] __read_only = {
21548 [REGSET_GENERAL] = {
21549 .core_note_type = NT_PRSTATUS,
21550 .n = sizeof(struct user_regs_struct) / sizeof(long),
21551 @@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
21552 #endif /* CONFIG_X86_64 */
21553
21554 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
21555 -static struct user_regset x86_32_regsets[] __read_mostly = {
21556 +static user_regset_no_const x86_32_regsets[] __read_only = {
21557 [REGSET_GENERAL] = {
21558 .core_note_type = NT_PRSTATUS,
21559 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
21560 @@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
21561 */
21562 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
21563
21564 -void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
21565 +void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
21566 {
21567 #ifdef CONFIG_X86_64
21568 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
21569 @@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
21570 memset(info, 0, sizeof(*info));
21571 info->si_signo = SIGTRAP;
21572 info->si_code = si_code;
21573 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
21574 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
21575 }
21576
21577 void user_single_step_siginfo(struct task_struct *tsk,
21578 @@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
21579 # define IS_IA32 0
21580 #endif
21581
21582 +#ifdef CONFIG_GRKERNSEC_SETXID
21583 +extern void gr_delayed_cred_worker(void);
21584 +#endif
21585 +
21586 /*
21587 * We must return the syscall number to actually look up in the table.
21588 * This can be -1L to skip running any syscall at all.
21589 @@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
21590
21591 user_exit();
21592
21593 +#ifdef CONFIG_GRKERNSEC_SETXID
21594 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
21595 + gr_delayed_cred_worker();
21596 +#endif
21597 +
21598 /*
21599 * If we stepped into a sysenter/syscall insn, it trapped in
21600 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
21601 @@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
21602 */
21603 user_exit();
21604
21605 +#ifdef CONFIG_GRKERNSEC_SETXID
21606 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
21607 + gr_delayed_cred_worker();
21608 +#endif
21609 +
21610 audit_syscall_exit(regs);
21611
21612 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
21613 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
21614 index 85c3959..76b89f9 100644
21615 --- a/arch/x86/kernel/pvclock.c
21616 +++ b/arch/x86/kernel/pvclock.c
21617 @@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
21618 return pv_tsc_khz;
21619 }
21620
21621 -static atomic64_t last_value = ATOMIC64_INIT(0);
21622 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
21623
21624 void pvclock_resume(void)
21625 {
21626 - atomic64_set(&last_value, 0);
21627 + atomic64_set_unchecked(&last_value, 0);
21628 }
21629
21630 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
21631 @@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
21632 * updating at the same time, and one of them could be slightly behind,
21633 * making the assumption that last_value always go forward fail to hold.
21634 */
21635 - last = atomic64_read(&last_value);
21636 + last = atomic64_read_unchecked(&last_value);
21637 do {
21638 if (ret < last)
21639 return last;
21640 - last = atomic64_cmpxchg(&last_value, last, ret);
21641 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
21642 } while (unlikely(last != ret));
21643
21644 return ret;
21645 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
21646 index 76fa1e9..abf09ea 100644
21647 --- a/arch/x86/kernel/reboot.c
21648 +++ b/arch/x86/kernel/reboot.c
21649 @@ -36,7 +36,7 @@ void (*pm_power_off)(void);
21650 EXPORT_SYMBOL(pm_power_off);
21651
21652 static const struct desc_ptr no_idt = {};
21653 -static int reboot_mode;
21654 +static unsigned short reboot_mode;
21655 enum reboot_type reboot_type = BOOT_ACPI;
21656 int reboot_force;
21657
21658 @@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
21659
21660 void __noreturn machine_real_restart(unsigned int type)
21661 {
21662 +
21663 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21664 + struct desc_struct *gdt;
21665 +#endif
21666 +
21667 local_irq_disable();
21668
21669 /*
21670 @@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
21671
21672 /* Jump to the identity-mapped low memory code */
21673 #ifdef CONFIG_X86_32
21674 - asm volatile("jmpl *%0" : :
21675 +
21676 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21677 + gdt = get_cpu_gdt_table(smp_processor_id());
21678 + pax_open_kernel();
21679 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21680 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
21681 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
21682 + loadsegment(ds, __KERNEL_DS);
21683 + loadsegment(es, __KERNEL_DS);
21684 + loadsegment(ss, __KERNEL_DS);
21685 +#endif
21686 +#ifdef CONFIG_PAX_KERNEXEC
21687 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
21688 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
21689 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
21690 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
21691 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
21692 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
21693 +#endif
21694 + pax_close_kernel();
21695 +#endif
21696 +
21697 + asm volatile("ljmpl *%0" : :
21698 "rm" (real_mode_header->machine_real_restart_asm),
21699 "a" (type));
21700 #else
21701 @@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
21702 * try to force a triple fault and then cycle between hitting the keyboard
21703 * controller and doing that
21704 */
21705 -static void native_machine_emergency_restart(void)
21706 +static void __noreturn native_machine_emergency_restart(void)
21707 {
21708 int i;
21709 int attempt = 0;
21710 @@ -654,13 +681,13 @@ void native_machine_shutdown(void)
21711 #endif
21712 }
21713
21714 -static void __machine_emergency_restart(int emergency)
21715 +static void __noreturn __machine_emergency_restart(int emergency)
21716 {
21717 reboot_emergency = emergency;
21718 machine_ops.emergency_restart();
21719 }
21720
21721 -static void native_machine_restart(char *__unused)
21722 +static void __noreturn native_machine_restart(char *__unused)
21723 {
21724 pr_notice("machine restart\n");
21725
21726 @@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
21727 __machine_emergency_restart(0);
21728 }
21729
21730 -static void native_machine_halt(void)
21731 +static void __noreturn native_machine_halt(void)
21732 {
21733 /* Stop other cpus and apics */
21734 machine_shutdown();
21735 @@ -679,7 +706,7 @@ static void native_machine_halt(void)
21736 stop_this_cpu(NULL);
21737 }
21738
21739 -static void native_machine_power_off(void)
21740 +static void __noreturn native_machine_power_off(void)
21741 {
21742 if (pm_power_off) {
21743 if (!reboot_force)
21744 @@ -688,9 +715,10 @@ static void native_machine_power_off(void)
21745 }
21746 /* A fallback in case there is no PM info available */
21747 tboot_shutdown(TB_SHUTDOWN_HALT);
21748 + unreachable();
21749 }
21750
21751 -struct machine_ops machine_ops = {
21752 +struct machine_ops machine_ops __read_only = {
21753 .power_off = native_machine_power_off,
21754 .shutdown = native_machine_shutdown,
21755 .emergency_restart = native_machine_emergency_restart,
21756 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
21757 index 7a6f3b3..bed145d7 100644
21758 --- a/arch/x86/kernel/relocate_kernel_64.S
21759 +++ b/arch/x86/kernel/relocate_kernel_64.S
21760 @@ -11,6 +11,7 @@
21761 #include <asm/kexec.h>
21762 #include <asm/processor-flags.h>
21763 #include <asm/pgtable_types.h>
21764 +#include <asm/alternative-asm.h>
21765
21766 /*
21767 * Must be relocatable PIC code callable as a C function
21768 @@ -160,13 +161,14 @@ identity_mapped:
21769 xorq %rbp, %rbp
21770 xorq %r8, %r8
21771 xorq %r9, %r9
21772 - xorq %r10, %r9
21773 + xorq %r10, %r10
21774 xorq %r11, %r11
21775 xorq %r12, %r12
21776 xorq %r13, %r13
21777 xorq %r14, %r14
21778 xorq %r15, %r15
21779
21780 + pax_force_retaddr 0, 1
21781 ret
21782
21783 1:
21784 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
21785 index 8b24289..d37b58b 100644
21786 --- a/arch/x86/kernel/setup.c
21787 +++ b/arch/x86/kernel/setup.c
21788 @@ -437,7 +437,7 @@ static void __init parse_setup_data(void)
21789
21790 switch (data->type) {
21791 case SETUP_E820_EXT:
21792 - parse_e820_ext(data);
21793 + parse_e820_ext((struct setup_data __force_kernel *)data);
21794 break;
21795 case SETUP_DTB:
21796 add_dtb(pa_data);
21797 @@ -706,7 +706,7 @@ static void __init trim_bios_range(void)
21798 * area (640->1Mb) as ram even though it is not.
21799 * take them out.
21800 */
21801 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
21802 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
21803
21804 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
21805 }
21806 @@ -830,14 +830,14 @@ void __init setup_arch(char **cmdline_p)
21807
21808 if (!boot_params.hdr.root_flags)
21809 root_mountflags &= ~MS_RDONLY;
21810 - init_mm.start_code = (unsigned long) _text;
21811 - init_mm.end_code = (unsigned long) _etext;
21812 + init_mm.start_code = ktla_ktva((unsigned long) _text);
21813 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
21814 init_mm.end_data = (unsigned long) _edata;
21815 init_mm.brk = _brk_end;
21816
21817 - code_resource.start = virt_to_phys(_text);
21818 - code_resource.end = virt_to_phys(_etext)-1;
21819 - data_resource.start = virt_to_phys(_etext);
21820 + code_resource.start = virt_to_phys(ktla_ktva(_text));
21821 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
21822 + data_resource.start = virt_to_phys(_sdata);
21823 data_resource.end = virt_to_phys(_edata)-1;
21824 bss_resource.start = virt_to_phys(&__bss_start);
21825 bss_resource.end = virt_to_phys(&__bss_stop)-1;
21826 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
21827 index 5cdff03..5810740 100644
21828 --- a/arch/x86/kernel/setup_percpu.c
21829 +++ b/arch/x86/kernel/setup_percpu.c
21830 @@ -21,19 +21,17 @@
21831 #include <asm/cpu.h>
21832 #include <asm/stackprotector.h>
21833
21834 -DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
21835 +#ifdef CONFIG_SMP
21836 +DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
21837 EXPORT_PER_CPU_SYMBOL(cpu_number);
21838 +#endif
21839
21840 -#ifdef CONFIG_X86_64
21841 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
21842 -#else
21843 -#define BOOT_PERCPU_OFFSET 0
21844 -#endif
21845
21846 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
21847 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
21848
21849 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
21850 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
21851 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
21852 };
21853 EXPORT_SYMBOL(__per_cpu_offset);
21854 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
21855 {
21856 #ifdef CONFIG_X86_32
21857 struct desc_struct gdt;
21858 + unsigned long base = per_cpu_offset(cpu);
21859
21860 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
21861 - 0x2 | DESCTYPE_S, 0x8);
21862 - gdt.s = 1;
21863 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
21864 + 0x83 | DESCTYPE_S, 0xC);
21865 write_gdt_entry(get_cpu_gdt_table(cpu),
21866 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
21867 #endif
21868 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
21869 /* alrighty, percpu areas up and running */
21870 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
21871 for_each_possible_cpu(cpu) {
21872 +#ifdef CONFIG_CC_STACKPROTECTOR
21873 +#ifdef CONFIG_X86_32
21874 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
21875 +#endif
21876 +#endif
21877 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
21878 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
21879 per_cpu(cpu_number, cpu) = cpu;
21880 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
21881 */
21882 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
21883 #endif
21884 +#ifdef CONFIG_CC_STACKPROTECTOR
21885 +#ifdef CONFIG_X86_32
21886 + if (!cpu)
21887 + per_cpu(stack_canary.canary, cpu) = canary;
21888 +#endif
21889 +#endif
21890 /*
21891 * Up to this point, the boot CPU has been using .init.data
21892 * area. Reload any changed state for the boot CPU.
21893 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
21894 index d6bf1f3..3ffce5a 100644
21895 --- a/arch/x86/kernel/signal.c
21896 +++ b/arch/x86/kernel/signal.c
21897 @@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
21898 * Align the stack pointer according to the i386 ABI,
21899 * i.e. so that on function entry ((sp + 4) & 15) == 0.
21900 */
21901 - sp = ((sp + 4) & -16ul) - 4;
21902 + sp = ((sp - 12) & -16ul) - 4;
21903 #else /* !CONFIG_X86_32 */
21904 sp = round_down(sp, 16) - 8;
21905 #endif
21906 @@ -304,9 +304,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
21907 }
21908
21909 if (current->mm->context.vdso)
21910 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
21911 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
21912 else
21913 - restorer = &frame->retcode;
21914 + restorer = (void __user *)&frame->retcode;
21915 if (ka->sa.sa_flags & SA_RESTORER)
21916 restorer = ka->sa.sa_restorer;
21917
21918 @@ -320,7 +320,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
21919 * reasons and because gdb uses it as a signature to notice
21920 * signal handler stack frames.
21921 */
21922 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
21923 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
21924
21925 if (err)
21926 return -EFAULT;
21927 @@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
21928 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
21929
21930 /* Set up to return from userspace. */
21931 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
21932 + if (current->mm->context.vdso)
21933 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
21934 + else
21935 + restorer = (void __user *)&frame->retcode;
21936 if (ka->sa.sa_flags & SA_RESTORER)
21937 restorer = ka->sa.sa_restorer;
21938 put_user_ex(restorer, &frame->pretcode);
21939 @@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
21940 * reasons and because gdb uses it as a signature to notice
21941 * signal handler stack frames.
21942 */
21943 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
21944 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
21945 } put_user_catch(err);
21946
21947 err |= copy_siginfo_to_user(&frame->info, info);
21948 diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
21949 index 48d2b7d..90d328a 100644
21950 --- a/arch/x86/kernel/smp.c
21951 +++ b/arch/x86/kernel/smp.c
21952 @@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
21953
21954 __setup("nonmi_ipi", nonmi_ipi_setup);
21955
21956 -struct smp_ops smp_ops = {
21957 +struct smp_ops smp_ops __read_only = {
21958 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
21959 .smp_prepare_cpus = native_smp_prepare_cpus,
21960 .smp_cpus_done = native_smp_cpus_done,
21961 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
21962 index ed0fe38..87fc692 100644
21963 --- a/arch/x86/kernel/smpboot.c
21964 +++ b/arch/x86/kernel/smpboot.c
21965 @@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
21966 idle->thread.sp = (unsigned long) (((struct pt_regs *)
21967 (THREAD_SIZE + task_stack_page(idle))) - 1);
21968 per_cpu(current_task, cpu) = idle;
21969 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21970
21971 #ifdef CONFIG_X86_32
21972 /* Stack for startup_32 can be just as for start_secondary onwards */
21973 @@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
21974 #else
21975 clear_tsk_thread_flag(idle, TIF_FORK);
21976 initial_gs = per_cpu_offset(cpu);
21977 - per_cpu(kernel_stack, cpu) =
21978 - (unsigned long)task_stack_page(idle) -
21979 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21980 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21981 #endif
21982 +
21983 + pax_open_kernel();
21984 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21985 + pax_close_kernel();
21986 +
21987 initial_code = (unsigned long)start_secondary;
21988 stack_start = idle->thread.sp;
21989
21990 @@ -908,6 +911,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
21991 /* the FPU context is blank, nobody can own it */
21992 __cpu_disable_lazy_restore(cpu);
21993
21994 +#ifdef CONFIG_PAX_PER_CPU_PGD
21995 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
21996 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21997 + KERNEL_PGD_PTRS);
21998 +#endif
21999 +
22000 + /* the FPU context is blank, nobody can own it */
22001 + __cpu_disable_lazy_restore(cpu);
22002 +
22003 err = do_boot_cpu(apicid, cpu, tidle);
22004 if (err) {
22005 pr_debug("do_boot_cpu failed %d\n", err);
22006 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
22007 index 9b4d51d..5d28b58 100644
22008 --- a/arch/x86/kernel/step.c
22009 +++ b/arch/x86/kernel/step.c
22010 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22011 struct desc_struct *desc;
22012 unsigned long base;
22013
22014 - seg &= ~7UL;
22015 + seg >>= 3;
22016
22017 mutex_lock(&child->mm->context.lock);
22018 - if (unlikely((seg >> 3) >= child->mm->context.size))
22019 + if (unlikely(seg >= child->mm->context.size))
22020 addr = -1L; /* bogus selector, access would fault */
22021 else {
22022 desc = child->mm->context.ldt + seg;
22023 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22024 addr += base;
22025 }
22026 mutex_unlock(&child->mm->context.lock);
22027 - }
22028 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
22029 + addr = ktla_ktva(addr);
22030
22031 return addr;
22032 }
22033 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
22034 unsigned char opcode[15];
22035 unsigned long addr = convert_ip_to_linear(child, regs);
22036
22037 + if (addr == -EINVAL)
22038 + return 0;
22039 +
22040 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
22041 for (i = 0; i < copied; i++) {
22042 switch (opcode[i]) {
22043 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
22044 new file mode 100644
22045 index 0000000..26bb1af
22046 --- /dev/null
22047 +++ b/arch/x86/kernel/sys_i386_32.c
22048 @@ -0,0 +1,249 @@
22049 +/*
22050 + * This file contains various random system calls that
22051 + * have a non-standard calling sequence on the Linux/i386
22052 + * platform.
22053 + */
22054 +
22055 +#include <linux/errno.h>
22056 +#include <linux/sched.h>
22057 +#include <linux/mm.h>
22058 +#include <linux/fs.h>
22059 +#include <linux/smp.h>
22060 +#include <linux/sem.h>
22061 +#include <linux/msg.h>
22062 +#include <linux/shm.h>
22063 +#include <linux/stat.h>
22064 +#include <linux/syscalls.h>
22065 +#include <linux/mman.h>
22066 +#include <linux/file.h>
22067 +#include <linux/utsname.h>
22068 +#include <linux/ipc.h>
22069 +
22070 +#include <linux/uaccess.h>
22071 +#include <linux/unistd.h>
22072 +
22073 +#include <asm/syscalls.h>
22074 +
22075 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
22076 +{
22077 + unsigned long pax_task_size = TASK_SIZE;
22078 +
22079 +#ifdef CONFIG_PAX_SEGMEXEC
22080 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
22081 + pax_task_size = SEGMEXEC_TASK_SIZE;
22082 +#endif
22083 +
22084 + if (len > pax_task_size || addr > pax_task_size - len)
22085 + return -EINVAL;
22086 +
22087 + return 0;
22088 +}
22089 +
22090 +unsigned long
22091 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
22092 + unsigned long len, unsigned long pgoff, unsigned long flags)
22093 +{
22094 + struct mm_struct *mm = current->mm;
22095 + struct vm_area_struct *vma;
22096 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22097 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22098 +
22099 +#ifdef CONFIG_PAX_SEGMEXEC
22100 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22101 + pax_task_size = SEGMEXEC_TASK_SIZE;
22102 +#endif
22103 +
22104 + pax_task_size -= PAGE_SIZE;
22105 +
22106 + if (len > pax_task_size)
22107 + return -ENOMEM;
22108 +
22109 + if (flags & MAP_FIXED)
22110 + return addr;
22111 +
22112 +#ifdef CONFIG_PAX_RANDMMAP
22113 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22114 +#endif
22115 +
22116 + if (addr) {
22117 + addr = PAGE_ALIGN(addr);
22118 + if (pax_task_size - len >= addr) {
22119 + vma = find_vma(mm, addr);
22120 + if (check_heap_stack_gap(vma, addr, len, offset))
22121 + return addr;
22122 + }
22123 + }
22124 + if (len > mm->cached_hole_size) {
22125 + start_addr = addr = mm->free_area_cache;
22126 + } else {
22127 + start_addr = addr = mm->mmap_base;
22128 + mm->cached_hole_size = 0;
22129 + }
22130 +
22131 +#ifdef CONFIG_PAX_PAGEEXEC
22132 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
22133 + start_addr = 0x00110000UL;
22134 +
22135 +#ifdef CONFIG_PAX_RANDMMAP
22136 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22137 + start_addr += mm->delta_mmap & 0x03FFF000UL;
22138 +#endif
22139 +
22140 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
22141 + start_addr = addr = mm->mmap_base;
22142 + else
22143 + addr = start_addr;
22144 + }
22145 +#endif
22146 +
22147 +full_search:
22148 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22149 + /* At this point: (!vma || addr < vma->vm_end). */
22150 + if (pax_task_size - len < addr) {
22151 + /*
22152 + * Start a new search - just in case we missed
22153 + * some holes.
22154 + */
22155 + if (start_addr != mm->mmap_base) {
22156 + start_addr = addr = mm->mmap_base;
22157 + mm->cached_hole_size = 0;
22158 + goto full_search;
22159 + }
22160 + return -ENOMEM;
22161 + }
22162 + if (check_heap_stack_gap(vma, addr, len, offset))
22163 + break;
22164 + if (addr + mm->cached_hole_size < vma->vm_start)
22165 + mm->cached_hole_size = vma->vm_start - addr;
22166 + addr = vma->vm_end;
22167 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
22168 + start_addr = addr = mm->mmap_base;
22169 + mm->cached_hole_size = 0;
22170 + goto full_search;
22171 + }
22172 + }
22173 +
22174 + /*
22175 + * Remember the place where we stopped the search:
22176 + */
22177 + mm->free_area_cache = addr + len;
22178 + return addr;
22179 +}
22180 +
22181 +unsigned long
22182 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
22183 + const unsigned long len, const unsigned long pgoff,
22184 + const unsigned long flags)
22185 +{
22186 + struct vm_area_struct *vma;
22187 + struct mm_struct *mm = current->mm;
22188 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
22189 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22190 +
22191 +#ifdef CONFIG_PAX_SEGMEXEC
22192 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22193 + pax_task_size = SEGMEXEC_TASK_SIZE;
22194 +#endif
22195 +
22196 + pax_task_size -= PAGE_SIZE;
22197 +
22198 + /* requested length too big for entire address space */
22199 + if (len > pax_task_size)
22200 + return -ENOMEM;
22201 +
22202 + if (flags & MAP_FIXED)
22203 + return addr;
22204 +
22205 +#ifdef CONFIG_PAX_PAGEEXEC
22206 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
22207 + goto bottomup;
22208 +#endif
22209 +
22210 +#ifdef CONFIG_PAX_RANDMMAP
22211 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22212 +#endif
22213 +
22214 + /* requesting a specific address */
22215 + if (addr) {
22216 + addr = PAGE_ALIGN(addr);
22217 + if (pax_task_size - len >= addr) {
22218 + vma = find_vma(mm, addr);
22219 + if (check_heap_stack_gap(vma, addr, len, offset))
22220 + return addr;
22221 + }
22222 + }
22223 +
22224 + /* check if free_area_cache is useful for us */
22225 + if (len <= mm->cached_hole_size) {
22226 + mm->cached_hole_size = 0;
22227 + mm->free_area_cache = mm->mmap_base;
22228 + }
22229 +
22230 + /* either no address requested or can't fit in requested address hole */
22231 + addr = mm->free_area_cache;
22232 +
22233 + /* make sure it can fit in the remaining address space */
22234 + if (addr > len) {
22235 + vma = find_vma(mm, addr-len);
22236 + if (check_heap_stack_gap(vma, addr - len, len, offset))
22237 + /* remember the address as a hint for next time */
22238 + return (mm->free_area_cache = addr-len);
22239 + }
22240 +
22241 + if (mm->mmap_base < len)
22242 + goto bottomup;
22243 +
22244 + addr = mm->mmap_base-len;
22245 +
22246 + do {
22247 + /*
22248 + * Lookup failure means no vma is above this address,
22249 + * else if new region fits below vma->vm_start,
22250 + * return with success:
22251 + */
22252 + vma = find_vma(mm, addr);
22253 + if (check_heap_stack_gap(vma, addr, len, offset))
22254 + /* remember the address as a hint for next time */
22255 + return (mm->free_area_cache = addr);
22256 +
22257 + /* remember the largest hole we saw so far */
22258 + if (addr + mm->cached_hole_size < vma->vm_start)
22259 + mm->cached_hole_size = vma->vm_start - addr;
22260 +
22261 + /* try just below the current vma->vm_start */
22262 + addr = skip_heap_stack_gap(vma, len, offset);
22263 + } while (!IS_ERR_VALUE(addr));
22264 +
22265 +bottomup:
22266 + /*
22267 + * A failed mmap() very likely causes application failure,
22268 + * so fall back to the bottom-up function here. This scenario
22269 + * can happen with large stack limits and large mmap()
22270 + * allocations.
22271 + */
22272 +
22273 +#ifdef CONFIG_PAX_SEGMEXEC
22274 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22275 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22276 + else
22277 +#endif
22278 +
22279 + mm->mmap_base = TASK_UNMAPPED_BASE;
22280 +
22281 +#ifdef CONFIG_PAX_RANDMMAP
22282 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22283 + mm->mmap_base += mm->delta_mmap;
22284 +#endif
22285 +
22286 + mm->free_area_cache = mm->mmap_base;
22287 + mm->cached_hole_size = ~0UL;
22288 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
22289 + /*
22290 + * Restore the topdown base:
22291 + */
22292 + mm->mmap_base = base;
22293 + mm->free_area_cache = base;
22294 + mm->cached_hole_size = ~0UL;
22295 +
22296 + return addr;
22297 +}
22298 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
22299 index 97ef74b..57a1882 100644
22300 --- a/arch/x86/kernel/sys_x86_64.c
22301 +++ b/arch/x86/kernel/sys_x86_64.c
22302 @@ -81,8 +81,8 @@ out:
22303 return error;
22304 }
22305
22306 -static void find_start_end(unsigned long flags, unsigned long *begin,
22307 - unsigned long *end)
22308 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
22309 + unsigned long *begin, unsigned long *end)
22310 {
22311 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
22312 unsigned long new_begin;
22313 @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
22314 *begin = new_begin;
22315 }
22316 } else {
22317 - *begin = TASK_UNMAPPED_BASE;
22318 + *begin = mm->mmap_base;
22319 *end = TASK_SIZE;
22320 }
22321 }
22322 @@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
22323 struct vm_area_struct *vma;
22324 struct vm_unmapped_area_info info;
22325 unsigned long begin, end;
22326 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22327
22328 if (flags & MAP_FIXED)
22329 return addr;
22330
22331 - find_start_end(flags, &begin, &end);
22332 + find_start_end(mm, flags, &begin, &end);
22333
22334 if (len > end)
22335 return -ENOMEM;
22336
22337 +#ifdef CONFIG_PAX_RANDMMAP
22338 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22339 +#endif
22340 +
22341 if (addr) {
22342 addr = PAGE_ALIGN(addr);
22343 vma = find_vma(mm, addr);
22344 - if (end - len >= addr &&
22345 - (!vma || addr + len <= vma->vm_start))
22346 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
22347 return addr;
22348 }
22349
22350 @@ -161,6 +165,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
22351 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
22352 goto bottomup;
22353
22354 +#ifdef CONFIG_PAX_RANDMMAP
22355 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22356 +#endif
22357 +
22358 /* requesting a specific address */
22359 if (addr) {
22360 addr = PAGE_ALIGN(addr);
22361 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
22362 index f84fe00..f41d9f1 100644
22363 --- a/arch/x86/kernel/tboot.c
22364 +++ b/arch/x86/kernel/tboot.c
22365 @@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
22366
22367 void tboot_shutdown(u32 shutdown_type)
22368 {
22369 - void (*shutdown)(void);
22370 + void (* __noreturn shutdown)(void);
22371
22372 if (!tboot_enabled())
22373 return;
22374 @@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
22375
22376 switch_to_tboot_pt();
22377
22378 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
22379 + shutdown = (void *)tboot->shutdown_entry;
22380 shutdown();
22381
22382 /* should not reach here */
22383 @@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
22384 return 0;
22385 }
22386
22387 -static atomic_t ap_wfs_count;
22388 +static atomic_unchecked_t ap_wfs_count;
22389
22390 static int tboot_wait_for_aps(int num_aps)
22391 {
22392 @@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
22393 {
22394 switch (action) {
22395 case CPU_DYING:
22396 - atomic_inc(&ap_wfs_count);
22397 + atomic_inc_unchecked(&ap_wfs_count);
22398 if (num_online_cpus() == 1)
22399 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
22400 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
22401 return NOTIFY_BAD;
22402 break;
22403 }
22404 return NOTIFY_OK;
22405 }
22406
22407 -static struct notifier_block tboot_cpu_notifier __cpuinitdata =
22408 +static struct notifier_block tboot_cpu_notifier =
22409 {
22410 .notifier_call = tboot_cpu_callback,
22411 };
22412 @@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
22413
22414 tboot_create_trampoline();
22415
22416 - atomic_set(&ap_wfs_count, 0);
22417 + atomic_set_unchecked(&ap_wfs_count, 0);
22418 register_hotcpu_notifier(&tboot_cpu_notifier);
22419
22420 acpi_os_set_prepare_sleep(&tboot_sleep);
22421 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
22422 index 24d3c91..d06b473 100644
22423 --- a/arch/x86/kernel/time.c
22424 +++ b/arch/x86/kernel/time.c
22425 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
22426 {
22427 unsigned long pc = instruction_pointer(regs);
22428
22429 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
22430 + if (!user_mode(regs) && in_lock_functions(pc)) {
22431 #ifdef CONFIG_FRAME_POINTER
22432 - return *(unsigned long *)(regs->bp + sizeof(long));
22433 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
22434 #else
22435 unsigned long *sp =
22436 (unsigned long *)kernel_stack_pointer(regs);
22437 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
22438 * or above a saved flags. Eflags has bits 22-31 zero,
22439 * kernel addresses don't.
22440 */
22441 +
22442 +#ifdef CONFIG_PAX_KERNEXEC
22443 + return ktla_ktva(sp[0]);
22444 +#else
22445 if (sp[0] >> 22)
22446 return sp[0];
22447 if (sp[1] >> 22)
22448 return sp[1];
22449 #endif
22450 +
22451 +#endif
22452 }
22453 return pc;
22454 }
22455 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
22456 index 9d9d2f9..cad418a 100644
22457 --- a/arch/x86/kernel/tls.c
22458 +++ b/arch/x86/kernel/tls.c
22459 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
22460 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
22461 return -EINVAL;
22462
22463 +#ifdef CONFIG_PAX_SEGMEXEC
22464 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
22465 + return -EINVAL;
22466 +#endif
22467 +
22468 set_tls_desc(p, idx, &info, 1);
22469
22470 return 0;
22471 @@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
22472
22473 if (kbuf)
22474 info = kbuf;
22475 - else if (__copy_from_user(infobuf, ubuf, count))
22476 + else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
22477 return -EFAULT;
22478 else
22479 info = infobuf;
22480 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
22481 index ecffca1..95c4d13 100644
22482 --- a/arch/x86/kernel/traps.c
22483 +++ b/arch/x86/kernel/traps.c
22484 @@ -68,12 +68,6 @@
22485 #include <asm/setup.h>
22486
22487 asmlinkage int system_call(void);
22488 -
22489 -/*
22490 - * The IDT has to be page-aligned to simplify the Pentium
22491 - * F0 0F bug workaround.
22492 - */
22493 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
22494 #endif
22495
22496 DECLARE_BITMAP(used_vectors, NR_VECTORS);
22497 @@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
22498 }
22499
22500 static int __kprobes
22501 -do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
22502 +do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
22503 struct pt_regs *regs, long error_code)
22504 {
22505 #ifdef CONFIG_X86_32
22506 - if (regs->flags & X86_VM_MASK) {
22507 + if (v8086_mode(regs)) {
22508 /*
22509 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
22510 * On nmi (interrupt 2), do_trap should not be called.
22511 @@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
22512 return -1;
22513 }
22514 #endif
22515 - if (!user_mode(regs)) {
22516 + if (!user_mode_novm(regs)) {
22517 if (!fixup_exception(regs)) {
22518 tsk->thread.error_code = error_code;
22519 tsk->thread.trap_nr = trapnr;
22520 +
22521 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22522 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
22523 + str = "PAX: suspicious stack segment fault";
22524 +#endif
22525 +
22526 die(str, regs, error_code);
22527 }
22528 +
22529 +#ifdef CONFIG_PAX_REFCOUNT
22530 + if (trapnr == 4)
22531 + pax_report_refcount_overflow(regs);
22532 +#endif
22533 +
22534 return 0;
22535 }
22536
22537 @@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
22538 }
22539
22540 static void __kprobes
22541 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
22542 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
22543 long error_code, siginfo_t *info)
22544 {
22545 struct task_struct *tsk = current;
22546 @@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
22547 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
22548 printk_ratelimit()) {
22549 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
22550 - tsk->comm, tsk->pid, str,
22551 + tsk->comm, task_pid_nr(tsk), str,
22552 regs->ip, regs->sp, error_code);
22553 print_vma_addr(" in ", regs->ip);
22554 pr_cont("\n");
22555 @@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
22556 conditional_sti(regs);
22557
22558 #ifdef CONFIG_X86_32
22559 - if (regs->flags & X86_VM_MASK) {
22560 + if (v8086_mode(regs)) {
22561 local_irq_enable();
22562 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
22563 goto exit;
22564 @@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
22565 #endif
22566
22567 tsk = current;
22568 - if (!user_mode(regs)) {
22569 + if (!user_mode_novm(regs)) {
22570 if (fixup_exception(regs))
22571 goto exit;
22572
22573 tsk->thread.error_code = error_code;
22574 tsk->thread.trap_nr = X86_TRAP_GP;
22575 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
22576 - X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
22577 + X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
22578 +
22579 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22580 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
22581 + die("PAX: suspicious general protection fault", regs, error_code);
22582 + else
22583 +#endif
22584 +
22585 die("general protection fault", regs, error_code);
22586 + }
22587 goto exit;
22588 }
22589
22590 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
22591 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
22592 + struct mm_struct *mm = tsk->mm;
22593 + unsigned long limit;
22594 +
22595 + down_write(&mm->mmap_sem);
22596 + limit = mm->context.user_cs_limit;
22597 + if (limit < TASK_SIZE) {
22598 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
22599 + up_write(&mm->mmap_sem);
22600 + return;
22601 + }
22602 + up_write(&mm->mmap_sem);
22603 + }
22604 +#endif
22605 +
22606 tsk->thread.error_code = error_code;
22607 tsk->thread.trap_nr = X86_TRAP_GP;
22608
22609 @@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
22610 /* It's safe to allow irq's after DR6 has been saved */
22611 preempt_conditional_sti(regs);
22612
22613 - if (regs->flags & X86_VM_MASK) {
22614 + if (v8086_mode(regs)) {
22615 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
22616 X86_TRAP_DB);
22617 preempt_conditional_cli(regs);
22618 @@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
22619 * We already checked v86 mode above, so we can check for kernel mode
22620 * by just checking the CPL of CS.
22621 */
22622 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
22623 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
22624 tsk->thread.debugreg6 &= ~DR_STEP;
22625 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
22626 regs->flags &= ~X86_EFLAGS_TF;
22627 @@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
22628 return;
22629 conditional_sti(regs);
22630
22631 - if (!user_mode_vm(regs))
22632 + if (!user_mode(regs))
22633 {
22634 if (!fixup_exception(regs)) {
22635 task->thread.error_code = error_code;
22636 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
22637 index c71025b..b117501 100644
22638 --- a/arch/x86/kernel/uprobes.c
22639 +++ b/arch/x86/kernel/uprobes.c
22640 @@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
22641 int ret = NOTIFY_DONE;
22642
22643 /* We are only interested in userspace traps */
22644 - if (regs && !user_mode_vm(regs))
22645 + if (regs && !user_mode(regs))
22646 return NOTIFY_DONE;
22647
22648 switch (val) {
22649 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
22650 index b9242ba..50c5edd 100644
22651 --- a/arch/x86/kernel/verify_cpu.S
22652 +++ b/arch/x86/kernel/verify_cpu.S
22653 @@ -20,6 +20,7 @@
22654 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
22655 * arch/x86/kernel/trampoline_64.S: secondary processor verification
22656 * arch/x86/kernel/head_32.S: processor startup
22657 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
22658 *
22659 * verify_cpu, returns the status of longmode and SSE in register %eax.
22660 * 0: Success 1: Failure
22661 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
22662 index 1dfe69c..a3df6f6 100644
22663 --- a/arch/x86/kernel/vm86_32.c
22664 +++ b/arch/x86/kernel/vm86_32.c
22665 @@ -43,6 +43,7 @@
22666 #include <linux/ptrace.h>
22667 #include <linux/audit.h>
22668 #include <linux/stddef.h>
22669 +#include <linux/grsecurity.h>
22670
22671 #include <asm/uaccess.h>
22672 #include <asm/io.h>
22673 @@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
22674 do_exit(SIGSEGV);
22675 }
22676
22677 - tss = &per_cpu(init_tss, get_cpu());
22678 + tss = init_tss + get_cpu();
22679 current->thread.sp0 = current->thread.saved_sp0;
22680 current->thread.sysenter_cs = __KERNEL_CS;
22681 load_sp0(tss, &current->thread);
22682 @@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
22683 struct task_struct *tsk;
22684 int tmp, ret = -EPERM;
22685
22686 +#ifdef CONFIG_GRKERNSEC_VM86
22687 + if (!capable(CAP_SYS_RAWIO)) {
22688 + gr_handle_vm86();
22689 + goto out;
22690 + }
22691 +#endif
22692 +
22693 tsk = current;
22694 if (tsk->thread.saved_sp0)
22695 goto out;
22696 @@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
22697 int tmp, ret;
22698 struct vm86plus_struct __user *v86;
22699
22700 +#ifdef CONFIG_GRKERNSEC_VM86
22701 + if (!capable(CAP_SYS_RAWIO)) {
22702 + gr_handle_vm86();
22703 + ret = -EPERM;
22704 + goto out;
22705 + }
22706 +#endif
22707 +
22708 tsk = current;
22709 switch (cmd) {
22710 case VM86_REQUEST_IRQ:
22711 @@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
22712 tsk->thread.saved_fs = info->regs32->fs;
22713 tsk->thread.saved_gs = get_user_gs(info->regs32);
22714
22715 - tss = &per_cpu(init_tss, get_cpu());
22716 + tss = init_tss + get_cpu();
22717 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
22718 if (cpu_has_sep)
22719 tsk->thread.sysenter_cs = 0;
22720 @@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
22721 goto cannot_handle;
22722 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
22723 goto cannot_handle;
22724 - intr_ptr = (unsigned long __user *) (i << 2);
22725 + intr_ptr = (__force unsigned long __user *) (i << 2);
22726 if (get_user(segoffs, intr_ptr))
22727 goto cannot_handle;
22728 if ((segoffs >> 16) == BIOSSEG)
22729 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
22730 index 22a1530..8fbaaad 100644
22731 --- a/arch/x86/kernel/vmlinux.lds.S
22732 +++ b/arch/x86/kernel/vmlinux.lds.S
22733 @@ -26,6 +26,13 @@
22734 #include <asm/page_types.h>
22735 #include <asm/cache.h>
22736 #include <asm/boot.h>
22737 +#include <asm/segment.h>
22738 +
22739 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22740 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
22741 +#else
22742 +#define __KERNEL_TEXT_OFFSET 0
22743 +#endif
22744
22745 #undef i386 /* in case the preprocessor is a 32bit one */
22746
22747 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
22748
22749 PHDRS {
22750 text PT_LOAD FLAGS(5); /* R_E */
22751 +#ifdef CONFIG_X86_32
22752 + module PT_LOAD FLAGS(5); /* R_E */
22753 +#endif
22754 +#ifdef CONFIG_XEN
22755 + rodata PT_LOAD FLAGS(5); /* R_E */
22756 +#else
22757 + rodata PT_LOAD FLAGS(4); /* R__ */
22758 +#endif
22759 data PT_LOAD FLAGS(6); /* RW_ */
22760 -#ifdef CONFIG_X86_64
22761 + init.begin PT_LOAD FLAGS(6); /* RW_ */
22762 #ifdef CONFIG_SMP
22763 percpu PT_LOAD FLAGS(6); /* RW_ */
22764 #endif
22765 + text.init PT_LOAD FLAGS(5); /* R_E */
22766 + text.exit PT_LOAD FLAGS(5); /* R_E */
22767 init PT_LOAD FLAGS(7); /* RWE */
22768 -#endif
22769 note PT_NOTE FLAGS(0); /* ___ */
22770 }
22771
22772 SECTIONS
22773 {
22774 #ifdef CONFIG_X86_32
22775 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
22776 - phys_startup_32 = startup_32 - LOAD_OFFSET;
22777 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
22778 #else
22779 - . = __START_KERNEL;
22780 - phys_startup_64 = startup_64 - LOAD_OFFSET;
22781 + . = __START_KERNEL;
22782 #endif
22783
22784 /* Text and read-only data */
22785 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
22786 - _text = .;
22787 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22788 /* bootstrapping code */
22789 +#ifdef CONFIG_X86_32
22790 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22791 +#else
22792 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22793 +#endif
22794 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22795 + _text = .;
22796 HEAD_TEXT
22797 #ifdef CONFIG_X86_32
22798 . = ALIGN(PAGE_SIZE);
22799 @@ -108,13 +128,48 @@ SECTIONS
22800 IRQENTRY_TEXT
22801 *(.fixup)
22802 *(.gnu.warning)
22803 - /* End of text section */
22804 - _etext = .;
22805 } :text = 0x9090
22806
22807 - NOTES :text :note
22808 + . += __KERNEL_TEXT_OFFSET;
22809
22810 - EXCEPTION_TABLE(16) :text = 0x9090
22811 +#ifdef CONFIG_X86_32
22812 + . = ALIGN(PAGE_SIZE);
22813 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
22814 +
22815 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
22816 + MODULES_EXEC_VADDR = .;
22817 + BYTE(0)
22818 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
22819 + . = ALIGN(HPAGE_SIZE) - 1;
22820 + MODULES_EXEC_END = .;
22821 +#endif
22822 +
22823 + } :module
22824 +#endif
22825 +
22826 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
22827 + /* End of text section */
22828 + BYTE(0)
22829 + _etext = . - __KERNEL_TEXT_OFFSET;
22830 + }
22831 +
22832 +#ifdef CONFIG_X86_32
22833 + . = ALIGN(PAGE_SIZE);
22834 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
22835 + *(.idt)
22836 + . = ALIGN(PAGE_SIZE);
22837 + *(.empty_zero_page)
22838 + *(.initial_pg_fixmap)
22839 + *(.initial_pg_pmd)
22840 + *(.initial_page_table)
22841 + *(.swapper_pg_dir)
22842 + } :rodata
22843 +#endif
22844 +
22845 + . = ALIGN(PAGE_SIZE);
22846 + NOTES :rodata :note
22847 +
22848 + EXCEPTION_TABLE(16) :rodata
22849
22850 #if defined(CONFIG_DEBUG_RODATA)
22851 /* .text should occupy whole number of pages */
22852 @@ -126,16 +181,20 @@ SECTIONS
22853
22854 /* Data */
22855 .data : AT(ADDR(.data) - LOAD_OFFSET) {
22856 +
22857 +#ifdef CONFIG_PAX_KERNEXEC
22858 + . = ALIGN(HPAGE_SIZE);
22859 +#else
22860 + . = ALIGN(PAGE_SIZE);
22861 +#endif
22862 +
22863 /* Start of data section */
22864 _sdata = .;
22865
22866 /* init_task */
22867 INIT_TASK_DATA(THREAD_SIZE)
22868
22869 -#ifdef CONFIG_X86_32
22870 - /* 32 bit has nosave before _edata */
22871 NOSAVE_DATA
22872 -#endif
22873
22874 PAGE_ALIGNED_DATA(PAGE_SIZE)
22875
22876 @@ -176,12 +235,19 @@ SECTIONS
22877 #endif /* CONFIG_X86_64 */
22878
22879 /* Init code and data - will be freed after init */
22880 - . = ALIGN(PAGE_SIZE);
22881 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
22882 + BYTE(0)
22883 +
22884 +#ifdef CONFIG_PAX_KERNEXEC
22885 + . = ALIGN(HPAGE_SIZE);
22886 +#else
22887 + . = ALIGN(PAGE_SIZE);
22888 +#endif
22889 +
22890 __init_begin = .; /* paired with __init_end */
22891 - }
22892 + } :init.begin
22893
22894 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
22895 +#ifdef CONFIG_SMP
22896 /*
22897 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
22898 * output PHDR, so the next output section - .init.text - should
22899 @@ -190,12 +256,27 @@ SECTIONS
22900 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
22901 #endif
22902
22903 - INIT_TEXT_SECTION(PAGE_SIZE)
22904 -#ifdef CONFIG_X86_64
22905 - :init
22906 -#endif
22907 + . = ALIGN(PAGE_SIZE);
22908 + init_begin = .;
22909 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
22910 + VMLINUX_SYMBOL(_sinittext) = .;
22911 + INIT_TEXT
22912 + VMLINUX_SYMBOL(_einittext) = .;
22913 + . = ALIGN(PAGE_SIZE);
22914 + } :text.init
22915
22916 - INIT_DATA_SECTION(16)
22917 + /*
22918 + * .exit.text is discard at runtime, not link time, to deal with
22919 + * references from .altinstructions and .eh_frame
22920 + */
22921 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22922 + EXIT_TEXT
22923 + . = ALIGN(16);
22924 + } :text.exit
22925 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
22926 +
22927 + . = ALIGN(PAGE_SIZE);
22928 + INIT_DATA_SECTION(16) :init
22929
22930 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
22931 __x86_cpu_dev_start = .;
22932 @@ -257,19 +338,12 @@ SECTIONS
22933 }
22934
22935 . = ALIGN(8);
22936 - /*
22937 - * .exit.text is discard at runtime, not link time, to deal with
22938 - * references from .altinstructions and .eh_frame
22939 - */
22940 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
22941 - EXIT_TEXT
22942 - }
22943
22944 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
22945 EXIT_DATA
22946 }
22947
22948 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
22949 +#ifndef CONFIG_SMP
22950 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
22951 #endif
22952
22953 @@ -288,16 +362,10 @@ SECTIONS
22954 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
22955 __smp_locks = .;
22956 *(.smp_locks)
22957 - . = ALIGN(PAGE_SIZE);
22958 __smp_locks_end = .;
22959 + . = ALIGN(PAGE_SIZE);
22960 }
22961
22962 -#ifdef CONFIG_X86_64
22963 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
22964 - NOSAVE_DATA
22965 - }
22966 -#endif
22967 -
22968 /* BSS */
22969 . = ALIGN(PAGE_SIZE);
22970 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
22971 @@ -313,6 +381,7 @@ SECTIONS
22972 __brk_base = .;
22973 . += 64 * 1024; /* 64k alignment slop space */
22974 *(.brk_reservation) /* areas brk users have reserved */
22975 + . = ALIGN(HPAGE_SIZE);
22976 __brk_limit = .;
22977 }
22978
22979 @@ -339,13 +408,12 @@ SECTIONS
22980 * for the boot processor.
22981 */
22982 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
22983 -INIT_PER_CPU(gdt_page);
22984 INIT_PER_CPU(irq_stack_union);
22985
22986 /*
22987 * Build-time check on the image size:
22988 */
22989 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
22990 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
22991 "kernel image bigger than KERNEL_IMAGE_SIZE");
22992
22993 #ifdef CONFIG_SMP
22994 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
22995 index 9a907a6..f83f921 100644
22996 --- a/arch/x86/kernel/vsyscall_64.c
22997 +++ b/arch/x86/kernel/vsyscall_64.c
22998 @@ -56,15 +56,13 @@
22999 DEFINE_VVAR(int, vgetcpu_mode);
23000 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
23001
23002 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
23003 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
23004
23005 static int __init vsyscall_setup(char *str)
23006 {
23007 if (str) {
23008 if (!strcmp("emulate", str))
23009 vsyscall_mode = EMULATE;
23010 - else if (!strcmp("native", str))
23011 - vsyscall_mode = NATIVE;
23012 else if (!strcmp("none", str))
23013 vsyscall_mode = NONE;
23014 else
23015 @@ -323,8 +321,7 @@ do_ret:
23016 return true;
23017
23018 sigsegv:
23019 - force_sig(SIGSEGV, current);
23020 - return true;
23021 + do_group_exit(SIGKILL);
23022 }
23023
23024 /*
23025 @@ -377,10 +374,7 @@ void __init map_vsyscall(void)
23026 extern char __vvar_page;
23027 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
23028
23029 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
23030 - vsyscall_mode == NATIVE
23031 - ? PAGE_KERNEL_VSYSCALL
23032 - : PAGE_KERNEL_VVAR);
23033 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
23034 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
23035 (unsigned long)VSYSCALL_START);
23036
23037 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
23038 index 1330dd1..d220b99 100644
23039 --- a/arch/x86/kernel/x8664_ksyms_64.c
23040 +++ b/arch/x86/kernel/x8664_ksyms_64.c
23041 @@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
23042 EXPORT_SYMBOL(copy_user_generic_unrolled);
23043 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
23044 EXPORT_SYMBOL(__copy_user_nocache);
23045 -EXPORT_SYMBOL(_copy_from_user);
23046 -EXPORT_SYMBOL(_copy_to_user);
23047
23048 EXPORT_SYMBOL(copy_page);
23049 EXPORT_SYMBOL(clear_page);
23050 diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
23051 index 7a3d075..6cb373d 100644
23052 --- a/arch/x86/kernel/x86_init.c
23053 +++ b/arch/x86/kernel/x86_init.c
23054 @@ -88,7 +88,7 @@ struct x86_init_ops x86_init __initdata = {
23055 },
23056 };
23057
23058 -struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23059 +struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
23060 .early_percpu_clock_init = x86_init_noop,
23061 .setup_percpu_clockev = setup_secondary_APIC_clock,
23062 };
23063 @@ -96,7 +96,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23064 static void default_nmi_init(void) { };
23065 static int default_i8042_detect(void) { return 1; };
23066
23067 -struct x86_platform_ops x86_platform = {
23068 +struct x86_platform_ops x86_platform __read_only = {
23069 .calibrate_tsc = native_calibrate_tsc,
23070 .get_wallclock = mach_get_cmos_time,
23071 .set_wallclock = mach_set_rtc_mmss,
23072 @@ -110,14 +110,14 @@ struct x86_platform_ops x86_platform = {
23073 };
23074
23075 EXPORT_SYMBOL_GPL(x86_platform);
23076 -struct x86_msi_ops x86_msi = {
23077 +struct x86_msi_ops x86_msi __read_only = {
23078 .setup_msi_irqs = native_setup_msi_irqs,
23079 .teardown_msi_irq = native_teardown_msi_irq,
23080 .teardown_msi_irqs = default_teardown_msi_irqs,
23081 .restore_msi_irqs = default_restore_msi_irqs,
23082 };
23083
23084 -struct x86_io_apic_ops x86_io_apic_ops = {
23085 +struct x86_io_apic_ops x86_io_apic_ops __read_only = {
23086 .init = native_io_apic_init_mappings,
23087 .read = native_io_apic_read,
23088 .write = native_io_apic_write,
23089 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
23090 index ada87a3..afea76d 100644
23091 --- a/arch/x86/kernel/xsave.c
23092 +++ b/arch/x86/kernel/xsave.c
23093 @@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
23094 {
23095 int err;
23096
23097 + buf = (struct xsave_struct __user *)____m(buf);
23098 if (use_xsave())
23099 err = xsave_user(buf);
23100 else if (use_fxsr())
23101 @@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
23102 */
23103 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
23104 {
23105 + buf = (void __user *)____m(buf);
23106 if (use_xsave()) {
23107 if ((unsigned long)buf % 64 || fx_only) {
23108 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
23109 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
23110 index a20ecb5..d0e2194 100644
23111 --- a/arch/x86/kvm/cpuid.c
23112 +++ b/arch/x86/kvm/cpuid.c
23113 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
23114 struct kvm_cpuid2 *cpuid,
23115 struct kvm_cpuid_entry2 __user *entries)
23116 {
23117 - int r;
23118 + int r, i;
23119
23120 r = -E2BIG;
23121 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
23122 goto out;
23123 r = -EFAULT;
23124 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
23125 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23126 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23127 goto out;
23128 + for (i = 0; i < cpuid->nent; ++i) {
23129 + struct kvm_cpuid_entry2 cpuid_entry;
23130 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
23131 + goto out;
23132 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
23133 + }
23134 vcpu->arch.cpuid_nent = cpuid->nent;
23135 kvm_apic_set_version(vcpu);
23136 kvm_x86_ops->cpuid_update(vcpu);
23137 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
23138 struct kvm_cpuid2 *cpuid,
23139 struct kvm_cpuid_entry2 __user *entries)
23140 {
23141 - int r;
23142 + int r, i;
23143
23144 r = -E2BIG;
23145 if (cpuid->nent < vcpu->arch.cpuid_nent)
23146 goto out;
23147 r = -EFAULT;
23148 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
23149 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
23150 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
23151 goto out;
23152 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
23153 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
23154 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
23155 + goto out;
23156 + }
23157 return 0;
23158
23159 out:
23160 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
23161 index a27e763..54bfe43 100644
23162 --- a/arch/x86/kvm/emulate.c
23163 +++ b/arch/x86/kvm/emulate.c
23164 @@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23165
23166 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
23167 do { \
23168 + unsigned long _tmp; \
23169 __asm__ __volatile__ ( \
23170 _PRE_EFLAGS("0", "4", "2") \
23171 _op _suffix " %"_x"3,%1; " \
23172 @@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23173 /* Raw emulation: instruction has two explicit operands. */
23174 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
23175 do { \
23176 - unsigned long _tmp; \
23177 - \
23178 switch ((ctxt)->dst.bytes) { \
23179 case 2: \
23180 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
23181 @@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23182
23183 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
23184 do { \
23185 - unsigned long _tmp; \
23186 switch ((ctxt)->dst.bytes) { \
23187 case 1: \
23188 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
23189 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
23190 index 9392f52..0e56d77 100644
23191 --- a/arch/x86/kvm/lapic.c
23192 +++ b/arch/x86/kvm/lapic.c
23193 @@ -55,7 +55,7 @@
23194 #define APIC_BUS_CYCLE_NS 1
23195
23196 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
23197 -#define apic_debug(fmt, arg...)
23198 +#define apic_debug(fmt, arg...) do {} while (0)
23199
23200 #define APIC_LVT_NUM 6
23201 /* 14 is the version for Xeon and Pentium 8.4.8*/
23202 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
23203 index 891eb6d..e027900 100644
23204 --- a/arch/x86/kvm/paging_tmpl.h
23205 +++ b/arch/x86/kvm/paging_tmpl.h
23206 @@ -208,7 +208,7 @@ retry_walk:
23207 if (unlikely(kvm_is_error_hva(host_addr)))
23208 goto error;
23209
23210 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
23211 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
23212 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
23213 goto error;
23214 walker->ptep_user[walker->level - 1] = ptep_user;
23215 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
23216 index d29d3cd..ec9d522 100644
23217 --- a/arch/x86/kvm/svm.c
23218 +++ b/arch/x86/kvm/svm.c
23219 @@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
23220 int cpu = raw_smp_processor_id();
23221
23222 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
23223 +
23224 + pax_open_kernel();
23225 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
23226 + pax_close_kernel();
23227 +
23228 load_TR_desc();
23229 }
23230
23231 @@ -3881,6 +3885,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
23232 #endif
23233 #endif
23234
23235 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23236 + __set_fs(current_thread_info()->addr_limit);
23237 +#endif
23238 +
23239 reload_tss(vcpu);
23240
23241 local_irq_disable();
23242 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
23243 index 9120ae1..238abc0 100644
23244 --- a/arch/x86/kvm/vmx.c
23245 +++ b/arch/x86/kvm/vmx.c
23246 @@ -1370,7 +1370,11 @@ static void reload_tss(void)
23247 struct desc_struct *descs;
23248
23249 descs = (void *)gdt->address;
23250 +
23251 + pax_open_kernel();
23252 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
23253 + pax_close_kernel();
23254 +
23255 load_TR_desc();
23256 }
23257
23258 @@ -1594,6 +1598,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
23259 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
23260 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
23261
23262 +#ifdef CONFIG_PAX_PER_CPU_PGD
23263 + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
23264 +#endif
23265 +
23266 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
23267 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
23268 vmx->loaded_vmcs->cpu = cpu;
23269 @@ -2738,8 +2746,11 @@ static __init int hardware_setup(void)
23270 if (!cpu_has_vmx_flexpriority())
23271 flexpriority_enabled = 0;
23272
23273 - if (!cpu_has_vmx_tpr_shadow())
23274 - kvm_x86_ops->update_cr8_intercept = NULL;
23275 + if (!cpu_has_vmx_tpr_shadow()) {
23276 + pax_open_kernel();
23277 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
23278 + pax_close_kernel();
23279 + }
23280
23281 if (enable_ept && !cpu_has_vmx_ept_2m_page())
23282 kvm_disable_largepages();
23283 @@ -3782,7 +3793,10 @@ static void vmx_set_constant_host_state(void)
23284
23285 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
23286 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
23287 +
23288 +#ifndef CONFIG_PAX_PER_CPU_PGD
23289 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
23290 +#endif
23291
23292 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
23293 #ifdef CONFIG_X86_64
23294 @@ -3803,7 +3817,7 @@ static void vmx_set_constant_host_state(void)
23295 native_store_idt(&dt);
23296 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
23297
23298 - vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
23299 + vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
23300
23301 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
23302 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
23303 @@ -6355,6 +6369,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23304 "jmp 2f \n\t"
23305 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
23306 "2: "
23307 +
23308 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23309 + "ljmp %[cs],$3f\n\t"
23310 + "3: "
23311 +#endif
23312 +
23313 /* Save guest registers, load host registers, keep flags */
23314 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
23315 "pop %0 \n\t"
23316 @@ -6407,6 +6427,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23317 #endif
23318 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
23319 [wordsize]"i"(sizeof(ulong))
23320 +
23321 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23322 + ,[cs]"i"(__KERNEL_CS)
23323 +#endif
23324 +
23325 : "cc", "memory"
23326 #ifdef CONFIG_X86_64
23327 , "rax", "rbx", "rdi", "rsi"
23328 @@ -6420,7 +6445,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23329 if (debugctlmsr)
23330 update_debugctlmsr(debugctlmsr);
23331
23332 -#ifndef CONFIG_X86_64
23333 +#ifdef CONFIG_X86_32
23334 /*
23335 * The sysexit path does not restore ds/es, so we must set them to
23336 * a reasonable value ourselves.
23337 @@ -6429,8 +6454,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23338 * may be executed in interrupt context, which saves and restore segments
23339 * around it, nullifying its effect.
23340 */
23341 - loadsegment(ds, __USER_DS);
23342 - loadsegment(es, __USER_DS);
23343 + loadsegment(ds, __KERNEL_DS);
23344 + loadsegment(es, __KERNEL_DS);
23345 + loadsegment(ss, __KERNEL_DS);
23346 +
23347 +#ifdef CONFIG_PAX_KERNEXEC
23348 + loadsegment(fs, __KERNEL_PERCPU);
23349 +#endif
23350 +
23351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23352 + __set_fs(current_thread_info()->addr_limit);
23353 +#endif
23354 +
23355 #endif
23356
23357 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
23358 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
23359 index c243b81..9eb193f 100644
23360 --- a/arch/x86/kvm/x86.c
23361 +++ b/arch/x86/kvm/x86.c
23362 @@ -1692,8 +1692,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
23363 {
23364 struct kvm *kvm = vcpu->kvm;
23365 int lm = is_long_mode(vcpu);
23366 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
23367 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
23368 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
23369 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
23370 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
23371 : kvm->arch.xen_hvm_config.blob_size_32;
23372 u32 page_num = data & ~PAGE_MASK;
23373 @@ -2571,6 +2571,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
23374 if (n < msr_list.nmsrs)
23375 goto out;
23376 r = -EFAULT;
23377 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
23378 + goto out;
23379 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
23380 num_msrs_to_save * sizeof(u32)))
23381 goto out;
23382 @@ -2700,7 +2702,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
23383 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
23384 struct kvm_interrupt *irq)
23385 {
23386 - if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
23387 + if (irq->irq >= KVM_NR_INTERRUPTS)
23388 return -EINVAL;
23389 if (irqchip_in_kernel(vcpu->kvm))
23390 return -ENXIO;
23391 @@ -5213,7 +5215,7 @@ static struct notifier_block pvclock_gtod_notifier = {
23392 };
23393 #endif
23394
23395 -int kvm_arch_init(void *opaque)
23396 +int kvm_arch_init(const void *opaque)
23397 {
23398 int r;
23399 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
23400 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
23401 index df4176c..23ce092 100644
23402 --- a/arch/x86/lguest/boot.c
23403 +++ b/arch/x86/lguest/boot.c
23404 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
23405 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
23406 * Launcher to reboot us.
23407 */
23408 -static void lguest_restart(char *reason)
23409 +static __noreturn void lguest_restart(char *reason)
23410 {
23411 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
23412 + BUG();
23413 }
23414
23415 /*G:050
23416 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
23417 index 00933d5..3a64af9 100644
23418 --- a/arch/x86/lib/atomic64_386_32.S
23419 +++ b/arch/x86/lib/atomic64_386_32.S
23420 @@ -48,6 +48,10 @@ BEGIN(read)
23421 movl (v), %eax
23422 movl 4(v), %edx
23423 RET_ENDP
23424 +BEGIN(read_unchecked)
23425 + movl (v), %eax
23426 + movl 4(v), %edx
23427 +RET_ENDP
23428 #undef v
23429
23430 #define v %esi
23431 @@ -55,6 +59,10 @@ BEGIN(set)
23432 movl %ebx, (v)
23433 movl %ecx, 4(v)
23434 RET_ENDP
23435 +BEGIN(set_unchecked)
23436 + movl %ebx, (v)
23437 + movl %ecx, 4(v)
23438 +RET_ENDP
23439 #undef v
23440
23441 #define v %esi
23442 @@ -70,6 +78,20 @@ RET_ENDP
23443 BEGIN(add)
23444 addl %eax, (v)
23445 adcl %edx, 4(v)
23446 +
23447 +#ifdef CONFIG_PAX_REFCOUNT
23448 + jno 0f
23449 + subl %eax, (v)
23450 + sbbl %edx, 4(v)
23451 + int $4
23452 +0:
23453 + _ASM_EXTABLE(0b, 0b)
23454 +#endif
23455 +
23456 +RET_ENDP
23457 +BEGIN(add_unchecked)
23458 + addl %eax, (v)
23459 + adcl %edx, 4(v)
23460 RET_ENDP
23461 #undef v
23462
23463 @@ -77,6 +99,24 @@ RET_ENDP
23464 BEGIN(add_return)
23465 addl (v), %eax
23466 adcl 4(v), %edx
23467 +
23468 +#ifdef CONFIG_PAX_REFCOUNT
23469 + into
23470 +1234:
23471 + _ASM_EXTABLE(1234b, 2f)
23472 +#endif
23473 +
23474 + movl %eax, (v)
23475 + movl %edx, 4(v)
23476 +
23477 +#ifdef CONFIG_PAX_REFCOUNT
23478 +2:
23479 +#endif
23480 +
23481 +RET_ENDP
23482 +BEGIN(add_return_unchecked)
23483 + addl (v), %eax
23484 + adcl 4(v), %edx
23485 movl %eax, (v)
23486 movl %edx, 4(v)
23487 RET_ENDP
23488 @@ -86,6 +126,20 @@ RET_ENDP
23489 BEGIN(sub)
23490 subl %eax, (v)
23491 sbbl %edx, 4(v)
23492 +
23493 +#ifdef CONFIG_PAX_REFCOUNT
23494 + jno 0f
23495 + addl %eax, (v)
23496 + adcl %edx, 4(v)
23497 + int $4
23498 +0:
23499 + _ASM_EXTABLE(0b, 0b)
23500 +#endif
23501 +
23502 +RET_ENDP
23503 +BEGIN(sub_unchecked)
23504 + subl %eax, (v)
23505 + sbbl %edx, 4(v)
23506 RET_ENDP
23507 #undef v
23508
23509 @@ -96,6 +150,27 @@ BEGIN(sub_return)
23510 sbbl $0, %edx
23511 addl (v), %eax
23512 adcl 4(v), %edx
23513 +
23514 +#ifdef CONFIG_PAX_REFCOUNT
23515 + into
23516 +1234:
23517 + _ASM_EXTABLE(1234b, 2f)
23518 +#endif
23519 +
23520 + movl %eax, (v)
23521 + movl %edx, 4(v)
23522 +
23523 +#ifdef CONFIG_PAX_REFCOUNT
23524 +2:
23525 +#endif
23526 +
23527 +RET_ENDP
23528 +BEGIN(sub_return_unchecked)
23529 + negl %edx
23530 + negl %eax
23531 + sbbl $0, %edx
23532 + addl (v), %eax
23533 + adcl 4(v), %edx
23534 movl %eax, (v)
23535 movl %edx, 4(v)
23536 RET_ENDP
23537 @@ -105,6 +180,20 @@ RET_ENDP
23538 BEGIN(inc)
23539 addl $1, (v)
23540 adcl $0, 4(v)
23541 +
23542 +#ifdef CONFIG_PAX_REFCOUNT
23543 + jno 0f
23544 + subl $1, (v)
23545 + sbbl $0, 4(v)
23546 + int $4
23547 +0:
23548 + _ASM_EXTABLE(0b, 0b)
23549 +#endif
23550 +
23551 +RET_ENDP
23552 +BEGIN(inc_unchecked)
23553 + addl $1, (v)
23554 + adcl $0, 4(v)
23555 RET_ENDP
23556 #undef v
23557
23558 @@ -114,6 +203,26 @@ BEGIN(inc_return)
23559 movl 4(v), %edx
23560 addl $1, %eax
23561 adcl $0, %edx
23562 +
23563 +#ifdef CONFIG_PAX_REFCOUNT
23564 + into
23565 +1234:
23566 + _ASM_EXTABLE(1234b, 2f)
23567 +#endif
23568 +
23569 + movl %eax, (v)
23570 + movl %edx, 4(v)
23571 +
23572 +#ifdef CONFIG_PAX_REFCOUNT
23573 +2:
23574 +#endif
23575 +
23576 +RET_ENDP
23577 +BEGIN(inc_return_unchecked)
23578 + movl (v), %eax
23579 + movl 4(v), %edx
23580 + addl $1, %eax
23581 + adcl $0, %edx
23582 movl %eax, (v)
23583 movl %edx, 4(v)
23584 RET_ENDP
23585 @@ -123,6 +232,20 @@ RET_ENDP
23586 BEGIN(dec)
23587 subl $1, (v)
23588 sbbl $0, 4(v)
23589 +
23590 +#ifdef CONFIG_PAX_REFCOUNT
23591 + jno 0f
23592 + addl $1, (v)
23593 + adcl $0, 4(v)
23594 + int $4
23595 +0:
23596 + _ASM_EXTABLE(0b, 0b)
23597 +#endif
23598 +
23599 +RET_ENDP
23600 +BEGIN(dec_unchecked)
23601 + subl $1, (v)
23602 + sbbl $0, 4(v)
23603 RET_ENDP
23604 #undef v
23605
23606 @@ -132,6 +255,26 @@ BEGIN(dec_return)
23607 movl 4(v), %edx
23608 subl $1, %eax
23609 sbbl $0, %edx
23610 +
23611 +#ifdef CONFIG_PAX_REFCOUNT
23612 + into
23613 +1234:
23614 + _ASM_EXTABLE(1234b, 2f)
23615 +#endif
23616 +
23617 + movl %eax, (v)
23618 + movl %edx, 4(v)
23619 +
23620 +#ifdef CONFIG_PAX_REFCOUNT
23621 +2:
23622 +#endif
23623 +
23624 +RET_ENDP
23625 +BEGIN(dec_return_unchecked)
23626 + movl (v), %eax
23627 + movl 4(v), %edx
23628 + subl $1, %eax
23629 + sbbl $0, %edx
23630 movl %eax, (v)
23631 movl %edx, 4(v)
23632 RET_ENDP
23633 @@ -143,6 +286,13 @@ BEGIN(add_unless)
23634 adcl %edx, %edi
23635 addl (v), %eax
23636 adcl 4(v), %edx
23637 +
23638 +#ifdef CONFIG_PAX_REFCOUNT
23639 + into
23640 +1234:
23641 + _ASM_EXTABLE(1234b, 2f)
23642 +#endif
23643 +
23644 cmpl %eax, %ecx
23645 je 3f
23646 1:
23647 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
23648 1:
23649 addl $1, %eax
23650 adcl $0, %edx
23651 +
23652 +#ifdef CONFIG_PAX_REFCOUNT
23653 + into
23654 +1234:
23655 + _ASM_EXTABLE(1234b, 2f)
23656 +#endif
23657 +
23658 movl %eax, (v)
23659 movl %edx, 4(v)
23660 movl $1, %eax
23661 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
23662 movl 4(v), %edx
23663 subl $1, %eax
23664 sbbl $0, %edx
23665 +
23666 +#ifdef CONFIG_PAX_REFCOUNT
23667 + into
23668 +1234:
23669 + _ASM_EXTABLE(1234b, 1f)
23670 +#endif
23671 +
23672 js 1f
23673 movl %eax, (v)
23674 movl %edx, 4(v)
23675 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
23676 index f5cc9eb..51fa319 100644
23677 --- a/arch/x86/lib/atomic64_cx8_32.S
23678 +++ b/arch/x86/lib/atomic64_cx8_32.S
23679 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
23680 CFI_STARTPROC
23681
23682 read64 %ecx
23683 + pax_force_retaddr
23684 ret
23685 CFI_ENDPROC
23686 ENDPROC(atomic64_read_cx8)
23687
23688 +ENTRY(atomic64_read_unchecked_cx8)
23689 + CFI_STARTPROC
23690 +
23691 + read64 %ecx
23692 + pax_force_retaddr
23693 + ret
23694 + CFI_ENDPROC
23695 +ENDPROC(atomic64_read_unchecked_cx8)
23696 +
23697 ENTRY(atomic64_set_cx8)
23698 CFI_STARTPROC
23699
23700 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
23701 cmpxchg8b (%esi)
23702 jne 1b
23703
23704 + pax_force_retaddr
23705 ret
23706 CFI_ENDPROC
23707 ENDPROC(atomic64_set_cx8)
23708
23709 +ENTRY(atomic64_set_unchecked_cx8)
23710 + CFI_STARTPROC
23711 +
23712 +1:
23713 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
23714 + * are atomic on 586 and newer */
23715 + cmpxchg8b (%esi)
23716 + jne 1b
23717 +
23718 + pax_force_retaddr
23719 + ret
23720 + CFI_ENDPROC
23721 +ENDPROC(atomic64_set_unchecked_cx8)
23722 +
23723 ENTRY(atomic64_xchg_cx8)
23724 CFI_STARTPROC
23725
23726 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
23727 cmpxchg8b (%esi)
23728 jne 1b
23729
23730 + pax_force_retaddr
23731 ret
23732 CFI_ENDPROC
23733 ENDPROC(atomic64_xchg_cx8)
23734
23735 -.macro addsub_return func ins insc
23736 -ENTRY(atomic64_\func\()_return_cx8)
23737 +.macro addsub_return func ins insc unchecked=""
23738 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
23739 CFI_STARTPROC
23740 SAVE ebp
23741 SAVE ebx
23742 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
23743 movl %edx, %ecx
23744 \ins\()l %esi, %ebx
23745 \insc\()l %edi, %ecx
23746 +
23747 +.ifb \unchecked
23748 +#ifdef CONFIG_PAX_REFCOUNT
23749 + into
23750 +2:
23751 + _ASM_EXTABLE(2b, 3f)
23752 +#endif
23753 +.endif
23754 +
23755 LOCK_PREFIX
23756 cmpxchg8b (%ebp)
23757 jne 1b
23758 -
23759 -10:
23760 movl %ebx, %eax
23761 movl %ecx, %edx
23762 +
23763 +.ifb \unchecked
23764 +#ifdef CONFIG_PAX_REFCOUNT
23765 +3:
23766 +#endif
23767 +.endif
23768 +
23769 RESTORE edi
23770 RESTORE esi
23771 RESTORE ebx
23772 RESTORE ebp
23773 + pax_force_retaddr
23774 ret
23775 CFI_ENDPROC
23776 -ENDPROC(atomic64_\func\()_return_cx8)
23777 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
23778 .endm
23779
23780 addsub_return add add adc
23781 addsub_return sub sub sbb
23782 +addsub_return add add adc _unchecked
23783 +addsub_return sub sub sbb _unchecked
23784
23785 -.macro incdec_return func ins insc
23786 -ENTRY(atomic64_\func\()_return_cx8)
23787 +.macro incdec_return func ins insc unchecked=""
23788 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
23789 CFI_STARTPROC
23790 SAVE ebx
23791
23792 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
23793 movl %edx, %ecx
23794 \ins\()l $1, %ebx
23795 \insc\()l $0, %ecx
23796 +
23797 +.ifb \unchecked
23798 +#ifdef CONFIG_PAX_REFCOUNT
23799 + into
23800 +2:
23801 + _ASM_EXTABLE(2b, 3f)
23802 +#endif
23803 +.endif
23804 +
23805 LOCK_PREFIX
23806 cmpxchg8b (%esi)
23807 jne 1b
23808
23809 -10:
23810 movl %ebx, %eax
23811 movl %ecx, %edx
23812 +
23813 +.ifb \unchecked
23814 +#ifdef CONFIG_PAX_REFCOUNT
23815 +3:
23816 +#endif
23817 +.endif
23818 +
23819 RESTORE ebx
23820 + pax_force_retaddr
23821 ret
23822 CFI_ENDPROC
23823 -ENDPROC(atomic64_\func\()_return_cx8)
23824 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
23825 .endm
23826
23827 incdec_return inc add adc
23828 incdec_return dec sub sbb
23829 +incdec_return inc add adc _unchecked
23830 +incdec_return dec sub sbb _unchecked
23831
23832 ENTRY(atomic64_dec_if_positive_cx8)
23833 CFI_STARTPROC
23834 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
23835 movl %edx, %ecx
23836 subl $1, %ebx
23837 sbb $0, %ecx
23838 +
23839 +#ifdef CONFIG_PAX_REFCOUNT
23840 + into
23841 +1234:
23842 + _ASM_EXTABLE(1234b, 2f)
23843 +#endif
23844 +
23845 js 2f
23846 LOCK_PREFIX
23847 cmpxchg8b (%esi)
23848 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
23849 movl %ebx, %eax
23850 movl %ecx, %edx
23851 RESTORE ebx
23852 + pax_force_retaddr
23853 ret
23854 CFI_ENDPROC
23855 ENDPROC(atomic64_dec_if_positive_cx8)
23856 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
23857 movl %edx, %ecx
23858 addl %ebp, %ebx
23859 adcl %edi, %ecx
23860 +
23861 +#ifdef CONFIG_PAX_REFCOUNT
23862 + into
23863 +1234:
23864 + _ASM_EXTABLE(1234b, 3f)
23865 +#endif
23866 +
23867 LOCK_PREFIX
23868 cmpxchg8b (%esi)
23869 jne 1b
23870 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
23871 CFI_ADJUST_CFA_OFFSET -8
23872 RESTORE ebx
23873 RESTORE ebp
23874 + pax_force_retaddr
23875 ret
23876 4:
23877 cmpl %edx, 4(%esp)
23878 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
23879 xorl %ecx, %ecx
23880 addl $1, %ebx
23881 adcl %edx, %ecx
23882 +
23883 +#ifdef CONFIG_PAX_REFCOUNT
23884 + into
23885 +1234:
23886 + _ASM_EXTABLE(1234b, 3f)
23887 +#endif
23888 +
23889 LOCK_PREFIX
23890 cmpxchg8b (%esi)
23891 jne 1b
23892 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
23893 movl $1, %eax
23894 3:
23895 RESTORE ebx
23896 + pax_force_retaddr
23897 ret
23898 CFI_ENDPROC
23899 ENDPROC(atomic64_inc_not_zero_cx8)
23900 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
23901 index 2af5df3..62b1a5a 100644
23902 --- a/arch/x86/lib/checksum_32.S
23903 +++ b/arch/x86/lib/checksum_32.S
23904 @@ -29,7 +29,8 @@
23905 #include <asm/dwarf2.h>
23906 #include <asm/errno.h>
23907 #include <asm/asm.h>
23908 -
23909 +#include <asm/segment.h>
23910 +
23911 /*
23912 * computes a partial checksum, e.g. for TCP/UDP fragments
23913 */
23914 @@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
23915
23916 #define ARGBASE 16
23917 #define FP 12
23918 -
23919 -ENTRY(csum_partial_copy_generic)
23920 +
23921 +ENTRY(csum_partial_copy_generic_to_user)
23922 CFI_STARTPROC
23923 +
23924 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23925 + pushl_cfi %gs
23926 + popl_cfi %es
23927 + jmp csum_partial_copy_generic
23928 +#endif
23929 +
23930 +ENTRY(csum_partial_copy_generic_from_user)
23931 +
23932 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23933 + pushl_cfi %gs
23934 + popl_cfi %ds
23935 +#endif
23936 +
23937 +ENTRY(csum_partial_copy_generic)
23938 subl $4,%esp
23939 CFI_ADJUST_CFA_OFFSET 4
23940 pushl_cfi %edi
23941 @@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
23942 jmp 4f
23943 SRC(1: movw (%esi), %bx )
23944 addl $2, %esi
23945 -DST( movw %bx, (%edi) )
23946 +DST( movw %bx, %es:(%edi) )
23947 addl $2, %edi
23948 addw %bx, %ax
23949 adcl $0, %eax
23950 @@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
23951 SRC(1: movl (%esi), %ebx )
23952 SRC( movl 4(%esi), %edx )
23953 adcl %ebx, %eax
23954 -DST( movl %ebx, (%edi) )
23955 +DST( movl %ebx, %es:(%edi) )
23956 adcl %edx, %eax
23957 -DST( movl %edx, 4(%edi) )
23958 +DST( movl %edx, %es:4(%edi) )
23959
23960 SRC( movl 8(%esi), %ebx )
23961 SRC( movl 12(%esi), %edx )
23962 adcl %ebx, %eax
23963 -DST( movl %ebx, 8(%edi) )
23964 +DST( movl %ebx, %es:8(%edi) )
23965 adcl %edx, %eax
23966 -DST( movl %edx, 12(%edi) )
23967 +DST( movl %edx, %es:12(%edi) )
23968
23969 SRC( movl 16(%esi), %ebx )
23970 SRC( movl 20(%esi), %edx )
23971 adcl %ebx, %eax
23972 -DST( movl %ebx, 16(%edi) )
23973 +DST( movl %ebx, %es:16(%edi) )
23974 adcl %edx, %eax
23975 -DST( movl %edx, 20(%edi) )
23976 +DST( movl %edx, %es:20(%edi) )
23977
23978 SRC( movl 24(%esi), %ebx )
23979 SRC( movl 28(%esi), %edx )
23980 adcl %ebx, %eax
23981 -DST( movl %ebx, 24(%edi) )
23982 +DST( movl %ebx, %es:24(%edi) )
23983 adcl %edx, %eax
23984 -DST( movl %edx, 28(%edi) )
23985 +DST( movl %edx, %es:28(%edi) )
23986
23987 lea 32(%esi), %esi
23988 lea 32(%edi), %edi
23989 @@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
23990 shrl $2, %edx # This clears CF
23991 SRC(3: movl (%esi), %ebx )
23992 adcl %ebx, %eax
23993 -DST( movl %ebx, (%edi) )
23994 +DST( movl %ebx, %es:(%edi) )
23995 lea 4(%esi), %esi
23996 lea 4(%edi), %edi
23997 dec %edx
23998 @@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
23999 jb 5f
24000 SRC( movw (%esi), %cx )
24001 leal 2(%esi), %esi
24002 -DST( movw %cx, (%edi) )
24003 +DST( movw %cx, %es:(%edi) )
24004 leal 2(%edi), %edi
24005 je 6f
24006 shll $16,%ecx
24007 SRC(5: movb (%esi), %cl )
24008 -DST( movb %cl, (%edi) )
24009 +DST( movb %cl, %es:(%edi) )
24010 6: addl %ecx, %eax
24011 adcl $0, %eax
24012 7:
24013 @@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
24014
24015 6001:
24016 movl ARGBASE+20(%esp), %ebx # src_err_ptr
24017 - movl $-EFAULT, (%ebx)
24018 + movl $-EFAULT, %ss:(%ebx)
24019
24020 # zero the complete destination - computing the rest
24021 # is too much work
24022 @@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
24023
24024 6002:
24025 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24026 - movl $-EFAULT,(%ebx)
24027 + movl $-EFAULT,%ss:(%ebx)
24028 jmp 5000b
24029
24030 .previous
24031
24032 + pushl_cfi %ss
24033 + popl_cfi %ds
24034 + pushl_cfi %ss
24035 + popl_cfi %es
24036 popl_cfi %ebx
24037 CFI_RESTORE ebx
24038 popl_cfi %esi
24039 @@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
24040 popl_cfi %ecx # equivalent to addl $4,%esp
24041 ret
24042 CFI_ENDPROC
24043 -ENDPROC(csum_partial_copy_generic)
24044 +ENDPROC(csum_partial_copy_generic_to_user)
24045
24046 #else
24047
24048 /* Version for PentiumII/PPro */
24049
24050 #define ROUND1(x) \
24051 + nop; nop; nop; \
24052 SRC(movl x(%esi), %ebx ) ; \
24053 addl %ebx, %eax ; \
24054 - DST(movl %ebx, x(%edi) ) ;
24055 + DST(movl %ebx, %es:x(%edi)) ;
24056
24057 #define ROUND(x) \
24058 + nop; nop; nop; \
24059 SRC(movl x(%esi), %ebx ) ; \
24060 adcl %ebx, %eax ; \
24061 - DST(movl %ebx, x(%edi) ) ;
24062 + DST(movl %ebx, %es:x(%edi)) ;
24063
24064 #define ARGBASE 12
24065 -
24066 -ENTRY(csum_partial_copy_generic)
24067 +
24068 +ENTRY(csum_partial_copy_generic_to_user)
24069 CFI_STARTPROC
24070 +
24071 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24072 + pushl_cfi %gs
24073 + popl_cfi %es
24074 + jmp csum_partial_copy_generic
24075 +#endif
24076 +
24077 +ENTRY(csum_partial_copy_generic_from_user)
24078 +
24079 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24080 + pushl_cfi %gs
24081 + popl_cfi %ds
24082 +#endif
24083 +
24084 +ENTRY(csum_partial_copy_generic)
24085 pushl_cfi %ebx
24086 CFI_REL_OFFSET ebx, 0
24087 pushl_cfi %edi
24088 @@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
24089 subl %ebx, %edi
24090 lea -1(%esi),%edx
24091 andl $-32,%edx
24092 - lea 3f(%ebx,%ebx), %ebx
24093 + lea 3f(%ebx,%ebx,2), %ebx
24094 testl %esi, %esi
24095 jmp *%ebx
24096 1: addl $64,%esi
24097 @@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
24098 jb 5f
24099 SRC( movw (%esi), %dx )
24100 leal 2(%esi), %esi
24101 -DST( movw %dx, (%edi) )
24102 +DST( movw %dx, %es:(%edi) )
24103 leal 2(%edi), %edi
24104 je 6f
24105 shll $16,%edx
24106 5:
24107 SRC( movb (%esi), %dl )
24108 -DST( movb %dl, (%edi) )
24109 +DST( movb %dl, %es:(%edi) )
24110 6: addl %edx, %eax
24111 adcl $0, %eax
24112 7:
24113 .section .fixup, "ax"
24114 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
24115 - movl $-EFAULT, (%ebx)
24116 + movl $-EFAULT, %ss:(%ebx)
24117 # zero the complete destination (computing the rest is too much work)
24118 movl ARGBASE+8(%esp),%edi # dst
24119 movl ARGBASE+12(%esp),%ecx # len
24120 @@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
24121 rep; stosb
24122 jmp 7b
24123 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24124 - movl $-EFAULT, (%ebx)
24125 + movl $-EFAULT, %ss:(%ebx)
24126 jmp 7b
24127 .previous
24128
24129 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24130 + pushl_cfi %ss
24131 + popl_cfi %ds
24132 + pushl_cfi %ss
24133 + popl_cfi %es
24134 +#endif
24135 +
24136 popl_cfi %esi
24137 CFI_RESTORE esi
24138 popl_cfi %edi
24139 @@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
24140 CFI_RESTORE ebx
24141 ret
24142 CFI_ENDPROC
24143 -ENDPROC(csum_partial_copy_generic)
24144 +ENDPROC(csum_partial_copy_generic_to_user)
24145
24146 #undef ROUND
24147 #undef ROUND1
24148 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
24149 index f2145cf..cea889d 100644
24150 --- a/arch/x86/lib/clear_page_64.S
24151 +++ b/arch/x86/lib/clear_page_64.S
24152 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
24153 movl $4096/8,%ecx
24154 xorl %eax,%eax
24155 rep stosq
24156 + pax_force_retaddr
24157 ret
24158 CFI_ENDPROC
24159 ENDPROC(clear_page_c)
24160 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
24161 movl $4096,%ecx
24162 xorl %eax,%eax
24163 rep stosb
24164 + pax_force_retaddr
24165 ret
24166 CFI_ENDPROC
24167 ENDPROC(clear_page_c_e)
24168 @@ -43,6 +45,7 @@ ENTRY(clear_page)
24169 leaq 64(%rdi),%rdi
24170 jnz .Lloop
24171 nop
24172 + pax_force_retaddr
24173 ret
24174 CFI_ENDPROC
24175 .Lclear_page_end:
24176 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
24177
24178 #include <asm/cpufeature.h>
24179
24180 - .section .altinstr_replacement,"ax"
24181 + .section .altinstr_replacement,"a"
24182 1: .byte 0xeb /* jmp <disp8> */
24183 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
24184 2: .byte 0xeb /* jmp <disp8> */
24185 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
24186 index 1e572c5..2a162cd 100644
24187 --- a/arch/x86/lib/cmpxchg16b_emu.S
24188 +++ b/arch/x86/lib/cmpxchg16b_emu.S
24189 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
24190
24191 popf
24192 mov $1, %al
24193 + pax_force_retaddr
24194 ret
24195
24196 not_same:
24197 popf
24198 xor %al,%al
24199 + pax_force_retaddr
24200 ret
24201
24202 CFI_ENDPROC
24203 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
24204 index 176cca6..1166c50 100644
24205 --- a/arch/x86/lib/copy_page_64.S
24206 +++ b/arch/x86/lib/copy_page_64.S
24207 @@ -9,6 +9,7 @@ copy_page_rep:
24208 CFI_STARTPROC
24209 movl $4096/8, %ecx
24210 rep movsq
24211 + pax_force_retaddr
24212 ret
24213 CFI_ENDPROC
24214 ENDPROC(copy_page_rep)
24215 @@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
24216
24217 ENTRY(copy_page)
24218 CFI_STARTPROC
24219 - subq $2*8, %rsp
24220 - CFI_ADJUST_CFA_OFFSET 2*8
24221 + subq $3*8, %rsp
24222 + CFI_ADJUST_CFA_OFFSET 3*8
24223 movq %rbx, (%rsp)
24224 CFI_REL_OFFSET rbx, 0
24225 movq %r12, 1*8(%rsp)
24226 CFI_REL_OFFSET r12, 1*8
24227 + movq %r13, 2*8(%rsp)
24228 + CFI_REL_OFFSET r13, 2*8
24229
24230 movl $(4096/64)-5, %ecx
24231 .p2align 4
24232 @@ -36,7 +39,7 @@ ENTRY(copy_page)
24233 movq 0x8*2(%rsi), %rdx
24234 movq 0x8*3(%rsi), %r8
24235 movq 0x8*4(%rsi), %r9
24236 - movq 0x8*5(%rsi), %r10
24237 + movq 0x8*5(%rsi), %r13
24238 movq 0x8*6(%rsi), %r11
24239 movq 0x8*7(%rsi), %r12
24240
24241 @@ -47,7 +50,7 @@ ENTRY(copy_page)
24242 movq %rdx, 0x8*2(%rdi)
24243 movq %r8, 0x8*3(%rdi)
24244 movq %r9, 0x8*4(%rdi)
24245 - movq %r10, 0x8*5(%rdi)
24246 + movq %r13, 0x8*5(%rdi)
24247 movq %r11, 0x8*6(%rdi)
24248 movq %r12, 0x8*7(%rdi)
24249
24250 @@ -66,7 +69,7 @@ ENTRY(copy_page)
24251 movq 0x8*2(%rsi), %rdx
24252 movq 0x8*3(%rsi), %r8
24253 movq 0x8*4(%rsi), %r9
24254 - movq 0x8*5(%rsi), %r10
24255 + movq 0x8*5(%rsi), %r13
24256 movq 0x8*6(%rsi), %r11
24257 movq 0x8*7(%rsi), %r12
24258
24259 @@ -75,7 +78,7 @@ ENTRY(copy_page)
24260 movq %rdx, 0x8*2(%rdi)
24261 movq %r8, 0x8*3(%rdi)
24262 movq %r9, 0x8*4(%rdi)
24263 - movq %r10, 0x8*5(%rdi)
24264 + movq %r13, 0x8*5(%rdi)
24265 movq %r11, 0x8*6(%rdi)
24266 movq %r12, 0x8*7(%rdi)
24267
24268 @@ -87,8 +90,11 @@ ENTRY(copy_page)
24269 CFI_RESTORE rbx
24270 movq 1*8(%rsp), %r12
24271 CFI_RESTORE r12
24272 - addq $2*8, %rsp
24273 - CFI_ADJUST_CFA_OFFSET -2*8
24274 + movq 2*8(%rsp), %r13
24275 + CFI_RESTORE r13
24276 + addq $3*8, %rsp
24277 + CFI_ADJUST_CFA_OFFSET -3*8
24278 + pax_force_retaddr
24279 ret
24280 .Lcopy_page_end:
24281 CFI_ENDPROC
24282 @@ -99,7 +105,7 @@ ENDPROC(copy_page)
24283
24284 #include <asm/cpufeature.h>
24285
24286 - .section .altinstr_replacement,"ax"
24287 + .section .altinstr_replacement,"a"
24288 1: .byte 0xeb /* jmp <disp8> */
24289 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
24290 2:
24291 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
24292 index a30ca15..d25fab6 100644
24293 --- a/arch/x86/lib/copy_user_64.S
24294 +++ b/arch/x86/lib/copy_user_64.S
24295 @@ -18,6 +18,7 @@
24296 #include <asm/alternative-asm.h>
24297 #include <asm/asm.h>
24298 #include <asm/smap.h>
24299 +#include <asm/pgtable.h>
24300
24301 /*
24302 * By placing feature2 after feature1 in altinstructions section, we logically
24303 @@ -31,7 +32,7 @@
24304 .byte 0xe9 /* 32bit jump */
24305 .long \orig-1f /* by default jump to orig */
24306 1:
24307 - .section .altinstr_replacement,"ax"
24308 + .section .altinstr_replacement,"a"
24309 2: .byte 0xe9 /* near jump with 32bit immediate */
24310 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
24311 3: .byte 0xe9 /* near jump with 32bit immediate */
24312 @@ -70,47 +71,20 @@
24313 #endif
24314 .endm
24315
24316 -/* Standard copy_to_user with segment limit checking */
24317 -ENTRY(_copy_to_user)
24318 - CFI_STARTPROC
24319 - GET_THREAD_INFO(%rax)
24320 - movq %rdi,%rcx
24321 - addq %rdx,%rcx
24322 - jc bad_to_user
24323 - cmpq TI_addr_limit(%rax),%rcx
24324 - ja bad_to_user
24325 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
24326 - copy_user_generic_unrolled,copy_user_generic_string, \
24327 - copy_user_enhanced_fast_string
24328 - CFI_ENDPROC
24329 -ENDPROC(_copy_to_user)
24330 -
24331 -/* Standard copy_from_user with segment limit checking */
24332 -ENTRY(_copy_from_user)
24333 - CFI_STARTPROC
24334 - GET_THREAD_INFO(%rax)
24335 - movq %rsi,%rcx
24336 - addq %rdx,%rcx
24337 - jc bad_from_user
24338 - cmpq TI_addr_limit(%rax),%rcx
24339 - ja bad_from_user
24340 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
24341 - copy_user_generic_unrolled,copy_user_generic_string, \
24342 - copy_user_enhanced_fast_string
24343 - CFI_ENDPROC
24344 -ENDPROC(_copy_from_user)
24345 -
24346 .section .fixup,"ax"
24347 /* must zero dest */
24348 ENTRY(bad_from_user)
24349 bad_from_user:
24350 CFI_STARTPROC
24351 + testl %edx,%edx
24352 + js bad_to_user
24353 movl %edx,%ecx
24354 xorl %eax,%eax
24355 rep
24356 stosb
24357 bad_to_user:
24358 movl %edx,%eax
24359 + pax_force_retaddr
24360 ret
24361 CFI_ENDPROC
24362 ENDPROC(bad_from_user)
24363 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
24364 jz 17f
24365 1: movq (%rsi),%r8
24366 2: movq 1*8(%rsi),%r9
24367 -3: movq 2*8(%rsi),%r10
24368 +3: movq 2*8(%rsi),%rax
24369 4: movq 3*8(%rsi),%r11
24370 5: movq %r8,(%rdi)
24371 6: movq %r9,1*8(%rdi)
24372 -7: movq %r10,2*8(%rdi)
24373 +7: movq %rax,2*8(%rdi)
24374 8: movq %r11,3*8(%rdi)
24375 9: movq 4*8(%rsi),%r8
24376 10: movq 5*8(%rsi),%r9
24377 -11: movq 6*8(%rsi),%r10
24378 +11: movq 6*8(%rsi),%rax
24379 12: movq 7*8(%rsi),%r11
24380 13: movq %r8,4*8(%rdi)
24381 14: movq %r9,5*8(%rdi)
24382 -15: movq %r10,6*8(%rdi)
24383 +15: movq %rax,6*8(%rdi)
24384 16: movq %r11,7*8(%rdi)
24385 leaq 64(%rsi),%rsi
24386 leaq 64(%rdi),%rdi
24387 @@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
24388 jnz 21b
24389 23: xor %eax,%eax
24390 ASM_CLAC
24391 + pax_force_retaddr
24392 ret
24393
24394 .section .fixup,"ax"
24395 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
24396 movsb
24397 4: xorl %eax,%eax
24398 ASM_CLAC
24399 + pax_force_retaddr
24400 ret
24401
24402 .section .fixup,"ax"
24403 @@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
24404 movsb
24405 2: xorl %eax,%eax
24406 ASM_CLAC
24407 + pax_force_retaddr
24408 ret
24409
24410 .section .fixup,"ax"
24411 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
24412 index 6a4f43c..f5f9e26 100644
24413 --- a/arch/x86/lib/copy_user_nocache_64.S
24414 +++ b/arch/x86/lib/copy_user_nocache_64.S
24415 @@ -8,6 +8,7 @@
24416
24417 #include <linux/linkage.h>
24418 #include <asm/dwarf2.h>
24419 +#include <asm/alternative-asm.h>
24420
24421 #define FIX_ALIGNMENT 1
24422
24423 @@ -16,6 +17,7 @@
24424 #include <asm/thread_info.h>
24425 #include <asm/asm.h>
24426 #include <asm/smap.h>
24427 +#include <asm/pgtable.h>
24428
24429 .macro ALIGN_DESTINATION
24430 #ifdef FIX_ALIGNMENT
24431 @@ -49,6 +51,15 @@
24432 */
24433 ENTRY(__copy_user_nocache)
24434 CFI_STARTPROC
24435 +
24436 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24437 + mov $PAX_USER_SHADOW_BASE,%rcx
24438 + cmp %rcx,%rsi
24439 + jae 1f
24440 + add %rcx,%rsi
24441 +1:
24442 +#endif
24443 +
24444 ASM_STAC
24445 cmpl $8,%edx
24446 jb 20f /* less then 8 bytes, go to byte copy loop */
24447 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
24448 jz 17f
24449 1: movq (%rsi),%r8
24450 2: movq 1*8(%rsi),%r9
24451 -3: movq 2*8(%rsi),%r10
24452 +3: movq 2*8(%rsi),%rax
24453 4: movq 3*8(%rsi),%r11
24454 5: movnti %r8,(%rdi)
24455 6: movnti %r9,1*8(%rdi)
24456 -7: movnti %r10,2*8(%rdi)
24457 +7: movnti %rax,2*8(%rdi)
24458 8: movnti %r11,3*8(%rdi)
24459 9: movq 4*8(%rsi),%r8
24460 10: movq 5*8(%rsi),%r9
24461 -11: movq 6*8(%rsi),%r10
24462 +11: movq 6*8(%rsi),%rax
24463 12: movq 7*8(%rsi),%r11
24464 13: movnti %r8,4*8(%rdi)
24465 14: movnti %r9,5*8(%rdi)
24466 -15: movnti %r10,6*8(%rdi)
24467 +15: movnti %rax,6*8(%rdi)
24468 16: movnti %r11,7*8(%rdi)
24469 leaq 64(%rsi),%rsi
24470 leaq 64(%rdi),%rdi
24471 @@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
24472 23: xorl %eax,%eax
24473 ASM_CLAC
24474 sfence
24475 + pax_force_retaddr
24476 ret
24477
24478 .section .fixup,"ax"
24479 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
24480 index 2419d5f..953ee51 100644
24481 --- a/arch/x86/lib/csum-copy_64.S
24482 +++ b/arch/x86/lib/csum-copy_64.S
24483 @@ -9,6 +9,7 @@
24484 #include <asm/dwarf2.h>
24485 #include <asm/errno.h>
24486 #include <asm/asm.h>
24487 +#include <asm/alternative-asm.h>
24488
24489 /*
24490 * Checksum copy with exception handling.
24491 @@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
24492 CFI_RESTORE rbp
24493 addq $7*8, %rsp
24494 CFI_ADJUST_CFA_OFFSET -7*8
24495 + pax_force_retaddr 0, 1
24496 ret
24497 CFI_RESTORE_STATE
24498
24499 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
24500 index 25b7ae8..169fafc 100644
24501 --- a/arch/x86/lib/csum-wrappers_64.c
24502 +++ b/arch/x86/lib/csum-wrappers_64.c
24503 @@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
24504 len -= 2;
24505 }
24506 }
24507 - isum = csum_partial_copy_generic((__force const void *)src,
24508 + isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
24509 dst, len, isum, errp, NULL);
24510 if (unlikely(*errp))
24511 goto out_err;
24512 @@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
24513 }
24514
24515 *errp = 0;
24516 - return csum_partial_copy_generic(src, (void __force *)dst,
24517 + return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
24518 len, isum, NULL, errp);
24519 }
24520 EXPORT_SYMBOL(csum_partial_copy_to_user);
24521 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
24522 index 156b9c8..b144132 100644
24523 --- a/arch/x86/lib/getuser.S
24524 +++ b/arch/x86/lib/getuser.S
24525 @@ -34,17 +34,40 @@
24526 #include <asm/thread_info.h>
24527 #include <asm/asm.h>
24528 #include <asm/smap.h>
24529 +#include <asm/segment.h>
24530 +#include <asm/pgtable.h>
24531 +#include <asm/alternative-asm.h>
24532 +
24533 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24534 +#define __copyuser_seg gs;
24535 +#else
24536 +#define __copyuser_seg
24537 +#endif
24538
24539 .text
24540 ENTRY(__get_user_1)
24541 CFI_STARTPROC
24542 +
24543 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24544 GET_THREAD_INFO(%_ASM_DX)
24545 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24546 jae bad_get_user
24547 ASM_STAC
24548 -1: movzb (%_ASM_AX),%edx
24549 +
24550 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24551 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24552 + cmp %_ASM_DX,%_ASM_AX
24553 + jae 1234f
24554 + add %_ASM_DX,%_ASM_AX
24555 +1234:
24556 +#endif
24557 +
24558 +#endif
24559 +
24560 +1: __copyuser_seg movzb (%_ASM_AX),%edx
24561 xor %eax,%eax
24562 ASM_CLAC
24563 + pax_force_retaddr
24564 ret
24565 CFI_ENDPROC
24566 ENDPROC(__get_user_1)
24567 @@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
24568 ENTRY(__get_user_2)
24569 CFI_STARTPROC
24570 add $1,%_ASM_AX
24571 +
24572 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24573 jc bad_get_user
24574 GET_THREAD_INFO(%_ASM_DX)
24575 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24576 jae bad_get_user
24577 ASM_STAC
24578 -2: movzwl -1(%_ASM_AX),%edx
24579 +
24580 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24581 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24582 + cmp %_ASM_DX,%_ASM_AX
24583 + jae 1234f
24584 + add %_ASM_DX,%_ASM_AX
24585 +1234:
24586 +#endif
24587 +
24588 +#endif
24589 +
24590 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
24591 xor %eax,%eax
24592 ASM_CLAC
24593 + pax_force_retaddr
24594 ret
24595 CFI_ENDPROC
24596 ENDPROC(__get_user_2)
24597 @@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
24598 ENTRY(__get_user_4)
24599 CFI_STARTPROC
24600 add $3,%_ASM_AX
24601 +
24602 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24603 jc bad_get_user
24604 GET_THREAD_INFO(%_ASM_DX)
24605 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24606 jae bad_get_user
24607 ASM_STAC
24608 -3: mov -3(%_ASM_AX),%edx
24609 +
24610 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24611 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24612 + cmp %_ASM_DX,%_ASM_AX
24613 + jae 1234f
24614 + add %_ASM_DX,%_ASM_AX
24615 +1234:
24616 +#endif
24617 +
24618 +#endif
24619 +
24620 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
24621 xor %eax,%eax
24622 ASM_CLAC
24623 + pax_force_retaddr
24624 ret
24625 CFI_ENDPROC
24626 ENDPROC(__get_user_4)
24627 @@ -87,10 +138,20 @@ ENTRY(__get_user_8)
24628 GET_THREAD_INFO(%_ASM_DX)
24629 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24630 jae bad_get_user
24631 +
24632 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24633 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24634 + cmp %_ASM_DX,%_ASM_AX
24635 + jae 1234f
24636 + add %_ASM_DX,%_ASM_AX
24637 +1234:
24638 +#endif
24639 +
24640 ASM_STAC
24641 4: movq -7(%_ASM_AX),%_ASM_DX
24642 xor %eax,%eax
24643 ASM_CLAC
24644 + pax_force_retaddr
24645 ret
24646 CFI_ENDPROC
24647 ENDPROC(__get_user_8)
24648 @@ -101,6 +162,7 @@ bad_get_user:
24649 xor %edx,%edx
24650 mov $(-EFAULT),%_ASM_AX
24651 ASM_CLAC
24652 + pax_force_retaddr
24653 ret
24654 CFI_ENDPROC
24655 END(bad_get_user)
24656 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
24657 index 54fcffe..7be149e 100644
24658 --- a/arch/x86/lib/insn.c
24659 +++ b/arch/x86/lib/insn.c
24660 @@ -20,8 +20,10 @@
24661
24662 #ifdef __KERNEL__
24663 #include <linux/string.h>
24664 +#include <asm/pgtable_types.h>
24665 #else
24666 #include <string.h>
24667 +#define ktla_ktva(addr) addr
24668 #endif
24669 #include <asm/inat.h>
24670 #include <asm/insn.h>
24671 @@ -53,8 +55,8 @@
24672 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
24673 {
24674 memset(insn, 0, sizeof(*insn));
24675 - insn->kaddr = kaddr;
24676 - insn->next_byte = kaddr;
24677 + insn->kaddr = ktla_ktva(kaddr);
24678 + insn->next_byte = ktla_ktva(kaddr);
24679 insn->x86_64 = x86_64 ? 1 : 0;
24680 insn->opnd_bytes = 4;
24681 if (x86_64)
24682 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
24683 index 05a95e7..326f2fa 100644
24684 --- a/arch/x86/lib/iomap_copy_64.S
24685 +++ b/arch/x86/lib/iomap_copy_64.S
24686 @@ -17,6 +17,7 @@
24687
24688 #include <linux/linkage.h>
24689 #include <asm/dwarf2.h>
24690 +#include <asm/alternative-asm.h>
24691
24692 /*
24693 * override generic version in lib/iomap_copy.c
24694 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
24695 CFI_STARTPROC
24696 movl %edx,%ecx
24697 rep movsd
24698 + pax_force_retaddr
24699 ret
24700 CFI_ENDPROC
24701 ENDPROC(__iowrite32_copy)
24702 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
24703 index 1c273be..da9cc0e 100644
24704 --- a/arch/x86/lib/memcpy_64.S
24705 +++ b/arch/x86/lib/memcpy_64.S
24706 @@ -33,6 +33,7 @@
24707 rep movsq
24708 movl %edx, %ecx
24709 rep movsb
24710 + pax_force_retaddr
24711 ret
24712 .Lmemcpy_e:
24713 .previous
24714 @@ -49,6 +50,7 @@
24715 movq %rdi, %rax
24716 movq %rdx, %rcx
24717 rep movsb
24718 + pax_force_retaddr
24719 ret
24720 .Lmemcpy_e_e:
24721 .previous
24722 @@ -76,13 +78,13 @@ ENTRY(memcpy)
24723 */
24724 movq 0*8(%rsi), %r8
24725 movq 1*8(%rsi), %r9
24726 - movq 2*8(%rsi), %r10
24727 + movq 2*8(%rsi), %rcx
24728 movq 3*8(%rsi), %r11
24729 leaq 4*8(%rsi), %rsi
24730
24731 movq %r8, 0*8(%rdi)
24732 movq %r9, 1*8(%rdi)
24733 - movq %r10, 2*8(%rdi)
24734 + movq %rcx, 2*8(%rdi)
24735 movq %r11, 3*8(%rdi)
24736 leaq 4*8(%rdi), %rdi
24737 jae .Lcopy_forward_loop
24738 @@ -105,12 +107,12 @@ ENTRY(memcpy)
24739 subq $0x20, %rdx
24740 movq -1*8(%rsi), %r8
24741 movq -2*8(%rsi), %r9
24742 - movq -3*8(%rsi), %r10
24743 + movq -3*8(%rsi), %rcx
24744 movq -4*8(%rsi), %r11
24745 leaq -4*8(%rsi), %rsi
24746 movq %r8, -1*8(%rdi)
24747 movq %r9, -2*8(%rdi)
24748 - movq %r10, -3*8(%rdi)
24749 + movq %rcx, -3*8(%rdi)
24750 movq %r11, -4*8(%rdi)
24751 leaq -4*8(%rdi), %rdi
24752 jae .Lcopy_backward_loop
24753 @@ -130,12 +132,13 @@ ENTRY(memcpy)
24754 */
24755 movq 0*8(%rsi), %r8
24756 movq 1*8(%rsi), %r9
24757 - movq -2*8(%rsi, %rdx), %r10
24758 + movq -2*8(%rsi, %rdx), %rcx
24759 movq -1*8(%rsi, %rdx), %r11
24760 movq %r8, 0*8(%rdi)
24761 movq %r9, 1*8(%rdi)
24762 - movq %r10, -2*8(%rdi, %rdx)
24763 + movq %rcx, -2*8(%rdi, %rdx)
24764 movq %r11, -1*8(%rdi, %rdx)
24765 + pax_force_retaddr
24766 retq
24767 .p2align 4
24768 .Lless_16bytes:
24769 @@ -148,6 +151,7 @@ ENTRY(memcpy)
24770 movq -1*8(%rsi, %rdx), %r9
24771 movq %r8, 0*8(%rdi)
24772 movq %r9, -1*8(%rdi, %rdx)
24773 + pax_force_retaddr
24774 retq
24775 .p2align 4
24776 .Lless_8bytes:
24777 @@ -161,6 +165,7 @@ ENTRY(memcpy)
24778 movl -4(%rsi, %rdx), %r8d
24779 movl %ecx, (%rdi)
24780 movl %r8d, -4(%rdi, %rdx)
24781 + pax_force_retaddr
24782 retq
24783 .p2align 4
24784 .Lless_3bytes:
24785 @@ -179,6 +184,7 @@ ENTRY(memcpy)
24786 movb %cl, (%rdi)
24787
24788 .Lend:
24789 + pax_force_retaddr
24790 retq
24791 CFI_ENDPROC
24792 ENDPROC(memcpy)
24793 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
24794 index ee16461..c39c199 100644
24795 --- a/arch/x86/lib/memmove_64.S
24796 +++ b/arch/x86/lib/memmove_64.S
24797 @@ -61,13 +61,13 @@ ENTRY(memmove)
24798 5:
24799 sub $0x20, %rdx
24800 movq 0*8(%rsi), %r11
24801 - movq 1*8(%rsi), %r10
24802 + movq 1*8(%rsi), %rcx
24803 movq 2*8(%rsi), %r9
24804 movq 3*8(%rsi), %r8
24805 leaq 4*8(%rsi), %rsi
24806
24807 movq %r11, 0*8(%rdi)
24808 - movq %r10, 1*8(%rdi)
24809 + movq %rcx, 1*8(%rdi)
24810 movq %r9, 2*8(%rdi)
24811 movq %r8, 3*8(%rdi)
24812 leaq 4*8(%rdi), %rdi
24813 @@ -81,10 +81,10 @@ ENTRY(memmove)
24814 4:
24815 movq %rdx, %rcx
24816 movq -8(%rsi, %rdx), %r11
24817 - lea -8(%rdi, %rdx), %r10
24818 + lea -8(%rdi, %rdx), %r9
24819 shrq $3, %rcx
24820 rep movsq
24821 - movq %r11, (%r10)
24822 + movq %r11, (%r9)
24823 jmp 13f
24824 .Lmemmove_end_forward:
24825
24826 @@ -95,14 +95,14 @@ ENTRY(memmove)
24827 7:
24828 movq %rdx, %rcx
24829 movq (%rsi), %r11
24830 - movq %rdi, %r10
24831 + movq %rdi, %r9
24832 leaq -8(%rsi, %rdx), %rsi
24833 leaq -8(%rdi, %rdx), %rdi
24834 shrq $3, %rcx
24835 std
24836 rep movsq
24837 cld
24838 - movq %r11, (%r10)
24839 + movq %r11, (%r9)
24840 jmp 13f
24841
24842 /*
24843 @@ -127,13 +127,13 @@ ENTRY(memmove)
24844 8:
24845 subq $0x20, %rdx
24846 movq -1*8(%rsi), %r11
24847 - movq -2*8(%rsi), %r10
24848 + movq -2*8(%rsi), %rcx
24849 movq -3*8(%rsi), %r9
24850 movq -4*8(%rsi), %r8
24851 leaq -4*8(%rsi), %rsi
24852
24853 movq %r11, -1*8(%rdi)
24854 - movq %r10, -2*8(%rdi)
24855 + movq %rcx, -2*8(%rdi)
24856 movq %r9, -3*8(%rdi)
24857 movq %r8, -4*8(%rdi)
24858 leaq -4*8(%rdi), %rdi
24859 @@ -151,11 +151,11 @@ ENTRY(memmove)
24860 * Move data from 16 bytes to 31 bytes.
24861 */
24862 movq 0*8(%rsi), %r11
24863 - movq 1*8(%rsi), %r10
24864 + movq 1*8(%rsi), %rcx
24865 movq -2*8(%rsi, %rdx), %r9
24866 movq -1*8(%rsi, %rdx), %r8
24867 movq %r11, 0*8(%rdi)
24868 - movq %r10, 1*8(%rdi)
24869 + movq %rcx, 1*8(%rdi)
24870 movq %r9, -2*8(%rdi, %rdx)
24871 movq %r8, -1*8(%rdi, %rdx)
24872 jmp 13f
24873 @@ -167,9 +167,9 @@ ENTRY(memmove)
24874 * Move data from 8 bytes to 15 bytes.
24875 */
24876 movq 0*8(%rsi), %r11
24877 - movq -1*8(%rsi, %rdx), %r10
24878 + movq -1*8(%rsi, %rdx), %r9
24879 movq %r11, 0*8(%rdi)
24880 - movq %r10, -1*8(%rdi, %rdx)
24881 + movq %r9, -1*8(%rdi, %rdx)
24882 jmp 13f
24883 10:
24884 cmpq $4, %rdx
24885 @@ -178,9 +178,9 @@ ENTRY(memmove)
24886 * Move data from 4 bytes to 7 bytes.
24887 */
24888 movl (%rsi), %r11d
24889 - movl -4(%rsi, %rdx), %r10d
24890 + movl -4(%rsi, %rdx), %r9d
24891 movl %r11d, (%rdi)
24892 - movl %r10d, -4(%rdi, %rdx)
24893 + movl %r9d, -4(%rdi, %rdx)
24894 jmp 13f
24895 11:
24896 cmp $2, %rdx
24897 @@ -189,9 +189,9 @@ ENTRY(memmove)
24898 * Move data from 2 bytes to 3 bytes.
24899 */
24900 movw (%rsi), %r11w
24901 - movw -2(%rsi, %rdx), %r10w
24902 + movw -2(%rsi, %rdx), %r9w
24903 movw %r11w, (%rdi)
24904 - movw %r10w, -2(%rdi, %rdx)
24905 + movw %r9w, -2(%rdi, %rdx)
24906 jmp 13f
24907 12:
24908 cmp $1, %rdx
24909 @@ -202,6 +202,7 @@ ENTRY(memmove)
24910 movb (%rsi), %r11b
24911 movb %r11b, (%rdi)
24912 13:
24913 + pax_force_retaddr
24914 retq
24915 CFI_ENDPROC
24916
24917 @@ -210,6 +211,7 @@ ENTRY(memmove)
24918 /* Forward moving data. */
24919 movq %rdx, %rcx
24920 rep movsb
24921 + pax_force_retaddr
24922 retq
24923 .Lmemmove_end_forward_efs:
24924 .previous
24925 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
24926 index 2dcb380..963660a 100644
24927 --- a/arch/x86/lib/memset_64.S
24928 +++ b/arch/x86/lib/memset_64.S
24929 @@ -30,6 +30,7 @@
24930 movl %edx,%ecx
24931 rep stosb
24932 movq %r9,%rax
24933 + pax_force_retaddr
24934 ret
24935 .Lmemset_e:
24936 .previous
24937 @@ -52,6 +53,7 @@
24938 movq %rdx,%rcx
24939 rep stosb
24940 movq %r9,%rax
24941 + pax_force_retaddr
24942 ret
24943 .Lmemset_e_e:
24944 .previous
24945 @@ -59,7 +61,7 @@
24946 ENTRY(memset)
24947 ENTRY(__memset)
24948 CFI_STARTPROC
24949 - movq %rdi,%r10
24950 + movq %rdi,%r11
24951
24952 /* expand byte value */
24953 movzbl %sil,%ecx
24954 @@ -117,7 +119,8 @@ ENTRY(__memset)
24955 jnz .Lloop_1
24956
24957 .Lende:
24958 - movq %r10,%rax
24959 + movq %r11,%rax
24960 + pax_force_retaddr
24961 ret
24962
24963 CFI_RESTORE_STATE
24964 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
24965 index c9f2d9b..e7fd2c0 100644
24966 --- a/arch/x86/lib/mmx_32.c
24967 +++ b/arch/x86/lib/mmx_32.c
24968 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
24969 {
24970 void *p;
24971 int i;
24972 + unsigned long cr0;
24973
24974 if (unlikely(in_interrupt()))
24975 return __memcpy(to, from, len);
24976 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
24977 kernel_fpu_begin();
24978
24979 __asm__ __volatile__ (
24980 - "1: prefetch (%0)\n" /* This set is 28 bytes */
24981 - " prefetch 64(%0)\n"
24982 - " prefetch 128(%0)\n"
24983 - " prefetch 192(%0)\n"
24984 - " prefetch 256(%0)\n"
24985 + "1: prefetch (%1)\n" /* This set is 28 bytes */
24986 + " prefetch 64(%1)\n"
24987 + " prefetch 128(%1)\n"
24988 + " prefetch 192(%1)\n"
24989 + " prefetch 256(%1)\n"
24990 "2: \n"
24991 ".section .fixup, \"ax\"\n"
24992 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24993 + "3: \n"
24994 +
24995 +#ifdef CONFIG_PAX_KERNEXEC
24996 + " movl %%cr0, %0\n"
24997 + " movl %0, %%eax\n"
24998 + " andl $0xFFFEFFFF, %%eax\n"
24999 + " movl %%eax, %%cr0\n"
25000 +#endif
25001 +
25002 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25003 +
25004 +#ifdef CONFIG_PAX_KERNEXEC
25005 + " movl %0, %%cr0\n"
25006 +#endif
25007 +
25008 " jmp 2b\n"
25009 ".previous\n"
25010 _ASM_EXTABLE(1b, 3b)
25011 - : : "r" (from));
25012 + : "=&r" (cr0) : "r" (from) : "ax");
25013
25014 for ( ; i > 5; i--) {
25015 __asm__ __volatile__ (
25016 - "1: prefetch 320(%0)\n"
25017 - "2: movq (%0), %%mm0\n"
25018 - " movq 8(%0), %%mm1\n"
25019 - " movq 16(%0), %%mm2\n"
25020 - " movq 24(%0), %%mm3\n"
25021 - " movq %%mm0, (%1)\n"
25022 - " movq %%mm1, 8(%1)\n"
25023 - " movq %%mm2, 16(%1)\n"
25024 - " movq %%mm3, 24(%1)\n"
25025 - " movq 32(%0), %%mm0\n"
25026 - " movq 40(%0), %%mm1\n"
25027 - " movq 48(%0), %%mm2\n"
25028 - " movq 56(%0), %%mm3\n"
25029 - " movq %%mm0, 32(%1)\n"
25030 - " movq %%mm1, 40(%1)\n"
25031 - " movq %%mm2, 48(%1)\n"
25032 - " movq %%mm3, 56(%1)\n"
25033 + "1: prefetch 320(%1)\n"
25034 + "2: movq (%1), %%mm0\n"
25035 + " movq 8(%1), %%mm1\n"
25036 + " movq 16(%1), %%mm2\n"
25037 + " movq 24(%1), %%mm3\n"
25038 + " movq %%mm0, (%2)\n"
25039 + " movq %%mm1, 8(%2)\n"
25040 + " movq %%mm2, 16(%2)\n"
25041 + " movq %%mm3, 24(%2)\n"
25042 + " movq 32(%1), %%mm0\n"
25043 + " movq 40(%1), %%mm1\n"
25044 + " movq 48(%1), %%mm2\n"
25045 + " movq 56(%1), %%mm3\n"
25046 + " movq %%mm0, 32(%2)\n"
25047 + " movq %%mm1, 40(%2)\n"
25048 + " movq %%mm2, 48(%2)\n"
25049 + " movq %%mm3, 56(%2)\n"
25050 ".section .fixup, \"ax\"\n"
25051 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25052 + "3:\n"
25053 +
25054 +#ifdef CONFIG_PAX_KERNEXEC
25055 + " movl %%cr0, %0\n"
25056 + " movl %0, %%eax\n"
25057 + " andl $0xFFFEFFFF, %%eax\n"
25058 + " movl %%eax, %%cr0\n"
25059 +#endif
25060 +
25061 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25062 +
25063 +#ifdef CONFIG_PAX_KERNEXEC
25064 + " movl %0, %%cr0\n"
25065 +#endif
25066 +
25067 " jmp 2b\n"
25068 ".previous\n"
25069 _ASM_EXTABLE(1b, 3b)
25070 - : : "r" (from), "r" (to) : "memory");
25071 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25072
25073 from += 64;
25074 to += 64;
25075 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
25076 static void fast_copy_page(void *to, void *from)
25077 {
25078 int i;
25079 + unsigned long cr0;
25080
25081 kernel_fpu_begin();
25082
25083 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
25084 * but that is for later. -AV
25085 */
25086 __asm__ __volatile__(
25087 - "1: prefetch (%0)\n"
25088 - " prefetch 64(%0)\n"
25089 - " prefetch 128(%0)\n"
25090 - " prefetch 192(%0)\n"
25091 - " prefetch 256(%0)\n"
25092 + "1: prefetch (%1)\n"
25093 + " prefetch 64(%1)\n"
25094 + " prefetch 128(%1)\n"
25095 + " prefetch 192(%1)\n"
25096 + " prefetch 256(%1)\n"
25097 "2: \n"
25098 ".section .fixup, \"ax\"\n"
25099 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25100 + "3: \n"
25101 +
25102 +#ifdef CONFIG_PAX_KERNEXEC
25103 + " movl %%cr0, %0\n"
25104 + " movl %0, %%eax\n"
25105 + " andl $0xFFFEFFFF, %%eax\n"
25106 + " movl %%eax, %%cr0\n"
25107 +#endif
25108 +
25109 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25110 +
25111 +#ifdef CONFIG_PAX_KERNEXEC
25112 + " movl %0, %%cr0\n"
25113 +#endif
25114 +
25115 " jmp 2b\n"
25116 ".previous\n"
25117 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
25118 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
25119
25120 for (i = 0; i < (4096-320)/64; i++) {
25121 __asm__ __volatile__ (
25122 - "1: prefetch 320(%0)\n"
25123 - "2: movq (%0), %%mm0\n"
25124 - " movntq %%mm0, (%1)\n"
25125 - " movq 8(%0), %%mm1\n"
25126 - " movntq %%mm1, 8(%1)\n"
25127 - " movq 16(%0), %%mm2\n"
25128 - " movntq %%mm2, 16(%1)\n"
25129 - " movq 24(%0), %%mm3\n"
25130 - " movntq %%mm3, 24(%1)\n"
25131 - " movq 32(%0), %%mm4\n"
25132 - " movntq %%mm4, 32(%1)\n"
25133 - " movq 40(%0), %%mm5\n"
25134 - " movntq %%mm5, 40(%1)\n"
25135 - " movq 48(%0), %%mm6\n"
25136 - " movntq %%mm6, 48(%1)\n"
25137 - " movq 56(%0), %%mm7\n"
25138 - " movntq %%mm7, 56(%1)\n"
25139 + "1: prefetch 320(%1)\n"
25140 + "2: movq (%1), %%mm0\n"
25141 + " movntq %%mm0, (%2)\n"
25142 + " movq 8(%1), %%mm1\n"
25143 + " movntq %%mm1, 8(%2)\n"
25144 + " movq 16(%1), %%mm2\n"
25145 + " movntq %%mm2, 16(%2)\n"
25146 + " movq 24(%1), %%mm3\n"
25147 + " movntq %%mm3, 24(%2)\n"
25148 + " movq 32(%1), %%mm4\n"
25149 + " movntq %%mm4, 32(%2)\n"
25150 + " movq 40(%1), %%mm5\n"
25151 + " movntq %%mm5, 40(%2)\n"
25152 + " movq 48(%1), %%mm6\n"
25153 + " movntq %%mm6, 48(%2)\n"
25154 + " movq 56(%1), %%mm7\n"
25155 + " movntq %%mm7, 56(%2)\n"
25156 ".section .fixup, \"ax\"\n"
25157 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25158 + "3:\n"
25159 +
25160 +#ifdef CONFIG_PAX_KERNEXEC
25161 + " movl %%cr0, %0\n"
25162 + " movl %0, %%eax\n"
25163 + " andl $0xFFFEFFFF, %%eax\n"
25164 + " movl %%eax, %%cr0\n"
25165 +#endif
25166 +
25167 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25168 +
25169 +#ifdef CONFIG_PAX_KERNEXEC
25170 + " movl %0, %%cr0\n"
25171 +#endif
25172 +
25173 " jmp 2b\n"
25174 ".previous\n"
25175 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
25176 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25177
25178 from += 64;
25179 to += 64;
25180 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
25181 static void fast_copy_page(void *to, void *from)
25182 {
25183 int i;
25184 + unsigned long cr0;
25185
25186 kernel_fpu_begin();
25187
25188 __asm__ __volatile__ (
25189 - "1: prefetch (%0)\n"
25190 - " prefetch 64(%0)\n"
25191 - " prefetch 128(%0)\n"
25192 - " prefetch 192(%0)\n"
25193 - " prefetch 256(%0)\n"
25194 + "1: prefetch (%1)\n"
25195 + " prefetch 64(%1)\n"
25196 + " prefetch 128(%1)\n"
25197 + " prefetch 192(%1)\n"
25198 + " prefetch 256(%1)\n"
25199 "2: \n"
25200 ".section .fixup, \"ax\"\n"
25201 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25202 + "3: \n"
25203 +
25204 +#ifdef CONFIG_PAX_KERNEXEC
25205 + " movl %%cr0, %0\n"
25206 + " movl %0, %%eax\n"
25207 + " andl $0xFFFEFFFF, %%eax\n"
25208 + " movl %%eax, %%cr0\n"
25209 +#endif
25210 +
25211 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25212 +
25213 +#ifdef CONFIG_PAX_KERNEXEC
25214 + " movl %0, %%cr0\n"
25215 +#endif
25216 +
25217 " jmp 2b\n"
25218 ".previous\n"
25219 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
25220 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
25221
25222 for (i = 0; i < 4096/64; i++) {
25223 __asm__ __volatile__ (
25224 - "1: prefetch 320(%0)\n"
25225 - "2: movq (%0), %%mm0\n"
25226 - " movq 8(%0), %%mm1\n"
25227 - " movq 16(%0), %%mm2\n"
25228 - " movq 24(%0), %%mm3\n"
25229 - " movq %%mm0, (%1)\n"
25230 - " movq %%mm1, 8(%1)\n"
25231 - " movq %%mm2, 16(%1)\n"
25232 - " movq %%mm3, 24(%1)\n"
25233 - " movq 32(%0), %%mm0\n"
25234 - " movq 40(%0), %%mm1\n"
25235 - " movq 48(%0), %%mm2\n"
25236 - " movq 56(%0), %%mm3\n"
25237 - " movq %%mm0, 32(%1)\n"
25238 - " movq %%mm1, 40(%1)\n"
25239 - " movq %%mm2, 48(%1)\n"
25240 - " movq %%mm3, 56(%1)\n"
25241 + "1: prefetch 320(%1)\n"
25242 + "2: movq (%1), %%mm0\n"
25243 + " movq 8(%1), %%mm1\n"
25244 + " movq 16(%1), %%mm2\n"
25245 + " movq 24(%1), %%mm3\n"
25246 + " movq %%mm0, (%2)\n"
25247 + " movq %%mm1, 8(%2)\n"
25248 + " movq %%mm2, 16(%2)\n"
25249 + " movq %%mm3, 24(%2)\n"
25250 + " movq 32(%1), %%mm0\n"
25251 + " movq 40(%1), %%mm1\n"
25252 + " movq 48(%1), %%mm2\n"
25253 + " movq 56(%1), %%mm3\n"
25254 + " movq %%mm0, 32(%2)\n"
25255 + " movq %%mm1, 40(%2)\n"
25256 + " movq %%mm2, 48(%2)\n"
25257 + " movq %%mm3, 56(%2)\n"
25258 ".section .fixup, \"ax\"\n"
25259 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25260 + "3:\n"
25261 +
25262 +#ifdef CONFIG_PAX_KERNEXEC
25263 + " movl %%cr0, %0\n"
25264 + " movl %0, %%eax\n"
25265 + " andl $0xFFFEFFFF, %%eax\n"
25266 + " movl %%eax, %%cr0\n"
25267 +#endif
25268 +
25269 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25270 +
25271 +#ifdef CONFIG_PAX_KERNEXEC
25272 + " movl %0, %%cr0\n"
25273 +#endif
25274 +
25275 " jmp 2b\n"
25276 ".previous\n"
25277 _ASM_EXTABLE(1b, 3b)
25278 - : : "r" (from), "r" (to) : "memory");
25279 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25280
25281 from += 64;
25282 to += 64;
25283 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
25284 index f6d13ee..aca5f0b 100644
25285 --- a/arch/x86/lib/msr-reg.S
25286 +++ b/arch/x86/lib/msr-reg.S
25287 @@ -3,6 +3,7 @@
25288 #include <asm/dwarf2.h>
25289 #include <asm/asm.h>
25290 #include <asm/msr.h>
25291 +#include <asm/alternative-asm.h>
25292
25293 #ifdef CONFIG_X86_64
25294 /*
25295 @@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
25296 CFI_STARTPROC
25297 pushq_cfi %rbx
25298 pushq_cfi %rbp
25299 - movq %rdi, %r10 /* Save pointer */
25300 + movq %rdi, %r9 /* Save pointer */
25301 xorl %r11d, %r11d /* Return value */
25302 movl (%rdi), %eax
25303 movl 4(%rdi), %ecx
25304 @@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
25305 movl 28(%rdi), %edi
25306 CFI_REMEMBER_STATE
25307 1: \op
25308 -2: movl %eax, (%r10)
25309 +2: movl %eax, (%r9)
25310 movl %r11d, %eax /* Return value */
25311 - movl %ecx, 4(%r10)
25312 - movl %edx, 8(%r10)
25313 - movl %ebx, 12(%r10)
25314 - movl %ebp, 20(%r10)
25315 - movl %esi, 24(%r10)
25316 - movl %edi, 28(%r10)
25317 + movl %ecx, 4(%r9)
25318 + movl %edx, 8(%r9)
25319 + movl %ebx, 12(%r9)
25320 + movl %ebp, 20(%r9)
25321 + movl %esi, 24(%r9)
25322 + movl %edi, 28(%r9)
25323 popq_cfi %rbp
25324 popq_cfi %rbx
25325 + pax_force_retaddr
25326 ret
25327 3:
25328 CFI_RESTORE_STATE
25329 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
25330 index fc6ba17..04471c5 100644
25331 --- a/arch/x86/lib/putuser.S
25332 +++ b/arch/x86/lib/putuser.S
25333 @@ -16,7 +16,9 @@
25334 #include <asm/errno.h>
25335 #include <asm/asm.h>
25336 #include <asm/smap.h>
25337 -
25338 +#include <asm/segment.h>
25339 +#include <asm/pgtable.h>
25340 +#include <asm/alternative-asm.h>
25341
25342 /*
25343 * __put_user_X
25344 @@ -30,57 +32,125 @@
25345 * as they get called from within inline assembly.
25346 */
25347
25348 -#define ENTER CFI_STARTPROC ; \
25349 - GET_THREAD_INFO(%_ASM_BX)
25350 -#define EXIT ASM_CLAC ; \
25351 - ret ; \
25352 +#define ENTER CFI_STARTPROC
25353 +#define EXIT ASM_CLAC ; \
25354 + pax_force_retaddr ; \
25355 + ret ; \
25356 CFI_ENDPROC
25357
25358 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25359 +#define _DEST %_ASM_CX,%_ASM_BX
25360 +#else
25361 +#define _DEST %_ASM_CX
25362 +#endif
25363 +
25364 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25365 +#define __copyuser_seg gs;
25366 +#else
25367 +#define __copyuser_seg
25368 +#endif
25369 +
25370 .text
25371 ENTRY(__put_user_1)
25372 ENTER
25373 +
25374 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25375 + GET_THREAD_INFO(%_ASM_BX)
25376 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
25377 jae bad_put_user
25378 ASM_STAC
25379 -1: movb %al,(%_ASM_CX)
25380 +
25381 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25382 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25383 + cmp %_ASM_BX,%_ASM_CX
25384 + jb 1234f
25385 + xor %ebx,%ebx
25386 +1234:
25387 +#endif
25388 +
25389 +#endif
25390 +
25391 +1: __copyuser_seg movb %al,(_DEST)
25392 xor %eax,%eax
25393 EXIT
25394 ENDPROC(__put_user_1)
25395
25396 ENTRY(__put_user_2)
25397 ENTER
25398 +
25399 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25400 + GET_THREAD_INFO(%_ASM_BX)
25401 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
25402 sub $1,%_ASM_BX
25403 cmp %_ASM_BX,%_ASM_CX
25404 jae bad_put_user
25405 ASM_STAC
25406 -2: movw %ax,(%_ASM_CX)
25407 +
25408 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25409 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25410 + cmp %_ASM_BX,%_ASM_CX
25411 + jb 1234f
25412 + xor %ebx,%ebx
25413 +1234:
25414 +#endif
25415 +
25416 +#endif
25417 +
25418 +2: __copyuser_seg movw %ax,(_DEST)
25419 xor %eax,%eax
25420 EXIT
25421 ENDPROC(__put_user_2)
25422
25423 ENTRY(__put_user_4)
25424 ENTER
25425 +
25426 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25427 + GET_THREAD_INFO(%_ASM_BX)
25428 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
25429 sub $3,%_ASM_BX
25430 cmp %_ASM_BX,%_ASM_CX
25431 jae bad_put_user
25432 ASM_STAC
25433 -3: movl %eax,(%_ASM_CX)
25434 +
25435 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25436 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25437 + cmp %_ASM_BX,%_ASM_CX
25438 + jb 1234f
25439 + xor %ebx,%ebx
25440 +1234:
25441 +#endif
25442 +
25443 +#endif
25444 +
25445 +3: __copyuser_seg movl %eax,(_DEST)
25446 xor %eax,%eax
25447 EXIT
25448 ENDPROC(__put_user_4)
25449
25450 ENTRY(__put_user_8)
25451 ENTER
25452 +
25453 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25454 + GET_THREAD_INFO(%_ASM_BX)
25455 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
25456 sub $7,%_ASM_BX
25457 cmp %_ASM_BX,%_ASM_CX
25458 jae bad_put_user
25459 ASM_STAC
25460 -4: mov %_ASM_AX,(%_ASM_CX)
25461 +
25462 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25463 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25464 + cmp %_ASM_BX,%_ASM_CX
25465 + jb 1234f
25466 + xor %ebx,%ebx
25467 +1234:
25468 +#endif
25469 +
25470 +#endif
25471 +
25472 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
25473 #ifdef CONFIG_X86_32
25474 -5: movl %edx,4(%_ASM_CX)
25475 +5: __copyuser_seg movl %edx,4(_DEST)
25476 #endif
25477 xor %eax,%eax
25478 EXIT
25479 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
25480 index 1cad221..de671ee 100644
25481 --- a/arch/x86/lib/rwlock.S
25482 +++ b/arch/x86/lib/rwlock.S
25483 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
25484 FRAME
25485 0: LOCK_PREFIX
25486 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
25487 +
25488 +#ifdef CONFIG_PAX_REFCOUNT
25489 + jno 1234f
25490 + LOCK_PREFIX
25491 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
25492 + int $4
25493 +1234:
25494 + _ASM_EXTABLE(1234b, 1234b)
25495 +#endif
25496 +
25497 1: rep; nop
25498 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
25499 jne 1b
25500 LOCK_PREFIX
25501 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
25502 +
25503 +#ifdef CONFIG_PAX_REFCOUNT
25504 + jno 1234f
25505 + LOCK_PREFIX
25506 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
25507 + int $4
25508 +1234:
25509 + _ASM_EXTABLE(1234b, 1234b)
25510 +#endif
25511 +
25512 jnz 0b
25513 ENDFRAME
25514 + pax_force_retaddr
25515 ret
25516 CFI_ENDPROC
25517 END(__write_lock_failed)
25518 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
25519 FRAME
25520 0: LOCK_PREFIX
25521 READ_LOCK_SIZE(inc) (%__lock_ptr)
25522 +
25523 +#ifdef CONFIG_PAX_REFCOUNT
25524 + jno 1234f
25525 + LOCK_PREFIX
25526 + READ_LOCK_SIZE(dec) (%__lock_ptr)
25527 + int $4
25528 +1234:
25529 + _ASM_EXTABLE(1234b, 1234b)
25530 +#endif
25531 +
25532 1: rep; nop
25533 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
25534 js 1b
25535 LOCK_PREFIX
25536 READ_LOCK_SIZE(dec) (%__lock_ptr)
25537 +
25538 +#ifdef CONFIG_PAX_REFCOUNT
25539 + jno 1234f
25540 + LOCK_PREFIX
25541 + READ_LOCK_SIZE(inc) (%__lock_ptr)
25542 + int $4
25543 +1234:
25544 + _ASM_EXTABLE(1234b, 1234b)
25545 +#endif
25546 +
25547 js 0b
25548 ENDFRAME
25549 + pax_force_retaddr
25550 ret
25551 CFI_ENDPROC
25552 END(__read_lock_failed)
25553 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
25554 index 5dff5f0..cadebf4 100644
25555 --- a/arch/x86/lib/rwsem.S
25556 +++ b/arch/x86/lib/rwsem.S
25557 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
25558 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
25559 CFI_RESTORE __ASM_REG(dx)
25560 restore_common_regs
25561 + pax_force_retaddr
25562 ret
25563 CFI_ENDPROC
25564 ENDPROC(call_rwsem_down_read_failed)
25565 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
25566 movq %rax,%rdi
25567 call rwsem_down_write_failed
25568 restore_common_regs
25569 + pax_force_retaddr
25570 ret
25571 CFI_ENDPROC
25572 ENDPROC(call_rwsem_down_write_failed)
25573 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
25574 movq %rax,%rdi
25575 call rwsem_wake
25576 restore_common_regs
25577 -1: ret
25578 +1: pax_force_retaddr
25579 + ret
25580 CFI_ENDPROC
25581 ENDPROC(call_rwsem_wake)
25582
25583 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
25584 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
25585 CFI_RESTORE __ASM_REG(dx)
25586 restore_common_regs
25587 + pax_force_retaddr
25588 ret
25589 CFI_ENDPROC
25590 ENDPROC(call_rwsem_downgrade_wake)
25591 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
25592 index a63efd6..ccecad8 100644
25593 --- a/arch/x86/lib/thunk_64.S
25594 +++ b/arch/x86/lib/thunk_64.S
25595 @@ -8,6 +8,7 @@
25596 #include <linux/linkage.h>
25597 #include <asm/dwarf2.h>
25598 #include <asm/calling.h>
25599 +#include <asm/alternative-asm.h>
25600
25601 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
25602 .macro THUNK name, func, put_ret_addr_in_rdi=0
25603 @@ -41,5 +42,6 @@
25604 SAVE_ARGS
25605 restore:
25606 RESTORE_ARGS
25607 + pax_force_retaddr
25608 ret
25609 CFI_ENDPROC
25610 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
25611 index f0312d7..9c39d63 100644
25612 --- a/arch/x86/lib/usercopy_32.c
25613 +++ b/arch/x86/lib/usercopy_32.c
25614 @@ -42,11 +42,13 @@ do { \
25615 int __d0; \
25616 might_fault(); \
25617 __asm__ __volatile__( \
25618 + __COPYUSER_SET_ES \
25619 ASM_STAC "\n" \
25620 "0: rep; stosl\n" \
25621 " movl %2,%0\n" \
25622 "1: rep; stosb\n" \
25623 "2: " ASM_CLAC "\n" \
25624 + __COPYUSER_RESTORE_ES \
25625 ".section .fixup,\"ax\"\n" \
25626 "3: lea 0(%2,%0,4),%0\n" \
25627 " jmp 2b\n" \
25628 @@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
25629
25630 #ifdef CONFIG_X86_INTEL_USERCOPY
25631 static unsigned long
25632 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
25633 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
25634 {
25635 int d0, d1;
25636 __asm__ __volatile__(
25637 @@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
25638 " .align 2,0x90\n"
25639 "3: movl 0(%4), %%eax\n"
25640 "4: movl 4(%4), %%edx\n"
25641 - "5: movl %%eax, 0(%3)\n"
25642 - "6: movl %%edx, 4(%3)\n"
25643 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
25644 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
25645 "7: movl 8(%4), %%eax\n"
25646 "8: movl 12(%4),%%edx\n"
25647 - "9: movl %%eax, 8(%3)\n"
25648 - "10: movl %%edx, 12(%3)\n"
25649 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
25650 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
25651 "11: movl 16(%4), %%eax\n"
25652 "12: movl 20(%4), %%edx\n"
25653 - "13: movl %%eax, 16(%3)\n"
25654 - "14: movl %%edx, 20(%3)\n"
25655 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
25656 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
25657 "15: movl 24(%4), %%eax\n"
25658 "16: movl 28(%4), %%edx\n"
25659 - "17: movl %%eax, 24(%3)\n"
25660 - "18: movl %%edx, 28(%3)\n"
25661 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
25662 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
25663 "19: movl 32(%4), %%eax\n"
25664 "20: movl 36(%4), %%edx\n"
25665 - "21: movl %%eax, 32(%3)\n"
25666 - "22: movl %%edx, 36(%3)\n"
25667 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
25668 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
25669 "23: movl 40(%4), %%eax\n"
25670 "24: movl 44(%4), %%edx\n"
25671 - "25: movl %%eax, 40(%3)\n"
25672 - "26: movl %%edx, 44(%3)\n"
25673 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
25674 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
25675 "27: movl 48(%4), %%eax\n"
25676 "28: movl 52(%4), %%edx\n"
25677 - "29: movl %%eax, 48(%3)\n"
25678 - "30: movl %%edx, 52(%3)\n"
25679 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
25680 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
25681 "31: movl 56(%4), %%eax\n"
25682 "32: movl 60(%4), %%edx\n"
25683 - "33: movl %%eax, 56(%3)\n"
25684 - "34: movl %%edx, 60(%3)\n"
25685 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
25686 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
25687 " addl $-64, %0\n"
25688 " addl $64, %4\n"
25689 " addl $64, %3\n"
25690 @@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
25691 " shrl $2, %0\n"
25692 " andl $3, %%eax\n"
25693 " cld\n"
25694 + __COPYUSER_SET_ES
25695 "99: rep; movsl\n"
25696 "36: movl %%eax, %0\n"
25697 "37: rep; movsb\n"
25698 "100:\n"
25699 + __COPYUSER_RESTORE_ES
25700 ".section .fixup,\"ax\"\n"
25701 "101: lea 0(%%eax,%0,4),%0\n"
25702 " jmp 100b\n"
25703 @@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
25704 }
25705
25706 static unsigned long
25707 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
25708 +{
25709 + int d0, d1;
25710 + __asm__ __volatile__(
25711 + " .align 2,0x90\n"
25712 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
25713 + " cmpl $67, %0\n"
25714 + " jbe 3f\n"
25715 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
25716 + " .align 2,0x90\n"
25717 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
25718 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
25719 + "5: movl %%eax, 0(%3)\n"
25720 + "6: movl %%edx, 4(%3)\n"
25721 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
25722 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
25723 + "9: movl %%eax, 8(%3)\n"
25724 + "10: movl %%edx, 12(%3)\n"
25725 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
25726 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
25727 + "13: movl %%eax, 16(%3)\n"
25728 + "14: movl %%edx, 20(%3)\n"
25729 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
25730 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
25731 + "17: movl %%eax, 24(%3)\n"
25732 + "18: movl %%edx, 28(%3)\n"
25733 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
25734 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
25735 + "21: movl %%eax, 32(%3)\n"
25736 + "22: movl %%edx, 36(%3)\n"
25737 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
25738 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
25739 + "25: movl %%eax, 40(%3)\n"
25740 + "26: movl %%edx, 44(%3)\n"
25741 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
25742 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
25743 + "29: movl %%eax, 48(%3)\n"
25744 + "30: movl %%edx, 52(%3)\n"
25745 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
25746 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
25747 + "33: movl %%eax, 56(%3)\n"
25748 + "34: movl %%edx, 60(%3)\n"
25749 + " addl $-64, %0\n"
25750 + " addl $64, %4\n"
25751 + " addl $64, %3\n"
25752 + " cmpl $63, %0\n"
25753 + " ja 1b\n"
25754 + "35: movl %0, %%eax\n"
25755 + " shrl $2, %0\n"
25756 + " andl $3, %%eax\n"
25757 + " cld\n"
25758 + "99: rep; "__copyuser_seg" movsl\n"
25759 + "36: movl %%eax, %0\n"
25760 + "37: rep; "__copyuser_seg" movsb\n"
25761 + "100:\n"
25762 + ".section .fixup,\"ax\"\n"
25763 + "101: lea 0(%%eax,%0,4),%0\n"
25764 + " jmp 100b\n"
25765 + ".previous\n"
25766 + _ASM_EXTABLE(1b,100b)
25767 + _ASM_EXTABLE(2b,100b)
25768 + _ASM_EXTABLE(3b,100b)
25769 + _ASM_EXTABLE(4b,100b)
25770 + _ASM_EXTABLE(5b,100b)
25771 + _ASM_EXTABLE(6b,100b)
25772 + _ASM_EXTABLE(7b,100b)
25773 + _ASM_EXTABLE(8b,100b)
25774 + _ASM_EXTABLE(9b,100b)
25775 + _ASM_EXTABLE(10b,100b)
25776 + _ASM_EXTABLE(11b,100b)
25777 + _ASM_EXTABLE(12b,100b)
25778 + _ASM_EXTABLE(13b,100b)
25779 + _ASM_EXTABLE(14b,100b)
25780 + _ASM_EXTABLE(15b,100b)
25781 + _ASM_EXTABLE(16b,100b)
25782 + _ASM_EXTABLE(17b,100b)
25783 + _ASM_EXTABLE(18b,100b)
25784 + _ASM_EXTABLE(19b,100b)
25785 + _ASM_EXTABLE(20b,100b)
25786 + _ASM_EXTABLE(21b,100b)
25787 + _ASM_EXTABLE(22b,100b)
25788 + _ASM_EXTABLE(23b,100b)
25789 + _ASM_EXTABLE(24b,100b)
25790 + _ASM_EXTABLE(25b,100b)
25791 + _ASM_EXTABLE(26b,100b)
25792 + _ASM_EXTABLE(27b,100b)
25793 + _ASM_EXTABLE(28b,100b)
25794 + _ASM_EXTABLE(29b,100b)
25795 + _ASM_EXTABLE(30b,100b)
25796 + _ASM_EXTABLE(31b,100b)
25797 + _ASM_EXTABLE(32b,100b)
25798 + _ASM_EXTABLE(33b,100b)
25799 + _ASM_EXTABLE(34b,100b)
25800 + _ASM_EXTABLE(35b,100b)
25801 + _ASM_EXTABLE(36b,100b)
25802 + _ASM_EXTABLE(37b,100b)
25803 + _ASM_EXTABLE(99b,101b)
25804 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
25805 + : "1"(to), "2"(from), "0"(size)
25806 + : "eax", "edx", "memory");
25807 + return size;
25808 +}
25809 +
25810 +static unsigned long __size_overflow(3)
25811 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
25812 {
25813 int d0, d1;
25814 __asm__ __volatile__(
25815 " .align 2,0x90\n"
25816 - "0: movl 32(%4), %%eax\n"
25817 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
25818 " cmpl $67, %0\n"
25819 " jbe 2f\n"
25820 - "1: movl 64(%4), %%eax\n"
25821 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
25822 " .align 2,0x90\n"
25823 - "2: movl 0(%4), %%eax\n"
25824 - "21: movl 4(%4), %%edx\n"
25825 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
25826 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
25827 " movl %%eax, 0(%3)\n"
25828 " movl %%edx, 4(%3)\n"
25829 - "3: movl 8(%4), %%eax\n"
25830 - "31: movl 12(%4),%%edx\n"
25831 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
25832 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
25833 " movl %%eax, 8(%3)\n"
25834 " movl %%edx, 12(%3)\n"
25835 - "4: movl 16(%4), %%eax\n"
25836 - "41: movl 20(%4), %%edx\n"
25837 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
25838 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
25839 " movl %%eax, 16(%3)\n"
25840 " movl %%edx, 20(%3)\n"
25841 - "10: movl 24(%4), %%eax\n"
25842 - "51: movl 28(%4), %%edx\n"
25843 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
25844 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
25845 " movl %%eax, 24(%3)\n"
25846 " movl %%edx, 28(%3)\n"
25847 - "11: movl 32(%4), %%eax\n"
25848 - "61: movl 36(%4), %%edx\n"
25849 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
25850 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
25851 " movl %%eax, 32(%3)\n"
25852 " movl %%edx, 36(%3)\n"
25853 - "12: movl 40(%4), %%eax\n"
25854 - "71: movl 44(%4), %%edx\n"
25855 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
25856 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
25857 " movl %%eax, 40(%3)\n"
25858 " movl %%edx, 44(%3)\n"
25859 - "13: movl 48(%4), %%eax\n"
25860 - "81: movl 52(%4), %%edx\n"
25861 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
25862 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
25863 " movl %%eax, 48(%3)\n"
25864 " movl %%edx, 52(%3)\n"
25865 - "14: movl 56(%4), %%eax\n"
25866 - "91: movl 60(%4), %%edx\n"
25867 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
25868 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
25869 " movl %%eax, 56(%3)\n"
25870 " movl %%edx, 60(%3)\n"
25871 " addl $-64, %0\n"
25872 @@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
25873 " shrl $2, %0\n"
25874 " andl $3, %%eax\n"
25875 " cld\n"
25876 - "6: rep; movsl\n"
25877 + "6: rep; "__copyuser_seg" movsl\n"
25878 " movl %%eax,%0\n"
25879 - "7: rep; movsb\n"
25880 + "7: rep; "__copyuser_seg" movsb\n"
25881 "8:\n"
25882 ".section .fixup,\"ax\"\n"
25883 "9: lea 0(%%eax,%0,4),%0\n"
25884 @@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
25885 * hyoshiok@miraclelinux.com
25886 */
25887
25888 -static unsigned long __copy_user_zeroing_intel_nocache(void *to,
25889 +static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
25890 const void __user *from, unsigned long size)
25891 {
25892 int d0, d1;
25893
25894 __asm__ __volatile__(
25895 " .align 2,0x90\n"
25896 - "0: movl 32(%4), %%eax\n"
25897 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
25898 " cmpl $67, %0\n"
25899 " jbe 2f\n"
25900 - "1: movl 64(%4), %%eax\n"
25901 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
25902 " .align 2,0x90\n"
25903 - "2: movl 0(%4), %%eax\n"
25904 - "21: movl 4(%4), %%edx\n"
25905 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
25906 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
25907 " movnti %%eax, 0(%3)\n"
25908 " movnti %%edx, 4(%3)\n"
25909 - "3: movl 8(%4), %%eax\n"
25910 - "31: movl 12(%4),%%edx\n"
25911 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
25912 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
25913 " movnti %%eax, 8(%3)\n"
25914 " movnti %%edx, 12(%3)\n"
25915 - "4: movl 16(%4), %%eax\n"
25916 - "41: movl 20(%4), %%edx\n"
25917 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
25918 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
25919 " movnti %%eax, 16(%3)\n"
25920 " movnti %%edx, 20(%3)\n"
25921 - "10: movl 24(%4), %%eax\n"
25922 - "51: movl 28(%4), %%edx\n"
25923 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
25924 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
25925 " movnti %%eax, 24(%3)\n"
25926 " movnti %%edx, 28(%3)\n"
25927 - "11: movl 32(%4), %%eax\n"
25928 - "61: movl 36(%4), %%edx\n"
25929 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
25930 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
25931 " movnti %%eax, 32(%3)\n"
25932 " movnti %%edx, 36(%3)\n"
25933 - "12: movl 40(%4), %%eax\n"
25934 - "71: movl 44(%4), %%edx\n"
25935 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
25936 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
25937 " movnti %%eax, 40(%3)\n"
25938 " movnti %%edx, 44(%3)\n"
25939 - "13: movl 48(%4), %%eax\n"
25940 - "81: movl 52(%4), %%edx\n"
25941 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
25942 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
25943 " movnti %%eax, 48(%3)\n"
25944 " movnti %%edx, 52(%3)\n"
25945 - "14: movl 56(%4), %%eax\n"
25946 - "91: movl 60(%4), %%edx\n"
25947 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
25948 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
25949 " movnti %%eax, 56(%3)\n"
25950 " movnti %%edx, 60(%3)\n"
25951 " addl $-64, %0\n"
25952 @@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
25953 " shrl $2, %0\n"
25954 " andl $3, %%eax\n"
25955 " cld\n"
25956 - "6: rep; movsl\n"
25957 + "6: rep; "__copyuser_seg" movsl\n"
25958 " movl %%eax,%0\n"
25959 - "7: rep; movsb\n"
25960 + "7: rep; "__copyuser_seg" movsb\n"
25961 "8:\n"
25962 ".section .fixup,\"ax\"\n"
25963 "9: lea 0(%%eax,%0,4),%0\n"
25964 @@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
25965 return size;
25966 }
25967
25968 -static unsigned long __copy_user_intel_nocache(void *to,
25969 +static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
25970 const void __user *from, unsigned long size)
25971 {
25972 int d0, d1;
25973
25974 __asm__ __volatile__(
25975 " .align 2,0x90\n"
25976 - "0: movl 32(%4), %%eax\n"
25977 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
25978 " cmpl $67, %0\n"
25979 " jbe 2f\n"
25980 - "1: movl 64(%4), %%eax\n"
25981 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
25982 " .align 2,0x90\n"
25983 - "2: movl 0(%4), %%eax\n"
25984 - "21: movl 4(%4), %%edx\n"
25985 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
25986 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
25987 " movnti %%eax, 0(%3)\n"
25988 " movnti %%edx, 4(%3)\n"
25989 - "3: movl 8(%4), %%eax\n"
25990 - "31: movl 12(%4),%%edx\n"
25991 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
25992 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
25993 " movnti %%eax, 8(%3)\n"
25994 " movnti %%edx, 12(%3)\n"
25995 - "4: movl 16(%4), %%eax\n"
25996 - "41: movl 20(%4), %%edx\n"
25997 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
25998 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
25999 " movnti %%eax, 16(%3)\n"
26000 " movnti %%edx, 20(%3)\n"
26001 - "10: movl 24(%4), %%eax\n"
26002 - "51: movl 28(%4), %%edx\n"
26003 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26004 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26005 " movnti %%eax, 24(%3)\n"
26006 " movnti %%edx, 28(%3)\n"
26007 - "11: movl 32(%4), %%eax\n"
26008 - "61: movl 36(%4), %%edx\n"
26009 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26010 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26011 " movnti %%eax, 32(%3)\n"
26012 " movnti %%edx, 36(%3)\n"
26013 - "12: movl 40(%4), %%eax\n"
26014 - "71: movl 44(%4), %%edx\n"
26015 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26016 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26017 " movnti %%eax, 40(%3)\n"
26018 " movnti %%edx, 44(%3)\n"
26019 - "13: movl 48(%4), %%eax\n"
26020 - "81: movl 52(%4), %%edx\n"
26021 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26022 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26023 " movnti %%eax, 48(%3)\n"
26024 " movnti %%edx, 52(%3)\n"
26025 - "14: movl 56(%4), %%eax\n"
26026 - "91: movl 60(%4), %%edx\n"
26027 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26028 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26029 " movnti %%eax, 56(%3)\n"
26030 " movnti %%edx, 60(%3)\n"
26031 " addl $-64, %0\n"
26032 @@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
26033 " shrl $2, %0\n"
26034 " andl $3, %%eax\n"
26035 " cld\n"
26036 - "6: rep; movsl\n"
26037 + "6: rep; "__copyuser_seg" movsl\n"
26038 " movl %%eax,%0\n"
26039 - "7: rep; movsb\n"
26040 + "7: rep; "__copyuser_seg" movsb\n"
26041 "8:\n"
26042 ".section .fixup,\"ax\"\n"
26043 "9: lea 0(%%eax,%0,4),%0\n"
26044 @@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
26045 */
26046 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
26047 unsigned long size);
26048 -unsigned long __copy_user_intel(void __user *to, const void *from,
26049 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
26050 + unsigned long size);
26051 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
26052 unsigned long size);
26053 unsigned long __copy_user_zeroing_intel_nocache(void *to,
26054 const void __user *from, unsigned long size);
26055 #endif /* CONFIG_X86_INTEL_USERCOPY */
26056
26057 /* Generic arbitrary sized copy. */
26058 -#define __copy_user(to, from, size) \
26059 +#define __copy_user(to, from, size, prefix, set, restore) \
26060 do { \
26061 int __d0, __d1, __d2; \
26062 __asm__ __volatile__( \
26063 + set \
26064 " cmp $7,%0\n" \
26065 " jbe 1f\n" \
26066 " movl %1,%0\n" \
26067 " negl %0\n" \
26068 " andl $7,%0\n" \
26069 " subl %0,%3\n" \
26070 - "4: rep; movsb\n" \
26071 + "4: rep; "prefix"movsb\n" \
26072 " movl %3,%0\n" \
26073 " shrl $2,%0\n" \
26074 " andl $3,%3\n" \
26075 " .align 2,0x90\n" \
26076 - "0: rep; movsl\n" \
26077 + "0: rep; "prefix"movsl\n" \
26078 " movl %3,%0\n" \
26079 - "1: rep; movsb\n" \
26080 + "1: rep; "prefix"movsb\n" \
26081 "2:\n" \
26082 + restore \
26083 ".section .fixup,\"ax\"\n" \
26084 "5: addl %3,%0\n" \
26085 " jmp 2b\n" \
26086 @@ -538,14 +650,14 @@ do { \
26087 " negl %0\n" \
26088 " andl $7,%0\n" \
26089 " subl %0,%3\n" \
26090 - "4: rep; movsb\n" \
26091 + "4: rep; "__copyuser_seg"movsb\n" \
26092 " movl %3,%0\n" \
26093 " shrl $2,%0\n" \
26094 " andl $3,%3\n" \
26095 " .align 2,0x90\n" \
26096 - "0: rep; movsl\n" \
26097 + "0: rep; "__copyuser_seg"movsl\n" \
26098 " movl %3,%0\n" \
26099 - "1: rep; movsb\n" \
26100 + "1: rep; "__copyuser_seg"movsb\n" \
26101 "2:\n" \
26102 ".section .fixup,\"ax\"\n" \
26103 "5: addl %3,%0\n" \
26104 @@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
26105 {
26106 stac();
26107 if (movsl_is_ok(to, from, n))
26108 - __copy_user(to, from, n);
26109 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
26110 else
26111 - n = __copy_user_intel(to, from, n);
26112 + n = __generic_copy_to_user_intel(to, from, n);
26113 clac();
26114 return n;
26115 }
26116 @@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
26117 {
26118 stac();
26119 if (movsl_is_ok(to, from, n))
26120 - __copy_user(to, from, n);
26121 + __copy_user(to, from, n, __copyuser_seg, "", "");
26122 else
26123 - n = __copy_user_intel((void __user *)to,
26124 - (const void *)from, n);
26125 + n = __generic_copy_from_user_intel(to, from, n);
26126 clac();
26127 return n;
26128 }
26129 @@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
26130 if (n > 64 && cpu_has_xmm2)
26131 n = __copy_user_intel_nocache(to, from, n);
26132 else
26133 - __copy_user(to, from, n);
26134 + __copy_user(to, from, n, __copyuser_seg, "", "");
26135 #else
26136 - __copy_user(to, from, n);
26137 + __copy_user(to, from, n, __copyuser_seg, "", "");
26138 #endif
26139 clac();
26140 return n;
26141 }
26142 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
26143
26144 -/**
26145 - * copy_to_user: - Copy a block of data into user space.
26146 - * @to: Destination address, in user space.
26147 - * @from: Source address, in kernel space.
26148 - * @n: Number of bytes to copy.
26149 - *
26150 - * Context: User context only. This function may sleep.
26151 - *
26152 - * Copy data from kernel space to user space.
26153 - *
26154 - * Returns number of bytes that could not be copied.
26155 - * On success, this will be zero.
26156 - */
26157 -unsigned long
26158 -copy_to_user(void __user *to, const void *from, unsigned long n)
26159 -{
26160 - if (access_ok(VERIFY_WRITE, to, n))
26161 - n = __copy_to_user(to, from, n);
26162 - return n;
26163 -}
26164 -EXPORT_SYMBOL(copy_to_user);
26165 -
26166 -/**
26167 - * copy_from_user: - Copy a block of data from user space.
26168 - * @to: Destination address, in kernel space.
26169 - * @from: Source address, in user space.
26170 - * @n: Number of bytes to copy.
26171 - *
26172 - * Context: User context only. This function may sleep.
26173 - *
26174 - * Copy data from user space to kernel space.
26175 - *
26176 - * Returns number of bytes that could not be copied.
26177 - * On success, this will be zero.
26178 - *
26179 - * If some data could not be copied, this function will pad the copied
26180 - * data to the requested size using zero bytes.
26181 - */
26182 -unsigned long
26183 -_copy_from_user(void *to, const void __user *from, unsigned long n)
26184 -{
26185 - if (access_ok(VERIFY_READ, from, n))
26186 - n = __copy_from_user(to, from, n);
26187 - else
26188 - memset(to, 0, n);
26189 - return n;
26190 -}
26191 -EXPORT_SYMBOL(_copy_from_user);
26192 -
26193 void copy_from_user_overflow(void)
26194 {
26195 WARN(1, "Buffer overflow detected!\n");
26196 }
26197 EXPORT_SYMBOL(copy_from_user_overflow);
26198 +
26199 +void copy_to_user_overflow(void)
26200 +{
26201 + WARN(1, "Buffer overflow detected!\n");
26202 +}
26203 +EXPORT_SYMBOL(copy_to_user_overflow);
26204 +
26205 +#ifdef CONFIG_PAX_MEMORY_UDEREF
26206 +void __set_fs(mm_segment_t x)
26207 +{
26208 + switch (x.seg) {
26209 + case 0:
26210 + loadsegment(gs, 0);
26211 + break;
26212 + case TASK_SIZE_MAX:
26213 + loadsegment(gs, __USER_DS);
26214 + break;
26215 + case -1UL:
26216 + loadsegment(gs, __KERNEL_DS);
26217 + break;
26218 + default:
26219 + BUG();
26220 + }
26221 + return;
26222 +}
26223 +EXPORT_SYMBOL(__set_fs);
26224 +
26225 +void set_fs(mm_segment_t x)
26226 +{
26227 + current_thread_info()->addr_limit = x;
26228 + __set_fs(x);
26229 +}
26230 +EXPORT_SYMBOL(set_fs);
26231 +#endif
26232 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
26233 index 05928aa..b33dea1 100644
26234 --- a/arch/x86/lib/usercopy_64.c
26235 +++ b/arch/x86/lib/usercopy_64.c
26236 @@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
26237 _ASM_EXTABLE(0b,3b)
26238 _ASM_EXTABLE(1b,2b)
26239 : [size8] "=&c"(size), [dst] "=&D" (__d0)
26240 - : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
26241 + : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
26242 [zero] "r" (0UL), [eight] "r" (8UL));
26243 clac();
26244 return size;
26245 @@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
26246 }
26247 EXPORT_SYMBOL(clear_user);
26248
26249 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
26250 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
26251 {
26252 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
26253 - return copy_user_generic((__force void *)to, (__force void *)from, len);
26254 - }
26255 - return len;
26256 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
26257 + return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
26258 + return len;
26259 }
26260 EXPORT_SYMBOL(copy_in_user);
26261
26262 @@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
26263 * it is not necessary to optimize tail handling.
26264 */
26265 unsigned long
26266 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
26267 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
26268 {
26269 char c;
26270 unsigned zero_len;
26271 @@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
26272 clac();
26273 return len;
26274 }
26275 +
26276 +void copy_from_user_overflow(void)
26277 +{
26278 + WARN(1, "Buffer overflow detected!\n");
26279 +}
26280 +EXPORT_SYMBOL(copy_from_user_overflow);
26281 +
26282 +void copy_to_user_overflow(void)
26283 +{
26284 + WARN(1, "Buffer overflow detected!\n");
26285 +}
26286 +EXPORT_SYMBOL(copy_to_user_overflow);
26287 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
26288 index 903ec1e..c4166b2 100644
26289 --- a/arch/x86/mm/extable.c
26290 +++ b/arch/x86/mm/extable.c
26291 @@ -6,12 +6,24 @@
26292 static inline unsigned long
26293 ex_insn_addr(const struct exception_table_entry *x)
26294 {
26295 - return (unsigned long)&x->insn + x->insn;
26296 + unsigned long reloc = 0;
26297 +
26298 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26299 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26300 +#endif
26301 +
26302 + return (unsigned long)&x->insn + x->insn + reloc;
26303 }
26304 static inline unsigned long
26305 ex_fixup_addr(const struct exception_table_entry *x)
26306 {
26307 - return (unsigned long)&x->fixup + x->fixup;
26308 + unsigned long reloc = 0;
26309 +
26310 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26311 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26312 +#endif
26313 +
26314 + return (unsigned long)&x->fixup + x->fixup + reloc;
26315 }
26316
26317 int fixup_exception(struct pt_regs *regs)
26318 @@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
26319 unsigned long new_ip;
26320
26321 #ifdef CONFIG_PNPBIOS
26322 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
26323 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
26324 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
26325 extern u32 pnp_bios_is_utter_crap;
26326 pnp_bios_is_utter_crap = 1;
26327 @@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
26328 i += 4;
26329 p->fixup -= i;
26330 i += 4;
26331 +
26332 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26333 + BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
26334 + p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26335 + p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26336 +#endif
26337 +
26338 }
26339 }
26340
26341 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
26342 index fb674fd..272f369 100644
26343 --- a/arch/x86/mm/fault.c
26344 +++ b/arch/x86/mm/fault.c
26345 @@ -13,12 +13,19 @@
26346 #include <linux/perf_event.h> /* perf_sw_event */
26347 #include <linux/hugetlb.h> /* hstate_index_to_shift */
26348 #include <linux/prefetch.h> /* prefetchw */
26349 +#include <linux/unistd.h>
26350 +#include <linux/compiler.h>
26351
26352 #include <asm/traps.h> /* dotraplinkage, ... */
26353 #include <asm/pgalloc.h> /* pgd_*(), ... */
26354 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
26355 #include <asm/fixmap.h> /* VSYSCALL_START */
26356 #include <asm/context_tracking.h> /* exception_enter(), ... */
26357 +#include <asm/tlbflush.h>
26358 +
26359 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26360 +#include <asm/stacktrace.h>
26361 +#endif
26362
26363 /*
26364 * Page fault error code bits:
26365 @@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
26366 int ret = 0;
26367
26368 /* kprobe_running() needs smp_processor_id() */
26369 - if (kprobes_built_in() && !user_mode_vm(regs)) {
26370 + if (kprobes_built_in() && !user_mode(regs)) {
26371 preempt_disable();
26372 if (kprobe_running() && kprobe_fault_handler(regs, 14))
26373 ret = 1;
26374 @@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
26375 return !instr_lo || (instr_lo>>1) == 1;
26376 case 0x00:
26377 /* Prefetch instruction is 0x0F0D or 0x0F18 */
26378 - if (probe_kernel_address(instr, opcode))
26379 + if (user_mode(regs)) {
26380 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
26381 + return 0;
26382 + } else if (probe_kernel_address(instr, opcode))
26383 return 0;
26384
26385 *prefetch = (instr_lo == 0xF) &&
26386 @@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
26387 while (instr < max_instr) {
26388 unsigned char opcode;
26389
26390 - if (probe_kernel_address(instr, opcode))
26391 + if (user_mode(regs)) {
26392 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
26393 + break;
26394 + } else if (probe_kernel_address(instr, opcode))
26395 break;
26396
26397 instr++;
26398 @@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
26399 force_sig_info(si_signo, &info, tsk);
26400 }
26401
26402 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26403 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
26404 +#endif
26405 +
26406 +#ifdef CONFIG_PAX_EMUTRAMP
26407 +static int pax_handle_fetch_fault(struct pt_regs *regs);
26408 +#endif
26409 +
26410 +#ifdef CONFIG_PAX_PAGEEXEC
26411 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
26412 +{
26413 + pgd_t *pgd;
26414 + pud_t *pud;
26415 + pmd_t *pmd;
26416 +
26417 + pgd = pgd_offset(mm, address);
26418 + if (!pgd_present(*pgd))
26419 + return NULL;
26420 + pud = pud_offset(pgd, address);
26421 + if (!pud_present(*pud))
26422 + return NULL;
26423 + pmd = pmd_offset(pud, address);
26424 + if (!pmd_present(*pmd))
26425 + return NULL;
26426 + return pmd;
26427 +}
26428 +#endif
26429 +
26430 DEFINE_SPINLOCK(pgd_lock);
26431 LIST_HEAD(pgd_list);
26432
26433 @@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
26434 for (address = VMALLOC_START & PMD_MASK;
26435 address >= TASK_SIZE && address < FIXADDR_TOP;
26436 address += PMD_SIZE) {
26437 +
26438 +#ifdef CONFIG_PAX_PER_CPU_PGD
26439 + unsigned long cpu;
26440 +#else
26441 struct page *page;
26442 +#endif
26443
26444 spin_lock(&pgd_lock);
26445 +
26446 +#ifdef CONFIG_PAX_PER_CPU_PGD
26447 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26448 + pgd_t *pgd = get_cpu_pgd(cpu);
26449 + pmd_t *ret;
26450 +#else
26451 list_for_each_entry(page, &pgd_list, lru) {
26452 + pgd_t *pgd = page_address(page);
26453 spinlock_t *pgt_lock;
26454 pmd_t *ret;
26455
26456 @@ -243,8 +296,13 @@ void vmalloc_sync_all(void)
26457 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
26458
26459 spin_lock(pgt_lock);
26460 - ret = vmalloc_sync_one(page_address(page), address);
26461 +#endif
26462 +
26463 + ret = vmalloc_sync_one(pgd, address);
26464 +
26465 +#ifndef CONFIG_PAX_PER_CPU_PGD
26466 spin_unlock(pgt_lock);
26467 +#endif
26468
26469 if (!ret)
26470 break;
26471 @@ -278,6 +336,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
26472 * an interrupt in the middle of a task switch..
26473 */
26474 pgd_paddr = read_cr3();
26475 +
26476 +#ifdef CONFIG_PAX_PER_CPU_PGD
26477 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
26478 +#endif
26479 +
26480 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
26481 if (!pmd_k)
26482 return -1;
26483 @@ -373,7 +436,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
26484 * happen within a race in page table update. In the later
26485 * case just flush:
26486 */
26487 +
26488 +#ifdef CONFIG_PAX_PER_CPU_PGD
26489 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
26490 + pgd = pgd_offset_cpu(smp_processor_id(), address);
26491 +#else
26492 pgd = pgd_offset(current->active_mm, address);
26493 +#endif
26494 +
26495 pgd_ref = pgd_offset_k(address);
26496 if (pgd_none(*pgd_ref))
26497 return -1;
26498 @@ -541,7 +611,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
26499 static int is_errata100(struct pt_regs *regs, unsigned long address)
26500 {
26501 #ifdef CONFIG_X86_64
26502 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
26503 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
26504 return 1;
26505 #endif
26506 return 0;
26507 @@ -568,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
26508 }
26509
26510 static const char nx_warning[] = KERN_CRIT
26511 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
26512 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
26513
26514 static void
26515 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
26516 @@ -577,15 +647,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
26517 if (!oops_may_print())
26518 return;
26519
26520 - if (error_code & PF_INSTR) {
26521 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
26522 unsigned int level;
26523
26524 pte_t *pte = lookup_address(address, &level);
26525
26526 if (pte && pte_present(*pte) && !pte_exec(*pte))
26527 - printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
26528 + printk(nx_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
26529 }
26530
26531 +#ifdef CONFIG_PAX_KERNEXEC
26532 + if (init_mm.start_code <= address && address < init_mm.end_code) {
26533 + if (current->signal->curr_ip)
26534 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
26535 + &current->signal->curr_ip, current->comm, task_pid_nr(current),
26536 + from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
26537 + else
26538 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
26539 + from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
26540 + }
26541 +#endif
26542 +
26543 printk(KERN_ALERT "BUG: unable to handle kernel ");
26544 if (address < PAGE_SIZE)
26545 printk(KERN_CONT "NULL pointer dereference");
26546 @@ -748,6 +830,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
26547 return;
26548 }
26549 #endif
26550 +
26551 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26552 + if (pax_is_fetch_fault(regs, error_code, address)) {
26553 +
26554 +#ifdef CONFIG_PAX_EMUTRAMP
26555 + switch (pax_handle_fetch_fault(regs)) {
26556 + case 2:
26557 + return;
26558 + }
26559 +#endif
26560 +
26561 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
26562 + do_group_exit(SIGKILL);
26563 + }
26564 +#endif
26565 +
26566 /* Kernel addresses are always protection faults: */
26567 if (address >= TASK_SIZE)
26568 error_code |= PF_PROT;
26569 @@ -833,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
26570 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
26571 printk(KERN_ERR
26572 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
26573 - tsk->comm, tsk->pid, address);
26574 + tsk->comm, task_pid_nr(tsk), address);
26575 code = BUS_MCEERR_AR;
26576 }
26577 #endif
26578 @@ -896,6 +994,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
26579 return 1;
26580 }
26581
26582 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
26583 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
26584 +{
26585 + pte_t *pte;
26586 + pmd_t *pmd;
26587 + spinlock_t *ptl;
26588 + unsigned char pte_mask;
26589 +
26590 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
26591 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
26592 + return 0;
26593 +
26594 + /* PaX: it's our fault, let's handle it if we can */
26595 +
26596 + /* PaX: take a look at read faults before acquiring any locks */
26597 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
26598 + /* instruction fetch attempt from a protected page in user mode */
26599 + up_read(&mm->mmap_sem);
26600 +
26601 +#ifdef CONFIG_PAX_EMUTRAMP
26602 + switch (pax_handle_fetch_fault(regs)) {
26603 + case 2:
26604 + return 1;
26605 + }
26606 +#endif
26607 +
26608 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
26609 + do_group_exit(SIGKILL);
26610 + }
26611 +
26612 + pmd = pax_get_pmd(mm, address);
26613 + if (unlikely(!pmd))
26614 + return 0;
26615 +
26616 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
26617 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
26618 + pte_unmap_unlock(pte, ptl);
26619 + return 0;
26620 + }
26621 +
26622 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
26623 + /* write attempt to a protected page in user mode */
26624 + pte_unmap_unlock(pte, ptl);
26625 + return 0;
26626 + }
26627 +
26628 +#ifdef CONFIG_SMP
26629 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
26630 +#else
26631 + if (likely(address > get_limit(regs->cs)))
26632 +#endif
26633 + {
26634 + set_pte(pte, pte_mkread(*pte));
26635 + __flush_tlb_one(address);
26636 + pte_unmap_unlock(pte, ptl);
26637 + up_read(&mm->mmap_sem);
26638 + return 1;
26639 + }
26640 +
26641 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
26642 +
26643 + /*
26644 + * PaX: fill DTLB with user rights and retry
26645 + */
26646 + __asm__ __volatile__ (
26647 + "orb %2,(%1)\n"
26648 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
26649 +/*
26650 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
26651 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
26652 + * page fault when examined during a TLB load attempt. this is true not only
26653 + * for PTEs holding a non-present entry but also present entries that will
26654 + * raise a page fault (such as those set up by PaX, or the copy-on-write
26655 + * mechanism). in effect it means that we do *not* need to flush the TLBs
26656 + * for our target pages since their PTEs are simply not in the TLBs at all.
26657 +
26658 + * the best thing in omitting it is that we gain around 15-20% speed in the
26659 + * fast path of the page fault handler and can get rid of tracing since we
26660 + * can no longer flush unintended entries.
26661 + */
26662 + "invlpg (%0)\n"
26663 +#endif
26664 + __copyuser_seg"testb $0,(%0)\n"
26665 + "xorb %3,(%1)\n"
26666 + :
26667 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
26668 + : "memory", "cc");
26669 + pte_unmap_unlock(pte, ptl);
26670 + up_read(&mm->mmap_sem);
26671 + return 1;
26672 +}
26673 +#endif
26674 +
26675 /*
26676 * Handle a spurious fault caused by a stale TLB entry.
26677 *
26678 @@ -968,6 +1159,9 @@ int show_unhandled_signals = 1;
26679 static inline int
26680 access_error(unsigned long error_code, struct vm_area_struct *vma)
26681 {
26682 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
26683 + return 1;
26684 +
26685 if (error_code & PF_WRITE) {
26686 /* write, present and write, not present: */
26687 if (unlikely(!(vma->vm_flags & VM_WRITE)))
26688 @@ -996,7 +1190,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
26689 if (error_code & PF_USER)
26690 return false;
26691
26692 - if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
26693 + if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
26694 return false;
26695
26696 return true;
26697 @@ -1012,18 +1206,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
26698 {
26699 struct vm_area_struct *vma;
26700 struct task_struct *tsk;
26701 - unsigned long address;
26702 struct mm_struct *mm;
26703 int fault;
26704 int write = error_code & PF_WRITE;
26705 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
26706 (write ? FAULT_FLAG_WRITE : 0);
26707
26708 - tsk = current;
26709 - mm = tsk->mm;
26710 -
26711 /* Get the faulting address: */
26712 - address = read_cr2();
26713 + unsigned long address = read_cr2();
26714 +
26715 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26716 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
26717 + if (!search_exception_tables(regs->ip)) {
26718 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
26719 + bad_area_nosemaphore(regs, error_code, address);
26720 + return;
26721 + }
26722 + if (address < PAX_USER_SHADOW_BASE) {
26723 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
26724 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
26725 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
26726 + } else
26727 + address -= PAX_USER_SHADOW_BASE;
26728 + }
26729 +#endif
26730 +
26731 + tsk = current;
26732 + mm = tsk->mm;
26733
26734 /*
26735 * Detect and handle instructions that would cause a page fault for
26736 @@ -1084,7 +1293,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
26737 * User-mode registers count as a user access even for any
26738 * potential system fault or CPU buglet:
26739 */
26740 - if (user_mode_vm(regs)) {
26741 + if (user_mode(regs)) {
26742 local_irq_enable();
26743 error_code |= PF_USER;
26744 } else {
26745 @@ -1146,6 +1355,11 @@ retry:
26746 might_sleep();
26747 }
26748
26749 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
26750 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
26751 + return;
26752 +#endif
26753 +
26754 vma = find_vma(mm, address);
26755 if (unlikely(!vma)) {
26756 bad_area(regs, error_code, address);
26757 @@ -1157,18 +1371,24 @@ retry:
26758 bad_area(regs, error_code, address);
26759 return;
26760 }
26761 - if (error_code & PF_USER) {
26762 - /*
26763 - * Accessing the stack below %sp is always a bug.
26764 - * The large cushion allows instructions like enter
26765 - * and pusha to work. ("enter $65535, $31" pushes
26766 - * 32 pointers and then decrements %sp by 65535.)
26767 - */
26768 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
26769 - bad_area(regs, error_code, address);
26770 - return;
26771 - }
26772 + /*
26773 + * Accessing the stack below %sp is always a bug.
26774 + * The large cushion allows instructions like enter
26775 + * and pusha to work. ("enter $65535, $31" pushes
26776 + * 32 pointers and then decrements %sp by 65535.)
26777 + */
26778 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
26779 + bad_area(regs, error_code, address);
26780 + return;
26781 }
26782 +
26783 +#ifdef CONFIG_PAX_SEGMEXEC
26784 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
26785 + bad_area(regs, error_code, address);
26786 + return;
26787 + }
26788 +#endif
26789 +
26790 if (unlikely(expand_stack(vma, address))) {
26791 bad_area(regs, error_code, address);
26792 return;
26793 @@ -1232,3 +1452,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
26794 __do_page_fault(regs, error_code);
26795 exception_exit(regs);
26796 }
26797 +
26798 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26799 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
26800 +{
26801 + struct mm_struct *mm = current->mm;
26802 + unsigned long ip = regs->ip;
26803 +
26804 + if (v8086_mode(regs))
26805 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
26806 +
26807 +#ifdef CONFIG_PAX_PAGEEXEC
26808 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
26809 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
26810 + return true;
26811 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
26812 + return true;
26813 + return false;
26814 + }
26815 +#endif
26816 +
26817 +#ifdef CONFIG_PAX_SEGMEXEC
26818 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
26819 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
26820 + return true;
26821 + return false;
26822 + }
26823 +#endif
26824 +
26825 + return false;
26826 +}
26827 +#endif
26828 +
26829 +#ifdef CONFIG_PAX_EMUTRAMP
26830 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
26831 +{
26832 + int err;
26833 +
26834 + do { /* PaX: libffi trampoline emulation */
26835 + unsigned char mov, jmp;
26836 + unsigned int addr1, addr2;
26837 +
26838 +#ifdef CONFIG_X86_64
26839 + if ((regs->ip + 9) >> 32)
26840 + break;
26841 +#endif
26842 +
26843 + err = get_user(mov, (unsigned char __user *)regs->ip);
26844 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
26845 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
26846 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
26847 +
26848 + if (err)
26849 + break;
26850 +
26851 + if (mov == 0xB8 && jmp == 0xE9) {
26852 + regs->ax = addr1;
26853 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
26854 + return 2;
26855 + }
26856 + } while (0);
26857 +
26858 + do { /* PaX: gcc trampoline emulation #1 */
26859 + unsigned char mov1, mov2;
26860 + unsigned short jmp;
26861 + unsigned int addr1, addr2;
26862 +
26863 +#ifdef CONFIG_X86_64
26864 + if ((regs->ip + 11) >> 32)
26865 + break;
26866 +#endif
26867 +
26868 + err = get_user(mov1, (unsigned char __user *)regs->ip);
26869 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
26870 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
26871 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
26872 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
26873 +
26874 + if (err)
26875 + break;
26876 +
26877 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
26878 + regs->cx = addr1;
26879 + regs->ax = addr2;
26880 + regs->ip = addr2;
26881 + return 2;
26882 + }
26883 + } while (0);
26884 +
26885 + do { /* PaX: gcc trampoline emulation #2 */
26886 + unsigned char mov, jmp;
26887 + unsigned int addr1, addr2;
26888 +
26889 +#ifdef CONFIG_X86_64
26890 + if ((regs->ip + 9) >> 32)
26891 + break;
26892 +#endif
26893 +
26894 + err = get_user(mov, (unsigned char __user *)regs->ip);
26895 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
26896 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
26897 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
26898 +
26899 + if (err)
26900 + break;
26901 +
26902 + if (mov == 0xB9 && jmp == 0xE9) {
26903 + regs->cx = addr1;
26904 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
26905 + return 2;
26906 + }
26907 + } while (0);
26908 +
26909 + return 1; /* PaX in action */
26910 +}
26911 +
26912 +#ifdef CONFIG_X86_64
26913 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
26914 +{
26915 + int err;
26916 +
26917 + do { /* PaX: libffi trampoline emulation */
26918 + unsigned short mov1, mov2, jmp1;
26919 + unsigned char stcclc, jmp2;
26920 + unsigned long addr1, addr2;
26921 +
26922 + err = get_user(mov1, (unsigned short __user *)regs->ip);
26923 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
26924 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
26925 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
26926 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
26927 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
26928 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
26929 +
26930 + if (err)
26931 + break;
26932 +
26933 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
26934 + regs->r11 = addr1;
26935 + regs->r10 = addr2;
26936 + if (stcclc == 0xF8)
26937 + regs->flags &= ~X86_EFLAGS_CF;
26938 + else
26939 + regs->flags |= X86_EFLAGS_CF;
26940 + regs->ip = addr1;
26941 + return 2;
26942 + }
26943 + } while (0);
26944 +
26945 + do { /* PaX: gcc trampoline emulation #1 */
26946 + unsigned short mov1, mov2, jmp1;
26947 + unsigned char jmp2;
26948 + unsigned int addr1;
26949 + unsigned long addr2;
26950 +
26951 + err = get_user(mov1, (unsigned short __user *)regs->ip);
26952 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
26953 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
26954 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
26955 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
26956 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
26957 +
26958 + if (err)
26959 + break;
26960 +
26961 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
26962 + regs->r11 = addr1;
26963 + regs->r10 = addr2;
26964 + regs->ip = addr1;
26965 + return 2;
26966 + }
26967 + } while (0);
26968 +
26969 + do { /* PaX: gcc trampoline emulation #2 */
26970 + unsigned short mov1, mov2, jmp1;
26971 + unsigned char jmp2;
26972 + unsigned long addr1, addr2;
26973 +
26974 + err = get_user(mov1, (unsigned short __user *)regs->ip);
26975 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
26976 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
26977 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
26978 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
26979 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
26980 +
26981 + if (err)
26982 + break;
26983 +
26984 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
26985 + regs->r11 = addr1;
26986 + regs->r10 = addr2;
26987 + regs->ip = addr1;
26988 + return 2;
26989 + }
26990 + } while (0);
26991 +
26992 + return 1; /* PaX in action */
26993 +}
26994 +#endif
26995 +
26996 +/*
26997 + * PaX: decide what to do with offenders (regs->ip = fault address)
26998 + *
26999 + * returns 1 when task should be killed
27000 + * 2 when gcc trampoline was detected
27001 + */
27002 +static int pax_handle_fetch_fault(struct pt_regs *regs)
27003 +{
27004 + if (v8086_mode(regs))
27005 + return 1;
27006 +
27007 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
27008 + return 1;
27009 +
27010 +#ifdef CONFIG_X86_32
27011 + return pax_handle_fetch_fault_32(regs);
27012 +#else
27013 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
27014 + return pax_handle_fetch_fault_32(regs);
27015 + else
27016 + return pax_handle_fetch_fault_64(regs);
27017 +#endif
27018 +}
27019 +#endif
27020 +
27021 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27022 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
27023 +{
27024 + long i;
27025 +
27026 + printk(KERN_ERR "PAX: bytes at PC: ");
27027 + for (i = 0; i < 20; i++) {
27028 + unsigned char c;
27029 + if (get_user(c, (unsigned char __force_user *)pc+i))
27030 + printk(KERN_CONT "?? ");
27031 + else
27032 + printk(KERN_CONT "%02x ", c);
27033 + }
27034 + printk("\n");
27035 +
27036 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
27037 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
27038 + unsigned long c;
27039 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
27040 +#ifdef CONFIG_X86_32
27041 + printk(KERN_CONT "???????? ");
27042 +#else
27043 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
27044 + printk(KERN_CONT "???????? ???????? ");
27045 + else
27046 + printk(KERN_CONT "???????????????? ");
27047 +#endif
27048 + } else {
27049 +#ifdef CONFIG_X86_64
27050 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
27051 + printk(KERN_CONT "%08x ", (unsigned int)c);
27052 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
27053 + } else
27054 +#endif
27055 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
27056 + }
27057 + }
27058 + printk("\n");
27059 +}
27060 +#endif
27061 +
27062 +/**
27063 + * probe_kernel_write(): safely attempt to write to a location
27064 + * @dst: address to write to
27065 + * @src: pointer to the data that shall be written
27066 + * @size: size of the data chunk
27067 + *
27068 + * Safely write to address @dst from the buffer at @src. If a kernel fault
27069 + * happens, handle that and return -EFAULT.
27070 + */
27071 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
27072 +{
27073 + long ret;
27074 + mm_segment_t old_fs = get_fs();
27075 +
27076 + set_fs(KERNEL_DS);
27077 + pagefault_disable();
27078 + pax_open_kernel();
27079 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
27080 + pax_close_kernel();
27081 + pagefault_enable();
27082 + set_fs(old_fs);
27083 +
27084 + return ret ? -EFAULT : 0;
27085 +}
27086 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
27087 index dd74e46..7d26398 100644
27088 --- a/arch/x86/mm/gup.c
27089 +++ b/arch/x86/mm/gup.c
27090 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
27091 addr = start;
27092 len = (unsigned long) nr_pages << PAGE_SHIFT;
27093 end = start + len;
27094 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27095 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27096 (void __user *)start, len)))
27097 return 0;
27098
27099 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
27100 index 6f31ee5..8ee4164 100644
27101 --- a/arch/x86/mm/highmem_32.c
27102 +++ b/arch/x86/mm/highmem_32.c
27103 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
27104 idx = type + KM_TYPE_NR*smp_processor_id();
27105 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
27106 BUG_ON(!pte_none(*(kmap_pte-idx)));
27107 +
27108 + pax_open_kernel();
27109 set_pte(kmap_pte-idx, mk_pte(page, prot));
27110 + pax_close_kernel();
27111 +
27112 arch_flush_lazy_mmu_mode();
27113
27114 return (void *)vaddr;
27115 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
27116 index ae1aa71..56316db 100644
27117 --- a/arch/x86/mm/hugetlbpage.c
27118 +++ b/arch/x86/mm/hugetlbpage.c
27119 @@ -279,6 +279,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
27120 info.flags = 0;
27121 info.length = len;
27122 info.low_limit = TASK_UNMAPPED_BASE;
27123 +
27124 +#ifdef CONFIG_PAX_RANDMMAP
27125 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27126 + info.low_limit += current->mm->delta_mmap;
27127 +#endif
27128 +
27129 info.high_limit = TASK_SIZE;
27130 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
27131 info.align_offset = 0;
27132 @@ -311,6 +317,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
27133 VM_BUG_ON(addr != -ENOMEM);
27134 info.flags = 0;
27135 info.low_limit = TASK_UNMAPPED_BASE;
27136 +
27137 +#ifdef CONFIG_PAX_RANDMMAP
27138 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27139 + info.low_limit += current->mm->delta_mmap;
27140 +#endif
27141 +
27142 info.high_limit = TASK_SIZE;
27143 addr = vm_unmapped_area(&info);
27144 }
27145 @@ -325,10 +337,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
27146 struct hstate *h = hstate_file(file);
27147 struct mm_struct *mm = current->mm;
27148 struct vm_area_struct *vma;
27149 + unsigned long pax_task_size = TASK_SIZE;
27150 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
27151
27152 if (len & ~huge_page_mask(h))
27153 return -EINVAL;
27154 - if (len > TASK_SIZE)
27155 +
27156 +#ifdef CONFIG_PAX_SEGMEXEC
27157 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
27158 + pax_task_size = SEGMEXEC_TASK_SIZE;
27159 +#endif
27160 +
27161 + pax_task_size -= PAGE_SIZE;
27162 +
27163 + if (len > pax_task_size)
27164 return -ENOMEM;
27165
27166 if (flags & MAP_FIXED) {
27167 @@ -337,11 +359,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
27168 return addr;
27169 }
27170
27171 +#ifdef CONFIG_PAX_RANDMMAP
27172 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27173 +#endif
27174 +
27175 if (addr) {
27176 addr = ALIGN(addr, huge_page_size(h));
27177 vma = find_vma(mm, addr);
27178 - if (TASK_SIZE - len >= addr &&
27179 - (!vma || addr + len <= vma->vm_start))
27180 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27181 return addr;
27182 }
27183 if (mm->get_unmapped_area == arch_get_unmapped_area)
27184 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
27185 index d7aea41..0fc945b 100644
27186 --- a/arch/x86/mm/init.c
27187 +++ b/arch/x86/mm/init.c
27188 @@ -4,6 +4,7 @@
27189 #include <linux/swap.h>
27190 #include <linux/memblock.h>
27191 #include <linux/bootmem.h> /* for max_low_pfn */
27192 +#include <linux/tboot.h>
27193
27194 #include <asm/cacheflush.h>
27195 #include <asm/e820.h>
27196 @@ -16,6 +17,8 @@
27197 #include <asm/tlb.h>
27198 #include <asm/proto.h>
27199 #include <asm/dma.h> /* for MAX_DMA_PFN */
27200 +#include <asm/desc.h>
27201 +#include <asm/bios_ebda.h>
27202
27203 unsigned long __initdata pgt_buf_start;
27204 unsigned long __meminitdata pgt_buf_end;
27205 @@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
27206 {
27207 int i;
27208 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
27209 - unsigned long start = 0, good_end;
27210 + unsigned long start = 0x100000, good_end;
27211 phys_addr_t base;
27212
27213 for (i = 0; i < nr_range; i++) {
27214 @@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
27215 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
27216 * mmio resources as well as potential bios/acpi data regions.
27217 */
27218 +
27219 +#ifdef CONFIG_GRKERNSEC_KMEM
27220 +static unsigned int ebda_start __read_only;
27221 +static unsigned int ebda_end __read_only;
27222 +#endif
27223 +
27224 int devmem_is_allowed(unsigned long pagenr)
27225 {
27226 - if (pagenr < 256)
27227 +#ifdef CONFIG_GRKERNSEC_KMEM
27228 + /* allow BDA */
27229 + if (!pagenr)
27230 return 1;
27231 + /* allow EBDA */
27232 + if (pagenr >= ebda_start && pagenr < ebda_end)
27233 + return 1;
27234 + /* if tboot is in use, allow access to its hardcoded serial log range */
27235 + if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
27236 + return 1;
27237 +#else
27238 + if (!pagenr)
27239 + return 1;
27240 +#ifdef CONFIG_VM86
27241 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
27242 + return 1;
27243 +#endif
27244 +#endif
27245 +
27246 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
27247 + return 1;
27248 +#ifdef CONFIG_GRKERNSEC_KMEM
27249 + /* throw out everything else below 1MB */
27250 + if (pagenr <= 256)
27251 + return 0;
27252 +#endif
27253 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
27254 return 0;
27255 if (!page_is_ram(pagenr))
27256 @@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
27257 #endif
27258 }
27259
27260 +#ifdef CONFIG_GRKERNSEC_KMEM
27261 +static inline void gr_init_ebda(void)
27262 +{
27263 + unsigned int ebda_addr;
27264 + unsigned int ebda_size = 0;
27265 +
27266 + ebda_addr = get_bios_ebda();
27267 + if (ebda_addr) {
27268 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
27269 + ebda_size <<= 10;
27270 + }
27271 + if (ebda_addr && ebda_size) {
27272 + ebda_start = ebda_addr >> PAGE_SHIFT;
27273 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
27274 + } else {
27275 + ebda_start = 0x9f000 >> PAGE_SHIFT;
27276 + ebda_end = 0xa0000 >> PAGE_SHIFT;
27277 + }
27278 +}
27279 +#else
27280 +static inline void gr_init_ebda(void) { }
27281 +#endif
27282 +
27283 void free_initmem(void)
27284 {
27285 +#ifdef CONFIG_PAX_KERNEXEC
27286 +#ifdef CONFIG_X86_32
27287 + /* PaX: limit KERNEL_CS to actual size */
27288 + unsigned long addr, limit;
27289 + struct desc_struct d;
27290 + int cpu;
27291 +#else
27292 + pgd_t *pgd;
27293 + pud_t *pud;
27294 + pmd_t *pmd;
27295 + unsigned long addr, end;
27296 +#endif
27297 +#endif
27298 +
27299 + gr_init_ebda();
27300 +
27301 +#ifdef CONFIG_PAX_KERNEXEC
27302 +#ifdef CONFIG_X86_32
27303 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
27304 + limit = (limit - 1UL) >> PAGE_SHIFT;
27305 +
27306 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
27307 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27308 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
27309 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
27310 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
27311 + }
27312 +
27313 + /* PaX: make KERNEL_CS read-only */
27314 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
27315 + if (!paravirt_enabled())
27316 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
27317 +/*
27318 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
27319 + pgd = pgd_offset_k(addr);
27320 + pud = pud_offset(pgd, addr);
27321 + pmd = pmd_offset(pud, addr);
27322 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
27323 + }
27324 +*/
27325 +#ifdef CONFIG_X86_PAE
27326 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
27327 +/*
27328 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
27329 + pgd = pgd_offset_k(addr);
27330 + pud = pud_offset(pgd, addr);
27331 + pmd = pmd_offset(pud, addr);
27332 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
27333 + }
27334 +*/
27335 +#endif
27336 +
27337 +#ifdef CONFIG_MODULES
27338 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
27339 +#endif
27340 +
27341 +#else
27342 + /* PaX: make kernel code/rodata read-only, rest non-executable */
27343 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
27344 + pgd = pgd_offset_k(addr);
27345 + pud = pud_offset(pgd, addr);
27346 + pmd = pmd_offset(pud, addr);
27347 + if (!pmd_present(*pmd))
27348 + continue;
27349 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
27350 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
27351 + else
27352 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
27353 + }
27354 +
27355 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
27356 + end = addr + KERNEL_IMAGE_SIZE;
27357 + for (; addr < end; addr += PMD_SIZE) {
27358 + pgd = pgd_offset_k(addr);
27359 + pud = pud_offset(pgd, addr);
27360 + pmd = pmd_offset(pud, addr);
27361 + if (!pmd_present(*pmd))
27362 + continue;
27363 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
27364 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
27365 + }
27366 +#endif
27367 +
27368 + flush_tlb_all();
27369 +#endif
27370 +
27371 free_init_pages("unused kernel memory",
27372 (unsigned long)(&__init_begin),
27373 (unsigned long)(&__init_end));
27374 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
27375 index 745d66b..56bf568 100644
27376 --- a/arch/x86/mm/init_32.c
27377 +++ b/arch/x86/mm/init_32.c
27378 @@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
27379 }
27380
27381 /*
27382 - * Creates a middle page table and puts a pointer to it in the
27383 - * given global directory entry. This only returns the gd entry
27384 - * in non-PAE compilation mode, since the middle layer is folded.
27385 - */
27386 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
27387 -{
27388 - pud_t *pud;
27389 - pmd_t *pmd_table;
27390 -
27391 -#ifdef CONFIG_X86_PAE
27392 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
27393 - if (after_bootmem)
27394 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
27395 - else
27396 - pmd_table = (pmd_t *)alloc_low_page();
27397 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
27398 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
27399 - pud = pud_offset(pgd, 0);
27400 - BUG_ON(pmd_table != pmd_offset(pud, 0));
27401 -
27402 - return pmd_table;
27403 - }
27404 -#endif
27405 - pud = pud_offset(pgd, 0);
27406 - pmd_table = pmd_offset(pud, 0);
27407 -
27408 - return pmd_table;
27409 -}
27410 -
27411 -/*
27412 * Create a page table and place a pointer to it in a middle page
27413 * directory entry:
27414 */
27415 @@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
27416 page_table = (pte_t *)alloc_low_page();
27417
27418 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
27419 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27420 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
27421 +#else
27422 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
27423 +#endif
27424 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
27425 }
27426
27427 return pte_offset_kernel(pmd, 0);
27428 }
27429
27430 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
27431 +{
27432 + pud_t *pud;
27433 + pmd_t *pmd_table;
27434 +
27435 + pud = pud_offset(pgd, 0);
27436 + pmd_table = pmd_offset(pud, 0);
27437 +
27438 + return pmd_table;
27439 +}
27440 +
27441 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
27442 {
27443 int pgd_idx = pgd_index(vaddr);
27444 @@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
27445 int pgd_idx, pmd_idx;
27446 unsigned long vaddr;
27447 pgd_t *pgd;
27448 + pud_t *pud;
27449 pmd_t *pmd;
27450 pte_t *pte = NULL;
27451
27452 @@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
27453 pgd = pgd_base + pgd_idx;
27454
27455 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
27456 - pmd = one_md_table_init(pgd);
27457 - pmd = pmd + pmd_index(vaddr);
27458 + pud = pud_offset(pgd, vaddr);
27459 + pmd = pmd_offset(pud, vaddr);
27460 +
27461 +#ifdef CONFIG_X86_PAE
27462 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
27463 +#endif
27464 +
27465 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
27466 pmd++, pmd_idx++) {
27467 pte = page_table_kmap_check(one_page_table_init(pmd),
27468 @@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
27469 }
27470 }
27471
27472 -static inline int is_kernel_text(unsigned long addr)
27473 +static inline int is_kernel_text(unsigned long start, unsigned long end)
27474 {
27475 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
27476 - return 1;
27477 - return 0;
27478 + if ((start > ktla_ktva((unsigned long)_etext) ||
27479 + end <= ktla_ktva((unsigned long)_stext)) &&
27480 + (start > ktla_ktva((unsigned long)_einittext) ||
27481 + end <= ktla_ktva((unsigned long)_sinittext)) &&
27482 +
27483 +#ifdef CONFIG_ACPI_SLEEP
27484 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
27485 +#endif
27486 +
27487 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
27488 + return 0;
27489 + return 1;
27490 }
27491
27492 /*
27493 @@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
27494 unsigned long last_map_addr = end;
27495 unsigned long start_pfn, end_pfn;
27496 pgd_t *pgd_base = swapper_pg_dir;
27497 - int pgd_idx, pmd_idx, pte_ofs;
27498 + unsigned int pgd_idx, pmd_idx, pte_ofs;
27499 unsigned long pfn;
27500 pgd_t *pgd;
27501 + pud_t *pud;
27502 pmd_t *pmd;
27503 pte_t *pte;
27504 unsigned pages_2m, pages_4k;
27505 @@ -280,8 +281,13 @@ repeat:
27506 pfn = start_pfn;
27507 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
27508 pgd = pgd_base + pgd_idx;
27509 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
27510 - pmd = one_md_table_init(pgd);
27511 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
27512 + pud = pud_offset(pgd, 0);
27513 + pmd = pmd_offset(pud, 0);
27514 +
27515 +#ifdef CONFIG_X86_PAE
27516 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
27517 +#endif
27518
27519 if (pfn >= end_pfn)
27520 continue;
27521 @@ -293,14 +299,13 @@ repeat:
27522 #endif
27523 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
27524 pmd++, pmd_idx++) {
27525 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
27526 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
27527
27528 /*
27529 * Map with big pages if possible, otherwise
27530 * create normal page tables:
27531 */
27532 if (use_pse) {
27533 - unsigned int addr2;
27534 pgprot_t prot = PAGE_KERNEL_LARGE;
27535 /*
27536 * first pass will use the same initial
27537 @@ -310,11 +315,7 @@ repeat:
27538 __pgprot(PTE_IDENT_ATTR |
27539 _PAGE_PSE);
27540
27541 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
27542 - PAGE_OFFSET + PAGE_SIZE-1;
27543 -
27544 - if (is_kernel_text(addr) ||
27545 - is_kernel_text(addr2))
27546 + if (is_kernel_text(address, address + PMD_SIZE))
27547 prot = PAGE_KERNEL_LARGE_EXEC;
27548
27549 pages_2m++;
27550 @@ -331,7 +332,7 @@ repeat:
27551 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
27552 pte += pte_ofs;
27553 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
27554 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
27555 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
27556 pgprot_t prot = PAGE_KERNEL;
27557 /*
27558 * first pass will use the same initial
27559 @@ -339,7 +340,7 @@ repeat:
27560 */
27561 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
27562
27563 - if (is_kernel_text(addr))
27564 + if (is_kernel_text(address, address + PAGE_SIZE))
27565 prot = PAGE_KERNEL_EXEC;
27566
27567 pages_4k++;
27568 @@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
27569
27570 pud = pud_offset(pgd, va);
27571 pmd = pmd_offset(pud, va);
27572 - if (!pmd_present(*pmd))
27573 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
27574 break;
27575
27576 pte = pte_offset_kernel(pmd, va);
27577 @@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
27578
27579 static void __init pagetable_init(void)
27580 {
27581 - pgd_t *pgd_base = swapper_pg_dir;
27582 -
27583 - permanent_kmaps_init(pgd_base);
27584 + permanent_kmaps_init(swapper_pg_dir);
27585 }
27586
27587 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
27588 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
27589 EXPORT_SYMBOL_GPL(__supported_pte_mask);
27590
27591 /* user-defined highmem size */
27592 @@ -728,6 +727,12 @@ void __init mem_init(void)
27593
27594 pci_iommu_alloc();
27595
27596 +#ifdef CONFIG_PAX_PER_CPU_PGD
27597 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
27598 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27599 + KERNEL_PGD_PTRS);
27600 +#endif
27601 +
27602 #ifdef CONFIG_FLATMEM
27603 BUG_ON(!mem_map);
27604 #endif
27605 @@ -754,7 +759,7 @@ void __init mem_init(void)
27606 reservedpages++;
27607
27608 codesize = (unsigned long) &_etext - (unsigned long) &_text;
27609 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
27610 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
27611 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
27612
27613 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
27614 @@ -795,10 +800,10 @@ void __init mem_init(void)
27615 ((unsigned long)&__init_end -
27616 (unsigned long)&__init_begin) >> 10,
27617
27618 - (unsigned long)&_etext, (unsigned long)&_edata,
27619 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
27620 + (unsigned long)&_sdata, (unsigned long)&_edata,
27621 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
27622
27623 - (unsigned long)&_text, (unsigned long)&_etext,
27624 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
27625 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
27626
27627 /*
27628 @@ -876,6 +881,7 @@ void set_kernel_text_rw(void)
27629 if (!kernel_set_to_readonly)
27630 return;
27631
27632 + start = ktla_ktva(start);
27633 pr_debug("Set kernel text: %lx - %lx for read write\n",
27634 start, start+size);
27635
27636 @@ -890,6 +896,7 @@ void set_kernel_text_ro(void)
27637 if (!kernel_set_to_readonly)
27638 return;
27639
27640 + start = ktla_ktva(start);
27641 pr_debug("Set kernel text: %lx - %lx for read only\n",
27642 start, start+size);
27643
27644 @@ -918,6 +925,7 @@ void mark_rodata_ro(void)
27645 unsigned long start = PFN_ALIGN(_text);
27646 unsigned long size = PFN_ALIGN(_etext) - start;
27647
27648 + start = ktla_ktva(start);
27649 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
27650 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
27651 size >> 10);
27652 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
27653 index 75c9a6a..498d677 100644
27654 --- a/arch/x86/mm/init_64.c
27655 +++ b/arch/x86/mm/init_64.c
27656 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
27657 * around without checking the pgd every time.
27658 */
27659
27660 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
27661 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
27662 EXPORT_SYMBOL_GPL(__supported_pte_mask);
27663
27664 int force_personality32;
27665 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
27666
27667 for (address = start; address <= end; address += PGDIR_SIZE) {
27668 const pgd_t *pgd_ref = pgd_offset_k(address);
27669 +
27670 +#ifdef CONFIG_PAX_PER_CPU_PGD
27671 + unsigned long cpu;
27672 +#else
27673 struct page *page;
27674 +#endif
27675
27676 if (pgd_none(*pgd_ref))
27677 continue;
27678
27679 spin_lock(&pgd_lock);
27680 +
27681 +#ifdef CONFIG_PAX_PER_CPU_PGD
27682 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27683 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
27684 +#else
27685 list_for_each_entry(page, &pgd_list, lru) {
27686 pgd_t *pgd;
27687 spinlock_t *pgt_lock;
27688 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
27689 /* the pgt_lock only for Xen */
27690 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27691 spin_lock(pgt_lock);
27692 +#endif
27693
27694 if (pgd_none(*pgd))
27695 set_pgd(pgd, *pgd_ref);
27696 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
27697 BUG_ON(pgd_page_vaddr(*pgd)
27698 != pgd_page_vaddr(*pgd_ref));
27699
27700 +#ifndef CONFIG_PAX_PER_CPU_PGD
27701 spin_unlock(pgt_lock);
27702 +#endif
27703 +
27704 }
27705 spin_unlock(&pgd_lock);
27706 }
27707 @@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
27708 {
27709 if (pgd_none(*pgd)) {
27710 pud_t *pud = (pud_t *)spp_getpage();
27711 - pgd_populate(&init_mm, pgd, pud);
27712 + pgd_populate_kernel(&init_mm, pgd, pud);
27713 if (pud != pud_offset(pgd, 0))
27714 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
27715 pud, pud_offset(pgd, 0));
27716 @@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
27717 {
27718 if (pud_none(*pud)) {
27719 pmd_t *pmd = (pmd_t *) spp_getpage();
27720 - pud_populate(&init_mm, pud, pmd);
27721 + pud_populate_kernel(&init_mm, pud, pmd);
27722 if (pmd != pmd_offset(pud, 0))
27723 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
27724 pmd, pmd_offset(pud, 0));
27725 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
27726 pmd = fill_pmd(pud, vaddr);
27727 pte = fill_pte(pmd, vaddr);
27728
27729 + pax_open_kernel();
27730 set_pte(pte, new_pte);
27731 + pax_close_kernel();
27732
27733 /*
27734 * It's enough to flush this one mapping.
27735 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
27736 pgd = pgd_offset_k((unsigned long)__va(phys));
27737 if (pgd_none(*pgd)) {
27738 pud = (pud_t *) spp_getpage();
27739 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
27740 - _PAGE_USER));
27741 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
27742 }
27743 pud = pud_offset(pgd, (unsigned long)__va(phys));
27744 if (pud_none(*pud)) {
27745 pmd = (pmd_t *) spp_getpage();
27746 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
27747 - _PAGE_USER));
27748 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
27749 }
27750 pmd = pmd_offset(pud, phys);
27751 BUG_ON(!pmd_none(*pmd));
27752 @@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
27753 if (pfn >= pgt_buf_top)
27754 panic("alloc_low_page: ran out of memory");
27755
27756 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
27757 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
27758 clear_page(adr);
27759 *phys = pfn * PAGE_SIZE;
27760 return adr;
27761 @@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
27762
27763 phys = __pa(virt);
27764 left = phys & (PAGE_SIZE - 1);
27765 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
27766 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
27767 adr = (void *)(((unsigned long)adr) | left);
27768
27769 return adr;
27770 @@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
27771 unmap_low_page(pmd);
27772
27773 spin_lock(&init_mm.page_table_lock);
27774 - pud_populate(&init_mm, pud, __va(pmd_phys));
27775 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
27776 spin_unlock(&init_mm.page_table_lock);
27777 }
27778 __flush_tlb_all();
27779 @@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
27780 unmap_low_page(pud);
27781
27782 spin_lock(&init_mm.page_table_lock);
27783 - pgd_populate(&init_mm, pgd, __va(pud_phys));
27784 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
27785 spin_unlock(&init_mm.page_table_lock);
27786 pgd_changed = true;
27787 }
27788 @@ -693,6 +707,12 @@ void __init mem_init(void)
27789
27790 pci_iommu_alloc();
27791
27792 +#ifdef CONFIG_PAX_PER_CPU_PGD
27793 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
27794 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27795 + KERNEL_PGD_PTRS);
27796 +#endif
27797 +
27798 /* clear_bss() already clear the empty_zero_page */
27799
27800 reservedpages = 0;
27801 @@ -856,8 +876,8 @@ int kern_addr_valid(unsigned long addr)
27802 static struct vm_area_struct gate_vma = {
27803 .vm_start = VSYSCALL_START,
27804 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
27805 - .vm_page_prot = PAGE_READONLY_EXEC,
27806 - .vm_flags = VM_READ | VM_EXEC
27807 + .vm_page_prot = PAGE_READONLY,
27808 + .vm_flags = VM_READ
27809 };
27810
27811 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
27812 @@ -891,7 +911,7 @@ int in_gate_area_no_mm(unsigned long addr)
27813
27814 const char *arch_vma_name(struct vm_area_struct *vma)
27815 {
27816 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27817 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27818 return "[vdso]";
27819 if (vma == &gate_vma)
27820 return "[vsyscall]";
27821 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
27822 index 7b179b4..6bd1777 100644
27823 --- a/arch/x86/mm/iomap_32.c
27824 +++ b/arch/x86/mm/iomap_32.c
27825 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
27826 type = kmap_atomic_idx_push();
27827 idx = type + KM_TYPE_NR * smp_processor_id();
27828 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
27829 +
27830 + pax_open_kernel();
27831 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
27832 + pax_close_kernel();
27833 +
27834 arch_flush_lazy_mmu_mode();
27835
27836 return (void *)vaddr;
27837 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
27838 index 78fe3f1..2f9433c 100644
27839 --- a/arch/x86/mm/ioremap.c
27840 +++ b/arch/x86/mm/ioremap.c
27841 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
27842 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
27843 int is_ram = page_is_ram(pfn);
27844
27845 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
27846 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
27847 return NULL;
27848 WARN_ON_ONCE(is_ram);
27849 }
27850 @@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
27851 *
27852 * Caller must ensure there is only one unmapping for the same pointer.
27853 */
27854 -void iounmap(volatile void __iomem *addr)
27855 +void iounmap(const volatile void __iomem *addr)
27856 {
27857 struct vm_struct *p, *o;
27858
27859 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
27860
27861 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
27862 if (page_is_ram(start >> PAGE_SHIFT))
27863 +#ifdef CONFIG_HIGHMEM
27864 + if ((start >> PAGE_SHIFT) < max_low_pfn)
27865 +#endif
27866 return __va(phys);
27867
27868 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
27869 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
27870 early_param("early_ioremap_debug", early_ioremap_debug_setup);
27871
27872 static __initdata int after_paging_init;
27873 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
27874 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
27875
27876 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
27877 {
27878 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
27879 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
27880
27881 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
27882 - memset(bm_pte, 0, sizeof(bm_pte));
27883 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
27884 + pmd_populate_user(&init_mm, pmd, bm_pte);
27885
27886 /*
27887 * The boot-ioremap range spans multiple pmds, for which
27888 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
27889 index d87dd6d..bf3fa66 100644
27890 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
27891 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
27892 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
27893 * memory (e.g. tracked pages)? For now, we need this to avoid
27894 * invoking kmemcheck for PnP BIOS calls.
27895 */
27896 - if (regs->flags & X86_VM_MASK)
27897 + if (v8086_mode(regs))
27898 return false;
27899 - if (regs->cs != __KERNEL_CS)
27900 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
27901 return false;
27902
27903 pte = kmemcheck_pte_lookup(address);
27904 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
27905 index 845df68..1d8d29f 100644
27906 --- a/arch/x86/mm/mmap.c
27907 +++ b/arch/x86/mm/mmap.c
27908 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
27909 * Leave an at least ~128 MB hole with possible stack randomization.
27910 */
27911 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
27912 -#define MAX_GAP (TASK_SIZE/6*5)
27913 +#define MAX_GAP (pax_task_size/6*5)
27914
27915 static int mmap_is_legacy(void)
27916 {
27917 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
27918 return rnd << PAGE_SHIFT;
27919 }
27920
27921 -static unsigned long mmap_base(void)
27922 +static unsigned long mmap_base(struct mm_struct *mm)
27923 {
27924 unsigned long gap = rlimit(RLIMIT_STACK);
27925 + unsigned long pax_task_size = TASK_SIZE;
27926 +
27927 +#ifdef CONFIG_PAX_SEGMEXEC
27928 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
27929 + pax_task_size = SEGMEXEC_TASK_SIZE;
27930 +#endif
27931
27932 if (gap < MIN_GAP)
27933 gap = MIN_GAP;
27934 else if (gap > MAX_GAP)
27935 gap = MAX_GAP;
27936
27937 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
27938 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
27939 }
27940
27941 /*
27942 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
27943 * does, but not when emulating X86_32
27944 */
27945 -static unsigned long mmap_legacy_base(void)
27946 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
27947 {
27948 - if (mmap_is_ia32())
27949 + if (mmap_is_ia32()) {
27950 +
27951 +#ifdef CONFIG_PAX_SEGMEXEC
27952 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
27953 + return SEGMEXEC_TASK_UNMAPPED_BASE;
27954 + else
27955 +#endif
27956 +
27957 return TASK_UNMAPPED_BASE;
27958 - else
27959 + } else
27960 return TASK_UNMAPPED_BASE + mmap_rnd();
27961 }
27962
27963 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
27964 void arch_pick_mmap_layout(struct mm_struct *mm)
27965 {
27966 if (mmap_is_legacy()) {
27967 - mm->mmap_base = mmap_legacy_base();
27968 + mm->mmap_base = mmap_legacy_base(mm);
27969 +
27970 +#ifdef CONFIG_PAX_RANDMMAP
27971 + if (mm->pax_flags & MF_PAX_RANDMMAP)
27972 + mm->mmap_base += mm->delta_mmap;
27973 +#endif
27974 +
27975 mm->get_unmapped_area = arch_get_unmapped_area;
27976 mm->unmap_area = arch_unmap_area;
27977 } else {
27978 - mm->mmap_base = mmap_base();
27979 + mm->mmap_base = mmap_base(mm);
27980 +
27981 +#ifdef CONFIG_PAX_RANDMMAP
27982 + if (mm->pax_flags & MF_PAX_RANDMMAP)
27983 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
27984 +#endif
27985 +
27986 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
27987 mm->unmap_area = arch_unmap_area_topdown;
27988 }
27989 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
27990 index dc0b727..f612039 100644
27991 --- a/arch/x86/mm/mmio-mod.c
27992 +++ b/arch/x86/mm/mmio-mod.c
27993 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
27994 break;
27995 default:
27996 {
27997 - unsigned char *ip = (unsigned char *)instptr;
27998 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
27999 my_trace->opcode = MMIO_UNKNOWN_OP;
28000 my_trace->width = 0;
28001 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
28002 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
28003 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28004 void __iomem *addr)
28005 {
28006 - static atomic_t next_id;
28007 + static atomic_unchecked_t next_id;
28008 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
28009 /* These are page-unaligned. */
28010 struct mmiotrace_map map = {
28011 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28012 .private = trace
28013 },
28014 .phys = offset,
28015 - .id = atomic_inc_return(&next_id)
28016 + .id = atomic_inc_return_unchecked(&next_id)
28017 };
28018 map.map_id = trace->id;
28019
28020 @@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
28021 ioremap_trace_core(offset, size, addr);
28022 }
28023
28024 -static void iounmap_trace_core(volatile void __iomem *addr)
28025 +static void iounmap_trace_core(const volatile void __iomem *addr)
28026 {
28027 struct mmiotrace_map map = {
28028 .phys = 0,
28029 @@ -328,7 +328,7 @@ not_enabled:
28030 }
28031 }
28032
28033 -void mmiotrace_iounmap(volatile void __iomem *addr)
28034 +void mmiotrace_iounmap(const volatile void __iomem *addr)
28035 {
28036 might_sleep();
28037 if (is_enabled()) /* recheck and proper locking in *_core() */
28038 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
28039 index b008656..773eac2 100644
28040 --- a/arch/x86/mm/pageattr-test.c
28041 +++ b/arch/x86/mm/pageattr-test.c
28042 @@ -36,7 +36,7 @@ enum {
28043
28044 static int pte_testbit(pte_t pte)
28045 {
28046 - return pte_flags(pte) & _PAGE_UNUSED1;
28047 + return pte_flags(pte) & _PAGE_CPA_TEST;
28048 }
28049
28050 struct split_state {
28051 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
28052 index a718e0d..77419bc 100644
28053 --- a/arch/x86/mm/pageattr.c
28054 +++ b/arch/x86/mm/pageattr.c
28055 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28056 */
28057 #ifdef CONFIG_PCI_BIOS
28058 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
28059 - pgprot_val(forbidden) |= _PAGE_NX;
28060 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28061 #endif
28062
28063 /*
28064 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28065 * Does not cover __inittext since that is gone later on. On
28066 * 64bit we do not enforce !NX on the low mapping
28067 */
28068 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
28069 - pgprot_val(forbidden) |= _PAGE_NX;
28070 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
28071 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28072
28073 +#ifdef CONFIG_DEBUG_RODATA
28074 /*
28075 * The .rodata section needs to be read-only. Using the pfn
28076 * catches all aliases.
28077 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28078 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
28079 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
28080 pgprot_val(forbidden) |= _PAGE_RW;
28081 +#endif
28082
28083 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
28084 /*
28085 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28086 }
28087 #endif
28088
28089 +#ifdef CONFIG_PAX_KERNEXEC
28090 + if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
28091 + pgprot_val(forbidden) |= _PAGE_RW;
28092 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28093 + }
28094 +#endif
28095 +
28096 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
28097
28098 return prot;
28099 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
28100 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
28101 {
28102 /* change init_mm */
28103 + pax_open_kernel();
28104 set_pte_atomic(kpte, pte);
28105 +
28106 #ifdef CONFIG_X86_32
28107 if (!SHARED_KERNEL_PMD) {
28108 +
28109 +#ifdef CONFIG_PAX_PER_CPU_PGD
28110 + unsigned long cpu;
28111 +#else
28112 struct page *page;
28113 +#endif
28114
28115 +#ifdef CONFIG_PAX_PER_CPU_PGD
28116 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28117 + pgd_t *pgd = get_cpu_pgd(cpu);
28118 +#else
28119 list_for_each_entry(page, &pgd_list, lru) {
28120 - pgd_t *pgd;
28121 + pgd_t *pgd = (pgd_t *)page_address(page);
28122 +#endif
28123 +
28124 pud_t *pud;
28125 pmd_t *pmd;
28126
28127 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
28128 + pgd += pgd_index(address);
28129 pud = pud_offset(pgd, address);
28130 pmd = pmd_offset(pud, address);
28131 set_pte_atomic((pte_t *)pmd, pte);
28132 }
28133 }
28134 #endif
28135 + pax_close_kernel();
28136 }
28137
28138 static int
28139 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
28140 index 0eb572e..92f5c1e 100644
28141 --- a/arch/x86/mm/pat.c
28142 +++ b/arch/x86/mm/pat.c
28143 @@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
28144
28145 if (!entry) {
28146 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
28147 - current->comm, current->pid, start, end - 1);
28148 + current->comm, task_pid_nr(current), start, end - 1);
28149 return -EINVAL;
28150 }
28151
28152 @@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28153
28154 while (cursor < to) {
28155 if (!devmem_is_allowed(pfn)) {
28156 - printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
28157 - current->comm, from, to - 1);
28158 + printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
28159 + current->comm, from, to - 1, cursor);
28160 return 0;
28161 }
28162 cursor += PAGE_SIZE;
28163 @@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
28164 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
28165 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
28166 "for [mem %#010Lx-%#010Lx]\n",
28167 - current->comm, current->pid,
28168 + current->comm, task_pid_nr(current),
28169 cattr_name(flags),
28170 base, (unsigned long long)(base + size-1));
28171 return -EINVAL;
28172 @@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
28173 flags = lookup_memtype(paddr);
28174 if (want_flags != flags) {
28175 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
28176 - current->comm, current->pid,
28177 + current->comm, task_pid_nr(current),
28178 cattr_name(want_flags),
28179 (unsigned long long)paddr,
28180 (unsigned long long)(paddr + size - 1),
28181 @@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
28182 free_memtype(paddr, paddr + size);
28183 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
28184 " for [mem %#010Lx-%#010Lx], got %s\n",
28185 - current->comm, current->pid,
28186 + current->comm, task_pid_nr(current),
28187 cattr_name(want_flags),
28188 (unsigned long long)paddr,
28189 (unsigned long long)(paddr + size - 1),
28190 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
28191 index 9f0614d..92ae64a 100644
28192 --- a/arch/x86/mm/pf_in.c
28193 +++ b/arch/x86/mm/pf_in.c
28194 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
28195 int i;
28196 enum reason_type rv = OTHERS;
28197
28198 - p = (unsigned char *)ins_addr;
28199 + p = (unsigned char *)ktla_ktva(ins_addr);
28200 p += skip_prefix(p, &prf);
28201 p += get_opcode(p, &opcode);
28202
28203 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
28204 struct prefix_bits prf;
28205 int i;
28206
28207 - p = (unsigned char *)ins_addr;
28208 + p = (unsigned char *)ktla_ktva(ins_addr);
28209 p += skip_prefix(p, &prf);
28210 p += get_opcode(p, &opcode);
28211
28212 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
28213 struct prefix_bits prf;
28214 int i;
28215
28216 - p = (unsigned char *)ins_addr;
28217 + p = (unsigned char *)ktla_ktva(ins_addr);
28218 p += skip_prefix(p, &prf);
28219 p += get_opcode(p, &opcode);
28220
28221 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
28222 struct prefix_bits prf;
28223 int i;
28224
28225 - p = (unsigned char *)ins_addr;
28226 + p = (unsigned char *)ktla_ktva(ins_addr);
28227 p += skip_prefix(p, &prf);
28228 p += get_opcode(p, &opcode);
28229 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
28230 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
28231 struct prefix_bits prf;
28232 int i;
28233
28234 - p = (unsigned char *)ins_addr;
28235 + p = (unsigned char *)ktla_ktva(ins_addr);
28236 p += skip_prefix(p, &prf);
28237 p += get_opcode(p, &opcode);
28238 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
28239 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
28240 index e27fbf8..8b56dc9 100644
28241 --- a/arch/x86/mm/pgtable.c
28242 +++ b/arch/x86/mm/pgtable.c
28243 @@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
28244 list_del(&page->lru);
28245 }
28246
28247 -#define UNSHARED_PTRS_PER_PGD \
28248 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
28249 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28250 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
28251
28252 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
28253 +{
28254 + unsigned int count = USER_PGD_PTRS;
28255
28256 + while (count--)
28257 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
28258 +}
28259 +#endif
28260 +
28261 +#ifdef CONFIG_PAX_PER_CPU_PGD
28262 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
28263 +{
28264 + unsigned int count = USER_PGD_PTRS;
28265 +
28266 + while (count--) {
28267 + pgd_t pgd;
28268 +
28269 +#ifdef CONFIG_X86_64
28270 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
28271 +#else
28272 + pgd = *src++;
28273 +#endif
28274 +
28275 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28276 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
28277 +#endif
28278 +
28279 + *dst++ = pgd;
28280 + }
28281 +
28282 +}
28283 +#endif
28284 +
28285 +#ifdef CONFIG_X86_64
28286 +#define pxd_t pud_t
28287 +#define pyd_t pgd_t
28288 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
28289 +#define pxd_free(mm, pud) pud_free((mm), (pud))
28290 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
28291 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
28292 +#define PYD_SIZE PGDIR_SIZE
28293 +#else
28294 +#define pxd_t pmd_t
28295 +#define pyd_t pud_t
28296 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
28297 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
28298 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
28299 +#define pyd_offset(mm, address) pud_offset((mm), (address))
28300 +#define PYD_SIZE PUD_SIZE
28301 +#endif
28302 +
28303 +#ifdef CONFIG_PAX_PER_CPU_PGD
28304 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
28305 +static inline void pgd_dtor(pgd_t *pgd) {}
28306 +#else
28307 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
28308 {
28309 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
28310 @@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
28311 pgd_list_del(pgd);
28312 spin_unlock(&pgd_lock);
28313 }
28314 +#endif
28315
28316 /*
28317 * List of all pgd's needed for non-PAE so it can invalidate entries
28318 @@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
28319 * -- nyc
28320 */
28321
28322 -#ifdef CONFIG_X86_PAE
28323 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
28324 /*
28325 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
28326 * updating the top-level pagetable entries to guarantee the
28327 @@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
28328 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
28329 * and initialize the kernel pmds here.
28330 */
28331 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
28332 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
28333
28334 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
28335 {
28336 @@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
28337 */
28338 flush_tlb_mm(mm);
28339 }
28340 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
28341 +#define PREALLOCATED_PXDS USER_PGD_PTRS
28342 #else /* !CONFIG_X86_PAE */
28343
28344 /* No need to prepopulate any pagetable entries in non-PAE modes. */
28345 -#define PREALLOCATED_PMDS 0
28346 +#define PREALLOCATED_PXDS 0
28347
28348 #endif /* CONFIG_X86_PAE */
28349
28350 -static void free_pmds(pmd_t *pmds[])
28351 +static void free_pxds(pxd_t *pxds[])
28352 {
28353 int i;
28354
28355 - for(i = 0; i < PREALLOCATED_PMDS; i++)
28356 - if (pmds[i])
28357 - free_page((unsigned long)pmds[i]);
28358 + for(i = 0; i < PREALLOCATED_PXDS; i++)
28359 + if (pxds[i])
28360 + free_page((unsigned long)pxds[i]);
28361 }
28362
28363 -static int preallocate_pmds(pmd_t *pmds[])
28364 +static int preallocate_pxds(pxd_t *pxds[])
28365 {
28366 int i;
28367 bool failed = false;
28368
28369 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
28370 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
28371 - if (pmd == NULL)
28372 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
28373 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
28374 + if (pxd == NULL)
28375 failed = true;
28376 - pmds[i] = pmd;
28377 + pxds[i] = pxd;
28378 }
28379
28380 if (failed) {
28381 - free_pmds(pmds);
28382 + free_pxds(pxds);
28383 return -ENOMEM;
28384 }
28385
28386 @@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
28387 * preallocate which never got a corresponding vma will need to be
28388 * freed manually.
28389 */
28390 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
28391 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
28392 {
28393 int i;
28394
28395 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
28396 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
28397 pgd_t pgd = pgdp[i];
28398
28399 if (pgd_val(pgd) != 0) {
28400 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
28401 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
28402
28403 - pgdp[i] = native_make_pgd(0);
28404 + set_pgd(pgdp + i, native_make_pgd(0));
28405
28406 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
28407 - pmd_free(mm, pmd);
28408 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
28409 + pxd_free(mm, pxd);
28410 }
28411 }
28412 }
28413
28414 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
28415 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
28416 {
28417 - pud_t *pud;
28418 + pyd_t *pyd;
28419 unsigned long addr;
28420 int i;
28421
28422 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
28423 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
28424 return;
28425
28426 - pud = pud_offset(pgd, 0);
28427 +#ifdef CONFIG_X86_64
28428 + pyd = pyd_offset(mm, 0L);
28429 +#else
28430 + pyd = pyd_offset(pgd, 0L);
28431 +#endif
28432
28433 - for (addr = i = 0; i < PREALLOCATED_PMDS;
28434 - i++, pud++, addr += PUD_SIZE) {
28435 - pmd_t *pmd = pmds[i];
28436 + for (addr = i = 0; i < PREALLOCATED_PXDS;
28437 + i++, pyd++, addr += PYD_SIZE) {
28438 + pxd_t *pxd = pxds[i];
28439
28440 if (i >= KERNEL_PGD_BOUNDARY)
28441 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
28442 - sizeof(pmd_t) * PTRS_PER_PMD);
28443 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
28444 + sizeof(pxd_t) * PTRS_PER_PMD);
28445
28446 - pud_populate(mm, pud, pmd);
28447 + pyd_populate(mm, pyd, pxd);
28448 }
28449 }
28450
28451 pgd_t *pgd_alloc(struct mm_struct *mm)
28452 {
28453 pgd_t *pgd;
28454 - pmd_t *pmds[PREALLOCATED_PMDS];
28455 + pxd_t *pxds[PREALLOCATED_PXDS];
28456
28457 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
28458
28459 @@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
28460
28461 mm->pgd = pgd;
28462
28463 - if (preallocate_pmds(pmds) != 0)
28464 + if (preallocate_pxds(pxds) != 0)
28465 goto out_free_pgd;
28466
28467 if (paravirt_pgd_alloc(mm) != 0)
28468 - goto out_free_pmds;
28469 + goto out_free_pxds;
28470
28471 /*
28472 * Make sure that pre-populating the pmds is atomic with
28473 @@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
28474 spin_lock(&pgd_lock);
28475
28476 pgd_ctor(mm, pgd);
28477 - pgd_prepopulate_pmd(mm, pgd, pmds);
28478 + pgd_prepopulate_pxd(mm, pgd, pxds);
28479
28480 spin_unlock(&pgd_lock);
28481
28482 return pgd;
28483
28484 -out_free_pmds:
28485 - free_pmds(pmds);
28486 +out_free_pxds:
28487 + free_pxds(pxds);
28488 out_free_pgd:
28489 free_page((unsigned long)pgd);
28490 out:
28491 @@ -295,7 +356,7 @@ out:
28492
28493 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
28494 {
28495 - pgd_mop_up_pmds(mm, pgd);
28496 + pgd_mop_up_pxds(mm, pgd);
28497 pgd_dtor(pgd);
28498 paravirt_pgd_free(mm, pgd);
28499 free_page((unsigned long)pgd);
28500 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
28501 index a69bcb8..19068ab 100644
28502 --- a/arch/x86/mm/pgtable_32.c
28503 +++ b/arch/x86/mm/pgtable_32.c
28504 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
28505 return;
28506 }
28507 pte = pte_offset_kernel(pmd, vaddr);
28508 +
28509 + pax_open_kernel();
28510 if (pte_val(pteval))
28511 set_pte_at(&init_mm, vaddr, pte, pteval);
28512 else
28513 pte_clear(&init_mm, vaddr, pte);
28514 + pax_close_kernel();
28515
28516 /*
28517 * It's enough to flush this one mapping.
28518 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
28519 index 410531d..0f16030 100644
28520 --- a/arch/x86/mm/setup_nx.c
28521 +++ b/arch/x86/mm/setup_nx.c
28522 @@ -5,8 +5,10 @@
28523 #include <asm/pgtable.h>
28524 #include <asm/proto.h>
28525
28526 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28527 static int disable_nx __cpuinitdata;
28528
28529 +#ifndef CONFIG_PAX_PAGEEXEC
28530 /*
28531 * noexec = on|off
28532 *
28533 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
28534 return 0;
28535 }
28536 early_param("noexec", noexec_setup);
28537 +#endif
28538 +
28539 +#endif
28540
28541 void __cpuinit x86_configure_nx(void)
28542 {
28543 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28544 if (cpu_has_nx && !disable_nx)
28545 __supported_pte_mask |= _PAGE_NX;
28546 else
28547 +#endif
28548 __supported_pte_mask &= ~_PAGE_NX;
28549 }
28550
28551 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
28552 index 13a6b29..c2fff23 100644
28553 --- a/arch/x86/mm/tlb.c
28554 +++ b/arch/x86/mm/tlb.c
28555 @@ -48,7 +48,11 @@ void leave_mm(int cpu)
28556 BUG();
28557 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
28558 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
28559 +
28560 +#ifndef CONFIG_PAX_PER_CPU_PGD
28561 load_cr3(swapper_pg_dir);
28562 +#endif
28563 +
28564 }
28565 }
28566 EXPORT_SYMBOL_GPL(leave_mm);
28567 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
28568 index 877b9a1..a8ecf42 100644
28569 --- a/arch/x86/net/bpf_jit.S
28570 +++ b/arch/x86/net/bpf_jit.S
28571 @@ -9,6 +9,7 @@
28572 */
28573 #include <linux/linkage.h>
28574 #include <asm/dwarf2.h>
28575 +#include <asm/alternative-asm.h>
28576
28577 /*
28578 * Calling convention :
28579 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
28580 jle bpf_slow_path_word
28581 mov (SKBDATA,%rsi),%eax
28582 bswap %eax /* ntohl() */
28583 + pax_force_retaddr
28584 ret
28585
28586 sk_load_half:
28587 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
28588 jle bpf_slow_path_half
28589 movzwl (SKBDATA,%rsi),%eax
28590 rol $8,%ax # ntohs()
28591 + pax_force_retaddr
28592 ret
28593
28594 sk_load_byte:
28595 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
28596 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
28597 jle bpf_slow_path_byte
28598 movzbl (SKBDATA,%rsi),%eax
28599 + pax_force_retaddr
28600 ret
28601
28602 /**
28603 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
28604 movzbl (SKBDATA,%rsi),%ebx
28605 and $15,%bl
28606 shl $2,%bl
28607 + pax_force_retaddr
28608 ret
28609
28610 /* rsi contains offset and can be scratched */
28611 @@ -109,6 +114,7 @@ bpf_slow_path_word:
28612 js bpf_error
28613 mov -12(%rbp),%eax
28614 bswap %eax
28615 + pax_force_retaddr
28616 ret
28617
28618 bpf_slow_path_half:
28619 @@ -117,12 +123,14 @@ bpf_slow_path_half:
28620 mov -12(%rbp),%ax
28621 rol $8,%ax
28622 movzwl %ax,%eax
28623 + pax_force_retaddr
28624 ret
28625
28626 bpf_slow_path_byte:
28627 bpf_slow_path_common(1)
28628 js bpf_error
28629 movzbl -12(%rbp),%eax
28630 + pax_force_retaddr
28631 ret
28632
28633 bpf_slow_path_byte_msh:
28634 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
28635 and $15,%al
28636 shl $2,%al
28637 xchg %eax,%ebx
28638 + pax_force_retaddr
28639 ret
28640
28641 #define sk_negative_common(SIZE) \
28642 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
28643 sk_negative_common(4)
28644 mov (%rax), %eax
28645 bswap %eax
28646 + pax_force_retaddr
28647 ret
28648
28649 bpf_slow_path_half_neg:
28650 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
28651 mov (%rax),%ax
28652 rol $8,%ax
28653 movzwl %ax,%eax
28654 + pax_force_retaddr
28655 ret
28656
28657 bpf_slow_path_byte_neg:
28658 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
28659 .globl sk_load_byte_negative_offset
28660 sk_negative_common(1)
28661 movzbl (%rax), %eax
28662 + pax_force_retaddr
28663 ret
28664
28665 bpf_slow_path_byte_msh_neg:
28666 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
28667 and $15,%al
28668 shl $2,%al
28669 xchg %eax,%ebx
28670 + pax_force_retaddr
28671 ret
28672
28673 bpf_error:
28674 @@ -197,4 +210,5 @@ bpf_error:
28675 xor %eax,%eax
28676 mov -8(%rbp),%rbx
28677 leaveq
28678 + pax_force_retaddr
28679 ret
28680 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
28681 index d11a470..3f9adff3 100644
28682 --- a/arch/x86/net/bpf_jit_comp.c
28683 +++ b/arch/x86/net/bpf_jit_comp.c
28684 @@ -12,6 +12,7 @@
28685 #include <linux/netdevice.h>
28686 #include <linux/filter.h>
28687 #include <linux/if_vlan.h>
28688 +#include <linux/random.h>
28689
28690 /*
28691 * Conventions :
28692 @@ -49,13 +50,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
28693 return ptr + len;
28694 }
28695
28696 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
28697 +#define MAX_INSTR_CODE_SIZE 96
28698 +#else
28699 +#define MAX_INSTR_CODE_SIZE 64
28700 +#endif
28701 +
28702 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
28703
28704 #define EMIT1(b1) EMIT(b1, 1)
28705 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
28706 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
28707 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
28708 +
28709 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
28710 +/* original constant will appear in ecx */
28711 +#define DILUTE_CONST_SEQUENCE(_off, _key) \
28712 +do { \
28713 + /* mov ecx, randkey */ \
28714 + EMIT1(0xb9); \
28715 + EMIT(_key, 4); \
28716 + /* xor ecx, randkey ^ off */ \
28717 + EMIT2(0x81, 0xf1); \
28718 + EMIT((_key) ^ (_off), 4); \
28719 +} while (0)
28720 +
28721 +#define EMIT1_off32(b1, _off) \
28722 +do { \
28723 + switch (b1) { \
28724 + case 0x05: /* add eax, imm32 */ \
28725 + case 0x2d: /* sub eax, imm32 */ \
28726 + case 0x25: /* and eax, imm32 */ \
28727 + case 0x0d: /* or eax, imm32 */ \
28728 + case 0xb8: /* mov eax, imm32 */ \
28729 + case 0x3d: /* cmp eax, imm32 */ \
28730 + case 0xa9: /* test eax, imm32 */ \
28731 + DILUTE_CONST_SEQUENCE(_off, randkey); \
28732 + EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
28733 + break; \
28734 + case 0xbb: /* mov ebx, imm32 */ \
28735 + DILUTE_CONST_SEQUENCE(_off, randkey); \
28736 + /* mov ebx, ecx */ \
28737 + EMIT2(0x89, 0xcb); \
28738 + break; \
28739 + case 0xbe: /* mov esi, imm32 */ \
28740 + DILUTE_CONST_SEQUENCE(_off, randkey); \
28741 + /* mov esi, ecx */ \
28742 + EMIT2(0x89, 0xce); \
28743 + break; \
28744 + case 0xe9: /* jmp rel imm32 */ \
28745 + EMIT1(b1); \
28746 + EMIT(_off, 4); \
28747 + /* prevent fall-through, we're not called if off = 0 */ \
28748 + EMIT(0xcccccccc, 4); \
28749 + EMIT(0xcccccccc, 4); \
28750 + break; \
28751 + default: \
28752 + EMIT1(b1); \
28753 + EMIT(_off, 4); \
28754 + } \
28755 +} while (0)
28756 +
28757 +#define EMIT2_off32(b1, b2, _off) \
28758 +do { \
28759 + if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
28760 + EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
28761 + EMIT(randkey, 4); \
28762 + EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
28763 + EMIT((_off) - randkey, 4); \
28764 + } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
28765 + DILUTE_CONST_SEQUENCE(_off, randkey); \
28766 + /* imul eax, ecx */ \
28767 + EMIT3(0x0f, 0xaf, 0xc1); \
28768 + } else { \
28769 + EMIT2(b1, b2); \
28770 + EMIT(_off, 4); \
28771 + } \
28772 +} while (0)
28773 +#else
28774 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
28775 +#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
28776 +#endif
28777
28778 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
28779 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
28780 @@ -90,6 +165,24 @@ do { \
28781 #define X86_JBE 0x76
28782 #define X86_JA 0x77
28783
28784 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
28785 +#define APPEND_FLOW_VERIFY() \
28786 +do { \
28787 + /* mov ecx, randkey */ \
28788 + EMIT1(0xb9); \
28789 + EMIT(randkey, 4); \
28790 + /* cmp ecx, randkey */ \
28791 + EMIT2(0x81, 0xf9); \
28792 + EMIT(randkey, 4); \
28793 + /* jz after 8 int 3s */ \
28794 + EMIT2(0x74, 0x08); \
28795 + EMIT(0xcccccccc, 4); \
28796 + EMIT(0xcccccccc, 4); \
28797 +} while (0)
28798 +#else
28799 +#define APPEND_FLOW_VERIFY() do { } while (0)
28800 +#endif
28801 +
28802 #define EMIT_COND_JMP(op, offset) \
28803 do { \
28804 if (is_near(offset)) \
28805 @@ -97,6 +190,7 @@ do { \
28806 else { \
28807 EMIT2(0x0f, op + 0x10); \
28808 EMIT(offset, 4); /* jxx .+off32 */ \
28809 + APPEND_FLOW_VERIFY(); \
28810 } \
28811 } while (0)
28812
28813 @@ -121,12 +215,17 @@ static inline void bpf_flush_icache(void *start, void *end)
28814 set_fs(old_fs);
28815 }
28816
28817 +struct bpf_jit_work {
28818 + struct work_struct work;
28819 + void *image;
28820 +};
28821 +
28822 #define CHOOSE_LOAD_FUNC(K, func) \
28823 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
28824
28825 void bpf_jit_compile(struct sk_filter *fp)
28826 {
28827 - u8 temp[64];
28828 + u8 temp[MAX_INSTR_CODE_SIZE];
28829 u8 *prog;
28830 unsigned int proglen, oldproglen = 0;
28831 int ilen, i;
28832 @@ -139,6 +238,9 @@ void bpf_jit_compile(struct sk_filter *fp)
28833 unsigned int *addrs;
28834 const struct sock_filter *filter = fp->insns;
28835 int flen = fp->len;
28836 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
28837 + unsigned int randkey;
28838 +#endif
28839
28840 if (!bpf_jit_enable)
28841 return;
28842 @@ -147,11 +249,19 @@ void bpf_jit_compile(struct sk_filter *fp)
28843 if (addrs == NULL)
28844 return;
28845
28846 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
28847 + if (!fp->work)
28848 + goto out;
28849 +
28850 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
28851 + randkey = get_random_int();
28852 +#endif
28853 +
28854 /* Before first pass, make a rough estimation of addrs[]
28855 - * each bpf instruction is translated to less than 64 bytes
28856 + * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
28857 */
28858 for (proglen = 0, i = 0; i < flen; i++) {
28859 - proglen += 64;
28860 + proglen += MAX_INSTR_CODE_SIZE;
28861 addrs[i] = proglen;
28862 }
28863 cleanup_addr = proglen; /* epilogue address */
28864 @@ -261,10 +371,8 @@ void bpf_jit_compile(struct sk_filter *fp)
28865 case BPF_S_ALU_MUL_K: /* A *= K */
28866 if (is_imm8(K))
28867 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
28868 - else {
28869 - EMIT2(0x69, 0xc0); /* imul imm32,%eax */
28870 - EMIT(K, 4);
28871 - }
28872 + else
28873 + EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
28874 break;
28875 case BPF_S_ALU_DIV_X: /* A /= X; */
28876 seen |= SEEN_XREG;
28877 @@ -304,13 +412,23 @@ void bpf_jit_compile(struct sk_filter *fp)
28878 break;
28879 case BPF_S_ALU_MOD_K: /* A %= K; */
28880 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
28881 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
28882 + DILUTE_CONST_SEQUENCE(K, randkey);
28883 +#else
28884 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
28885 +#endif
28886 EMIT2(0xf7, 0xf1); /* div %ecx */
28887 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
28888 break;
28889 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
28890 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
28891 + DILUTE_CONST_SEQUENCE(K, randkey);
28892 + // imul rax, rcx
28893 + EMIT4(0x48, 0x0f, 0xaf, 0xc1);
28894 +#else
28895 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
28896 EMIT(K, 4);
28897 +#endif
28898 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
28899 break;
28900 case BPF_S_ALU_AND_X:
28901 @@ -564,8 +682,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
28902 if (is_imm8(K)) {
28903 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
28904 } else {
28905 - EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
28906 - EMIT(K, 4);
28907 + EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
28908 }
28909 } else {
28910 EMIT2(0x89,0xde); /* mov %ebx,%esi */
28911 @@ -648,17 +765,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
28912 break;
28913 default:
28914 /* hmm, too complex filter, give up with jit compiler */
28915 - goto out;
28916 + goto error;
28917 }
28918 ilen = prog - temp;
28919 if (image) {
28920 if (unlikely(proglen + ilen > oldproglen)) {
28921 pr_err("bpb_jit_compile fatal error\n");
28922 - kfree(addrs);
28923 - module_free(NULL, image);
28924 - return;
28925 + module_free_exec(NULL, image);
28926 + goto error;
28927 }
28928 + pax_open_kernel();
28929 memcpy(image + proglen, temp, ilen);
28930 + pax_close_kernel();
28931 }
28932 proglen += ilen;
28933 addrs[i] = proglen;
28934 @@ -679,11 +797,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
28935 break;
28936 }
28937 if (proglen == oldproglen) {
28938 - image = module_alloc(max_t(unsigned int,
28939 - proglen,
28940 - sizeof(struct work_struct)));
28941 + image = module_alloc_exec(proglen);
28942 if (!image)
28943 - goto out;
28944 + goto error;
28945 }
28946 oldproglen = proglen;
28947 }
28948 @@ -699,7 +815,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
28949 bpf_flush_icache(image, image + proglen);
28950
28951 fp->bpf_func = (void *)image;
28952 - }
28953 + } else
28954 +error:
28955 + kfree(fp->work);
28956 +
28957 out:
28958 kfree(addrs);
28959 return;
28960 @@ -707,18 +826,20 @@ out:
28961
28962 static void jit_free_defer(struct work_struct *arg)
28963 {
28964 - module_free(NULL, arg);
28965 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
28966 + kfree(arg);
28967 }
28968
28969 /* run from softirq, we must use a work_struct to call
28970 - * module_free() from process context
28971 + * module_free_exec() from process context
28972 */
28973 void bpf_jit_free(struct sk_filter *fp)
28974 {
28975 if (fp->bpf_func != sk_run_filter) {
28976 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
28977 + struct work_struct *work = &fp->work->work;
28978
28979 INIT_WORK(work, jit_free_defer);
28980 + fp->work->image = fp->bpf_func;
28981 schedule_work(work);
28982 }
28983 }
28984 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
28985 index d6aa6e8..266395a 100644
28986 --- a/arch/x86/oprofile/backtrace.c
28987 +++ b/arch/x86/oprofile/backtrace.c
28988 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
28989 struct stack_frame_ia32 *fp;
28990 unsigned long bytes;
28991
28992 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
28993 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
28994 if (bytes != sizeof(bufhead))
28995 return NULL;
28996
28997 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
28998 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
28999
29000 oprofile_add_trace(bufhead[0].return_address);
29001
29002 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
29003 struct stack_frame bufhead[2];
29004 unsigned long bytes;
29005
29006 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29007 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29008 if (bytes != sizeof(bufhead))
29009 return NULL;
29010
29011 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
29012 {
29013 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
29014
29015 - if (!user_mode_vm(regs)) {
29016 + if (!user_mode(regs)) {
29017 unsigned long stack = kernel_stack_pointer(regs);
29018 if (depth)
29019 dump_trace(NULL, regs, (unsigned long *)stack, 0,
29020 diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
29021 index e9e6ed5..e47ae67 100644
29022 --- a/arch/x86/pci/amd_bus.c
29023 +++ b/arch/x86/pci/amd_bus.c
29024 @@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
29025 return NOTIFY_OK;
29026 }
29027
29028 -static struct notifier_block __cpuinitdata amd_cpu_notifier = {
29029 +static struct notifier_block amd_cpu_notifier = {
29030 .notifier_call = amd_cpu_notify,
29031 };
29032
29033 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
29034 index 6eb18c4..20d83de 100644
29035 --- a/arch/x86/pci/mrst.c
29036 +++ b/arch/x86/pci/mrst.c
29037 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
29038 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
29039 pci_mmcfg_late_init();
29040 pcibios_enable_irq = mrst_pci_irq_enable;
29041 - pci_root_ops = pci_mrst_ops;
29042 + pax_open_kernel();
29043 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
29044 + pax_close_kernel();
29045 pci_soc_mode = 1;
29046 /* Continue with standard init */
29047 return 1;
29048 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
29049 index c77b24a..c979855 100644
29050 --- a/arch/x86/pci/pcbios.c
29051 +++ b/arch/x86/pci/pcbios.c
29052 @@ -79,7 +79,7 @@ union bios32 {
29053 static struct {
29054 unsigned long address;
29055 unsigned short segment;
29056 -} bios32_indirect = { 0, __KERNEL_CS };
29057 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
29058
29059 /*
29060 * Returns the entry point for the given service, NULL on error
29061 @@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
29062 unsigned long length; /* %ecx */
29063 unsigned long entry; /* %edx */
29064 unsigned long flags;
29065 + struct desc_struct d, *gdt;
29066
29067 local_irq_save(flags);
29068 - __asm__("lcall *(%%edi); cld"
29069 +
29070 + gdt = get_cpu_gdt_table(smp_processor_id());
29071 +
29072 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
29073 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
29074 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
29075 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
29076 +
29077 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
29078 : "=a" (return_code),
29079 "=b" (address),
29080 "=c" (length),
29081 "=d" (entry)
29082 : "0" (service),
29083 "1" (0),
29084 - "D" (&bios32_indirect));
29085 + "D" (&bios32_indirect),
29086 + "r"(__PCIBIOS_DS)
29087 + : "memory");
29088 +
29089 + pax_open_kernel();
29090 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
29091 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
29092 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
29093 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
29094 + pax_close_kernel();
29095 +
29096 local_irq_restore(flags);
29097
29098 switch (return_code) {
29099 - case 0:
29100 - return address + entry;
29101 - case 0x80: /* Not present */
29102 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
29103 - return 0;
29104 - default: /* Shouldn't happen */
29105 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
29106 - service, return_code);
29107 + case 0: {
29108 + int cpu;
29109 + unsigned char flags;
29110 +
29111 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
29112 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
29113 + printk(KERN_WARNING "bios32_service: not valid\n");
29114 return 0;
29115 + }
29116 + address = address + PAGE_OFFSET;
29117 + length += 16UL; /* some BIOSs underreport this... */
29118 + flags = 4;
29119 + if (length >= 64*1024*1024) {
29120 + length >>= PAGE_SHIFT;
29121 + flags |= 8;
29122 + }
29123 +
29124 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
29125 + gdt = get_cpu_gdt_table(cpu);
29126 + pack_descriptor(&d, address, length, 0x9b, flags);
29127 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
29128 + pack_descriptor(&d, address, length, 0x93, flags);
29129 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
29130 + }
29131 + return entry;
29132 + }
29133 + case 0x80: /* Not present */
29134 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
29135 + return 0;
29136 + default: /* Shouldn't happen */
29137 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
29138 + service, return_code);
29139 + return 0;
29140 }
29141 }
29142
29143 static struct {
29144 unsigned long address;
29145 unsigned short segment;
29146 -} pci_indirect = { 0, __KERNEL_CS };
29147 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
29148
29149 -static int pci_bios_present;
29150 +static int pci_bios_present __read_only;
29151
29152 static int check_pcibios(void)
29153 {
29154 @@ -131,11 +174,13 @@ static int check_pcibios(void)
29155 unsigned long flags, pcibios_entry;
29156
29157 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
29158 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
29159 + pci_indirect.address = pcibios_entry;
29160
29161 local_irq_save(flags);
29162 - __asm__(
29163 - "lcall *(%%edi); cld\n\t"
29164 + __asm__("movw %w6, %%ds\n\t"
29165 + "lcall *%%ss:(%%edi); cld\n\t"
29166 + "push %%ss\n\t"
29167 + "pop %%ds\n\t"
29168 "jc 1f\n\t"
29169 "xor %%ah, %%ah\n"
29170 "1:"
29171 @@ -144,7 +189,8 @@ static int check_pcibios(void)
29172 "=b" (ebx),
29173 "=c" (ecx)
29174 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
29175 - "D" (&pci_indirect)
29176 + "D" (&pci_indirect),
29177 + "r" (__PCIBIOS_DS)
29178 : "memory");
29179 local_irq_restore(flags);
29180
29181 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29182
29183 switch (len) {
29184 case 1:
29185 - __asm__("lcall *(%%esi); cld\n\t"
29186 + __asm__("movw %w6, %%ds\n\t"
29187 + "lcall *%%ss:(%%esi); cld\n\t"
29188 + "push %%ss\n\t"
29189 + "pop %%ds\n\t"
29190 "jc 1f\n\t"
29191 "xor %%ah, %%ah\n"
29192 "1:"
29193 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29194 : "1" (PCIBIOS_READ_CONFIG_BYTE),
29195 "b" (bx),
29196 "D" ((long)reg),
29197 - "S" (&pci_indirect));
29198 + "S" (&pci_indirect),
29199 + "r" (__PCIBIOS_DS));
29200 /*
29201 * Zero-extend the result beyond 8 bits, do not trust the
29202 * BIOS having done it:
29203 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29204 *value &= 0xff;
29205 break;
29206 case 2:
29207 - __asm__("lcall *(%%esi); cld\n\t"
29208 + __asm__("movw %w6, %%ds\n\t"
29209 + "lcall *%%ss:(%%esi); cld\n\t"
29210 + "push %%ss\n\t"
29211 + "pop %%ds\n\t"
29212 "jc 1f\n\t"
29213 "xor %%ah, %%ah\n"
29214 "1:"
29215 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29216 : "1" (PCIBIOS_READ_CONFIG_WORD),
29217 "b" (bx),
29218 "D" ((long)reg),
29219 - "S" (&pci_indirect));
29220 + "S" (&pci_indirect),
29221 + "r" (__PCIBIOS_DS));
29222 /*
29223 * Zero-extend the result beyond 16 bits, do not trust the
29224 * BIOS having done it:
29225 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29226 *value &= 0xffff;
29227 break;
29228 case 4:
29229 - __asm__("lcall *(%%esi); cld\n\t"
29230 + __asm__("movw %w6, %%ds\n\t"
29231 + "lcall *%%ss:(%%esi); cld\n\t"
29232 + "push %%ss\n\t"
29233 + "pop %%ds\n\t"
29234 "jc 1f\n\t"
29235 "xor %%ah, %%ah\n"
29236 "1:"
29237 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29238 : "1" (PCIBIOS_READ_CONFIG_DWORD),
29239 "b" (bx),
29240 "D" ((long)reg),
29241 - "S" (&pci_indirect));
29242 + "S" (&pci_indirect),
29243 + "r" (__PCIBIOS_DS));
29244 break;
29245 }
29246
29247 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29248
29249 switch (len) {
29250 case 1:
29251 - __asm__("lcall *(%%esi); cld\n\t"
29252 + __asm__("movw %w6, %%ds\n\t"
29253 + "lcall *%%ss:(%%esi); cld\n\t"
29254 + "push %%ss\n\t"
29255 + "pop %%ds\n\t"
29256 "jc 1f\n\t"
29257 "xor %%ah, %%ah\n"
29258 "1:"
29259 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29260 "c" (value),
29261 "b" (bx),
29262 "D" ((long)reg),
29263 - "S" (&pci_indirect));
29264 + "S" (&pci_indirect),
29265 + "r" (__PCIBIOS_DS));
29266 break;
29267 case 2:
29268 - __asm__("lcall *(%%esi); cld\n\t"
29269 + __asm__("movw %w6, %%ds\n\t"
29270 + "lcall *%%ss:(%%esi); cld\n\t"
29271 + "push %%ss\n\t"
29272 + "pop %%ds\n\t"
29273 "jc 1f\n\t"
29274 "xor %%ah, %%ah\n"
29275 "1:"
29276 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29277 "c" (value),
29278 "b" (bx),
29279 "D" ((long)reg),
29280 - "S" (&pci_indirect));
29281 + "S" (&pci_indirect),
29282 + "r" (__PCIBIOS_DS));
29283 break;
29284 case 4:
29285 - __asm__("lcall *(%%esi); cld\n\t"
29286 + __asm__("movw %w6, %%ds\n\t"
29287 + "lcall *%%ss:(%%esi); cld\n\t"
29288 + "push %%ss\n\t"
29289 + "pop %%ds\n\t"
29290 "jc 1f\n\t"
29291 "xor %%ah, %%ah\n"
29292 "1:"
29293 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29294 "c" (value),
29295 "b" (bx),
29296 "D" ((long)reg),
29297 - "S" (&pci_indirect));
29298 + "S" (&pci_indirect),
29299 + "r" (__PCIBIOS_DS));
29300 break;
29301 }
29302
29303 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
29304
29305 DBG("PCI: Fetching IRQ routing table... ");
29306 __asm__("push %%es\n\t"
29307 + "movw %w8, %%ds\n\t"
29308 "push %%ds\n\t"
29309 "pop %%es\n\t"
29310 - "lcall *(%%esi); cld\n\t"
29311 + "lcall *%%ss:(%%esi); cld\n\t"
29312 "pop %%es\n\t"
29313 + "push %%ss\n\t"
29314 + "pop %%ds\n"
29315 "jc 1f\n\t"
29316 "xor %%ah, %%ah\n"
29317 "1:"
29318 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
29319 "1" (0),
29320 "D" ((long) &opt),
29321 "S" (&pci_indirect),
29322 - "m" (opt)
29323 + "m" (opt),
29324 + "r" (__PCIBIOS_DS)
29325 : "memory");
29326 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
29327 if (ret & 0xff00)
29328 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
29329 {
29330 int ret;
29331
29332 - __asm__("lcall *(%%esi); cld\n\t"
29333 + __asm__("movw %w5, %%ds\n\t"
29334 + "lcall *%%ss:(%%esi); cld\n\t"
29335 + "push %%ss\n\t"
29336 + "pop %%ds\n"
29337 "jc 1f\n\t"
29338 "xor %%ah, %%ah\n"
29339 "1:"
29340 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
29341 : "0" (PCIBIOS_SET_PCI_HW_INT),
29342 "b" ((dev->bus->number << 8) | dev->devfn),
29343 "c" ((irq << 8) | (pin + 10)),
29344 - "S" (&pci_indirect));
29345 + "S" (&pci_indirect),
29346 + "r" (__PCIBIOS_DS));
29347 return !(ret & 0xff00);
29348 }
29349 EXPORT_SYMBOL(pcibios_set_irq_routing);
29350 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
29351 index 40e4469..1ab536e 100644
29352 --- a/arch/x86/platform/efi/efi_32.c
29353 +++ b/arch/x86/platform/efi/efi_32.c
29354 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
29355 {
29356 struct desc_ptr gdt_descr;
29357
29358 +#ifdef CONFIG_PAX_KERNEXEC
29359 + struct desc_struct d;
29360 +#endif
29361 +
29362 local_irq_save(efi_rt_eflags);
29363
29364 load_cr3(initial_page_table);
29365 __flush_tlb_all();
29366
29367 +#ifdef CONFIG_PAX_KERNEXEC
29368 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
29369 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
29370 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
29371 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
29372 +#endif
29373 +
29374 gdt_descr.address = __pa(get_cpu_gdt_table(0));
29375 gdt_descr.size = GDT_SIZE - 1;
29376 load_gdt(&gdt_descr);
29377 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
29378 {
29379 struct desc_ptr gdt_descr;
29380
29381 +#ifdef CONFIG_PAX_KERNEXEC
29382 + struct desc_struct d;
29383 +
29384 + memset(&d, 0, sizeof d);
29385 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
29386 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
29387 +#endif
29388 +
29389 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
29390 gdt_descr.size = GDT_SIZE - 1;
29391 load_gdt(&gdt_descr);
29392 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
29393 index fbe66e6..eae5e38 100644
29394 --- a/arch/x86/platform/efi/efi_stub_32.S
29395 +++ b/arch/x86/platform/efi/efi_stub_32.S
29396 @@ -6,7 +6,9 @@
29397 */
29398
29399 #include <linux/linkage.h>
29400 +#include <linux/init.h>
29401 #include <asm/page_types.h>
29402 +#include <asm/segment.h>
29403
29404 /*
29405 * efi_call_phys(void *, ...) is a function with variable parameters.
29406 @@ -20,7 +22,7 @@
29407 * service functions will comply with gcc calling convention, too.
29408 */
29409
29410 -.text
29411 +__INIT
29412 ENTRY(efi_call_phys)
29413 /*
29414 * 0. The function can only be called in Linux kernel. So CS has been
29415 @@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
29416 * The mapping of lower virtual memory has been created in prelog and
29417 * epilog.
29418 */
29419 - movl $1f, %edx
29420 - subl $__PAGE_OFFSET, %edx
29421 - jmp *%edx
29422 +#ifdef CONFIG_PAX_KERNEXEC
29423 + movl $(__KERNEXEC_EFI_DS), %edx
29424 + mov %edx, %ds
29425 + mov %edx, %es
29426 + mov %edx, %ss
29427 + addl $2f,(1f)
29428 + ljmp *(1f)
29429 +
29430 +__INITDATA
29431 +1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
29432 +.previous
29433 +
29434 +2:
29435 + subl $2b,(1b)
29436 +#else
29437 + jmp 1f-__PAGE_OFFSET
29438 1:
29439 +#endif
29440
29441 /*
29442 * 2. Now on the top of stack is the return
29443 @@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
29444 * parameter 2, ..., param n. To make things easy, we save the return
29445 * address of efi_call_phys in a global variable.
29446 */
29447 - popl %edx
29448 - movl %edx, saved_return_addr
29449 - /* get the function pointer into ECX*/
29450 - popl %ecx
29451 - movl %ecx, efi_rt_function_ptr
29452 - movl $2f, %edx
29453 - subl $__PAGE_OFFSET, %edx
29454 - pushl %edx
29455 + popl (saved_return_addr)
29456 + popl (efi_rt_function_ptr)
29457
29458 /*
29459 * 3. Clear PG bit in %CR0.
29460 @@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
29461 /*
29462 * 5. Call the physical function.
29463 */
29464 - jmp *%ecx
29465 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
29466
29467 -2:
29468 /*
29469 * 6. After EFI runtime service returns, control will return to
29470 * following instruction. We'd better readjust stack pointer first.
29471 @@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
29472 movl %cr0, %edx
29473 orl $0x80000000, %edx
29474 movl %edx, %cr0
29475 - jmp 1f
29476 -1:
29477 +
29478 /*
29479 * 8. Now restore the virtual mode from flat mode by
29480 * adding EIP with PAGE_OFFSET.
29481 */
29482 - movl $1f, %edx
29483 - jmp *%edx
29484 +#ifdef CONFIG_PAX_KERNEXEC
29485 + movl $(__KERNEL_DS), %edx
29486 + mov %edx, %ds
29487 + mov %edx, %es
29488 + mov %edx, %ss
29489 + ljmp $(__KERNEL_CS),$1f
29490 +#else
29491 + jmp 1f+__PAGE_OFFSET
29492 +#endif
29493 1:
29494
29495 /*
29496 * 9. Balance the stack. And because EAX contain the return value,
29497 * we'd better not clobber it.
29498 */
29499 - leal efi_rt_function_ptr, %edx
29500 - movl (%edx), %ecx
29501 - pushl %ecx
29502 + pushl (efi_rt_function_ptr)
29503
29504 /*
29505 - * 10. Push the saved return address onto the stack and return.
29506 + * 10. Return to the saved return address.
29507 */
29508 - leal saved_return_addr, %edx
29509 - movl (%edx), %ecx
29510 - pushl %ecx
29511 - ret
29512 + jmpl *(saved_return_addr)
29513 ENDPROC(efi_call_phys)
29514 .previous
29515
29516 -.data
29517 +__INITDATA
29518 saved_return_addr:
29519 .long 0
29520 efi_rt_function_ptr:
29521 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
29522 index 4c07cca..2c8427d 100644
29523 --- a/arch/x86/platform/efi/efi_stub_64.S
29524 +++ b/arch/x86/platform/efi/efi_stub_64.S
29525 @@ -7,6 +7,7 @@
29526 */
29527
29528 #include <linux/linkage.h>
29529 +#include <asm/alternative-asm.h>
29530
29531 #define SAVE_XMM \
29532 mov %rsp, %rax; \
29533 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
29534 call *%rdi
29535 addq $32, %rsp
29536 RESTORE_XMM
29537 + pax_force_retaddr 0, 1
29538 ret
29539 ENDPROC(efi_call0)
29540
29541 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
29542 call *%rdi
29543 addq $32, %rsp
29544 RESTORE_XMM
29545 + pax_force_retaddr 0, 1
29546 ret
29547 ENDPROC(efi_call1)
29548
29549 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
29550 call *%rdi
29551 addq $32, %rsp
29552 RESTORE_XMM
29553 + pax_force_retaddr 0, 1
29554 ret
29555 ENDPROC(efi_call2)
29556
29557 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
29558 call *%rdi
29559 addq $32, %rsp
29560 RESTORE_XMM
29561 + pax_force_retaddr 0, 1
29562 ret
29563 ENDPROC(efi_call3)
29564
29565 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
29566 call *%rdi
29567 addq $32, %rsp
29568 RESTORE_XMM
29569 + pax_force_retaddr 0, 1
29570 ret
29571 ENDPROC(efi_call4)
29572
29573 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
29574 call *%rdi
29575 addq $48, %rsp
29576 RESTORE_XMM
29577 + pax_force_retaddr 0, 1
29578 ret
29579 ENDPROC(efi_call5)
29580
29581 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
29582 call *%rdi
29583 addq $48, %rsp
29584 RESTORE_XMM
29585 + pax_force_retaddr 0, 1
29586 ret
29587 ENDPROC(efi_call6)
29588 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
29589 index e31bcd8..f12dc46 100644
29590 --- a/arch/x86/platform/mrst/mrst.c
29591 +++ b/arch/x86/platform/mrst/mrst.c
29592 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
29593 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
29594 int sfi_mrtc_num;
29595
29596 -static void mrst_power_off(void)
29597 +static __noreturn void mrst_power_off(void)
29598 {
29599 + BUG();
29600 }
29601
29602 -static void mrst_reboot(void)
29603 +static __noreturn void mrst_reboot(void)
29604 {
29605 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
29606 + BUG();
29607 }
29608
29609 /* parse all the mtimer info to a static mtimer array */
29610 diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
29611 index d6ee929..3637cb5 100644
29612 --- a/arch/x86/platform/olpc/olpc_dt.c
29613 +++ b/arch/x86/platform/olpc/olpc_dt.c
29614 @@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
29615 return res;
29616 }
29617
29618 -static struct of_pdt_ops prom_olpc_ops __initdata = {
29619 +static struct of_pdt_ops prom_olpc_ops __initconst = {
29620 .nextprop = olpc_dt_nextprop,
29621 .getproplen = olpc_dt_getproplen,
29622 .getproperty = olpc_dt_getproperty,
29623 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
29624 index 120cee1..b2db75a 100644
29625 --- a/arch/x86/power/cpu.c
29626 +++ b/arch/x86/power/cpu.c
29627 @@ -133,7 +133,7 @@ static void do_fpu_end(void)
29628 static void fix_processor_context(void)
29629 {
29630 int cpu = smp_processor_id();
29631 - struct tss_struct *t = &per_cpu(init_tss, cpu);
29632 + struct tss_struct *t = init_tss + cpu;
29633
29634 set_tss_desc(cpu, t); /*
29635 * This just modifies memory; should not be
29636 @@ -143,8 +143,6 @@ static void fix_processor_context(void)
29637 */
29638
29639 #ifdef CONFIG_X86_64
29640 - get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
29641 -
29642 syscall_init(); /* This sets MSR_*STAR and related */
29643 #endif
29644 load_TR_desc(); /* This does ltr */
29645 diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
29646 index cbca565..bae7133 100644
29647 --- a/arch/x86/realmode/init.c
29648 +++ b/arch/x86/realmode/init.c
29649 @@ -62,7 +62,13 @@ void __init setup_real_mode(void)
29650 __va(real_mode_header->trampoline_header);
29651
29652 #ifdef CONFIG_X86_32
29653 - trampoline_header->start = __pa(startup_32_smp);
29654 + trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
29655 +
29656 +#ifdef CONFIG_PAX_KERNEXEC
29657 + trampoline_header->start -= LOAD_PHYSICAL_ADDR;
29658 +#endif
29659 +
29660 + trampoline_header->boot_cs = __BOOT_CS;
29661 trampoline_header->gdt_limit = __BOOT_DS + 7;
29662 trampoline_header->gdt_base = __pa(boot_gdt);
29663 #else
29664 diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
29665 index 8869287..d577672 100644
29666 --- a/arch/x86/realmode/rm/Makefile
29667 +++ b/arch/x86/realmode/rm/Makefile
29668 @@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
29669 $(call cc-option, -fno-unit-at-a-time)) \
29670 $(call cc-option, -fno-stack-protector) \
29671 $(call cc-option, -mpreferred-stack-boundary=2)
29672 +ifdef CONSTIFY_PLUGIN
29673 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
29674 +endif
29675 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
29676 GCOV_PROFILE := n
29677 diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
29678 index a28221d..93c40f1 100644
29679 --- a/arch/x86/realmode/rm/header.S
29680 +++ b/arch/x86/realmode/rm/header.S
29681 @@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
29682 #endif
29683 /* APM/BIOS reboot */
29684 .long pa_machine_real_restart_asm
29685 -#ifdef CONFIG_X86_64
29686 +#ifdef CONFIG_X86_32
29687 + .long __KERNEL_CS
29688 +#else
29689 .long __KERNEL32_CS
29690 #endif
29691 END(real_mode_header)
29692 diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
29693 index c1b2791..f9e31c7 100644
29694 --- a/arch/x86/realmode/rm/trampoline_32.S
29695 +++ b/arch/x86/realmode/rm/trampoline_32.S
29696 @@ -25,6 +25,12 @@
29697 #include <asm/page_types.h>
29698 #include "realmode.h"
29699
29700 +#ifdef CONFIG_PAX_KERNEXEC
29701 +#define ta(X) (X)
29702 +#else
29703 +#define ta(X) (pa_ ## X)
29704 +#endif
29705 +
29706 .text
29707 .code16
29708
29709 @@ -39,8 +45,6 @@ ENTRY(trampoline_start)
29710
29711 cli # We should be safe anyway
29712
29713 - movl tr_start, %eax # where we need to go
29714 -
29715 movl $0xA5A5A5A5, trampoline_status
29716 # write marker for master knows we're running
29717
29718 @@ -56,7 +60,7 @@ ENTRY(trampoline_start)
29719 movw $1, %dx # protected mode (PE) bit
29720 lmsw %dx # into protected mode
29721
29722 - ljmpl $__BOOT_CS, $pa_startup_32
29723 + ljmpl *(trampoline_header)
29724
29725 .section ".text32","ax"
29726 .code32
29727 @@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
29728 .balign 8
29729 GLOBAL(trampoline_header)
29730 tr_start: .space 4
29731 - tr_gdt_pad: .space 2
29732 + tr_boot_cs: .space 2
29733 tr_gdt: .space 6
29734 END(trampoline_header)
29735
29736 diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
29737 index bb360dc..3e5945f 100644
29738 --- a/arch/x86/realmode/rm/trampoline_64.S
29739 +++ b/arch/x86/realmode/rm/trampoline_64.S
29740 @@ -107,7 +107,7 @@ ENTRY(startup_32)
29741 wrmsr
29742
29743 # Enable paging and in turn activate Long Mode
29744 - movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
29745 + movl $(X86_CR0_PG | X86_CR0_PE), %eax
29746 movl %eax, %cr0
29747
29748 /*
29749 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
29750 index 79d67bd..c7e1b90 100644
29751 --- a/arch/x86/tools/relocs.c
29752 +++ b/arch/x86/tools/relocs.c
29753 @@ -12,10 +12,13 @@
29754 #include <regex.h>
29755 #include <tools/le_byteshift.h>
29756
29757 +#include "../../../include/generated/autoconf.h"
29758 +
29759 static void die(char *fmt, ...);
29760
29761 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
29762 static Elf32_Ehdr ehdr;
29763 +static Elf32_Phdr *phdr;
29764 static unsigned long reloc_count, reloc_idx;
29765 static unsigned long *relocs;
29766 static unsigned long reloc16_count, reloc16_idx;
29767 @@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
29768 }
29769 }
29770
29771 +static void read_phdrs(FILE *fp)
29772 +{
29773 + unsigned int i;
29774 +
29775 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
29776 + if (!phdr) {
29777 + die("Unable to allocate %d program headers\n",
29778 + ehdr.e_phnum);
29779 + }
29780 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
29781 + die("Seek to %d failed: %s\n",
29782 + ehdr.e_phoff, strerror(errno));
29783 + }
29784 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
29785 + die("Cannot read ELF program headers: %s\n",
29786 + strerror(errno));
29787 + }
29788 + for(i = 0; i < ehdr.e_phnum; i++) {
29789 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
29790 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
29791 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
29792 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
29793 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
29794 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
29795 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
29796 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
29797 + }
29798 +
29799 +}
29800 +
29801 static void read_shdrs(FILE *fp)
29802 {
29803 - int i;
29804 + unsigned int i;
29805 Elf32_Shdr shdr;
29806
29807 secs = calloc(ehdr.e_shnum, sizeof(struct section));
29808 @@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
29809
29810 static void read_strtabs(FILE *fp)
29811 {
29812 - int i;
29813 + unsigned int i;
29814 for (i = 0; i < ehdr.e_shnum; i++) {
29815 struct section *sec = &secs[i];
29816 if (sec->shdr.sh_type != SHT_STRTAB) {
29817 @@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
29818
29819 static void read_symtabs(FILE *fp)
29820 {
29821 - int i,j;
29822 + unsigned int i,j;
29823 for (i = 0; i < ehdr.e_shnum; i++) {
29824 struct section *sec = &secs[i];
29825 if (sec->shdr.sh_type != SHT_SYMTAB) {
29826 @@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
29827 }
29828
29829
29830 -static void read_relocs(FILE *fp)
29831 +static void read_relocs(FILE *fp, int use_real_mode)
29832 {
29833 - int i,j;
29834 + unsigned int i,j;
29835 + uint32_t base;
29836 +
29837 for (i = 0; i < ehdr.e_shnum; i++) {
29838 struct section *sec = &secs[i];
29839 if (sec->shdr.sh_type != SHT_REL) {
29840 @@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
29841 die("Cannot read symbol table: %s\n",
29842 strerror(errno));
29843 }
29844 + base = 0;
29845 +
29846 +#ifdef CONFIG_X86_32
29847 + for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
29848 + if (phdr[j].p_type != PT_LOAD )
29849 + continue;
29850 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
29851 + continue;
29852 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
29853 + break;
29854 + }
29855 +#endif
29856 +
29857 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
29858 Elf32_Rel *rel = &sec->reltab[j];
29859 - rel->r_offset = elf32_to_cpu(rel->r_offset);
29860 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
29861 rel->r_info = elf32_to_cpu(rel->r_info);
29862 }
29863 }
29864 @@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
29865
29866 static void print_absolute_symbols(void)
29867 {
29868 - int i;
29869 + unsigned int i;
29870 printf("Absolute symbols\n");
29871 printf(" Num: Value Size Type Bind Visibility Name\n");
29872 for (i = 0; i < ehdr.e_shnum; i++) {
29873 struct section *sec = &secs[i];
29874 char *sym_strtab;
29875 - int j;
29876 + unsigned int j;
29877
29878 if (sec->shdr.sh_type != SHT_SYMTAB) {
29879 continue;
29880 @@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
29881
29882 static void print_absolute_relocs(void)
29883 {
29884 - int i, printed = 0;
29885 + unsigned int i, printed = 0;
29886
29887 for (i = 0; i < ehdr.e_shnum; i++) {
29888 struct section *sec = &secs[i];
29889 struct section *sec_applies, *sec_symtab;
29890 char *sym_strtab;
29891 Elf32_Sym *sh_symtab;
29892 - int j;
29893 + unsigned int j;
29894 if (sec->shdr.sh_type != SHT_REL) {
29895 continue;
29896 }
29897 @@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
29898 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
29899 int use_real_mode)
29900 {
29901 - int i;
29902 + unsigned int i;
29903 /* Walk through the relocations */
29904 for (i = 0; i < ehdr.e_shnum; i++) {
29905 char *sym_strtab;
29906 Elf32_Sym *sh_symtab;
29907 struct section *sec_applies, *sec_symtab;
29908 - int j;
29909 + unsigned int j;
29910 struct section *sec = &secs[i];
29911
29912 if (sec->shdr.sh_type != SHT_REL) {
29913 @@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
29914 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
29915 r_type = ELF32_R_TYPE(rel->r_info);
29916
29917 + if (!use_real_mode) {
29918 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
29919 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
29920 + continue;
29921 +
29922 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
29923 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
29924 + if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
29925 + continue;
29926 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
29927 + continue;
29928 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
29929 + continue;
29930 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
29931 + continue;
29932 +#endif
29933 + }
29934 +
29935 shn_abs = sym->st_shndx == SHN_ABS;
29936
29937 switch (r_type) {
29938 @@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
29939
29940 static void emit_relocs(int as_text, int use_real_mode)
29941 {
29942 - int i;
29943 + unsigned int i;
29944 /* Count how many relocations I have and allocate space for them. */
29945 reloc_count = 0;
29946 walk_relocs(count_reloc, use_real_mode);
29947 @@ -808,10 +874,11 @@ int main(int argc, char **argv)
29948 fname, strerror(errno));
29949 }
29950 read_ehdr(fp);
29951 + read_phdrs(fp);
29952 read_shdrs(fp);
29953 read_strtabs(fp);
29954 read_symtabs(fp);
29955 - read_relocs(fp);
29956 + read_relocs(fp, use_real_mode);
29957 if (show_absolute_syms) {
29958 print_absolute_symbols();
29959 goto out;
29960 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
29961 index fd14be1..e3c79c0 100644
29962 --- a/arch/x86/vdso/Makefile
29963 +++ b/arch/x86/vdso/Makefile
29964 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
29965 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
29966 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
29967
29968 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
29969 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
29970 GCOV_PROFILE := n
29971
29972 #
29973 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
29974 index 0faad64..39ef157 100644
29975 --- a/arch/x86/vdso/vdso32-setup.c
29976 +++ b/arch/x86/vdso/vdso32-setup.c
29977 @@ -25,6 +25,7 @@
29978 #include <asm/tlbflush.h>
29979 #include <asm/vdso.h>
29980 #include <asm/proto.h>
29981 +#include <asm/mman.h>
29982
29983 enum {
29984 VDSO_DISABLED = 0,
29985 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
29986 void enable_sep_cpu(void)
29987 {
29988 int cpu = get_cpu();
29989 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
29990 + struct tss_struct *tss = init_tss + cpu;
29991
29992 if (!boot_cpu_has(X86_FEATURE_SEP)) {
29993 put_cpu();
29994 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
29995 gate_vma.vm_start = FIXADDR_USER_START;
29996 gate_vma.vm_end = FIXADDR_USER_END;
29997 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
29998 - gate_vma.vm_page_prot = __P101;
29999 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
30000
30001 return 0;
30002 }
30003 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30004 if (compat)
30005 addr = VDSO_HIGH_BASE;
30006 else {
30007 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
30008 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
30009 if (IS_ERR_VALUE(addr)) {
30010 ret = addr;
30011 goto up_fail;
30012 }
30013 }
30014
30015 - current->mm->context.vdso = (void *)addr;
30016 + current->mm->context.vdso = addr;
30017
30018 if (compat_uses_vma || !compat) {
30019 /*
30020 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30021 }
30022
30023 current_thread_info()->sysenter_return =
30024 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
30025 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
30026
30027 up_fail:
30028 if (ret)
30029 - current->mm->context.vdso = NULL;
30030 + current->mm->context.vdso = 0;
30031
30032 up_write(&mm->mmap_sem);
30033
30034 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
30035
30036 const char *arch_vma_name(struct vm_area_struct *vma)
30037 {
30038 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
30039 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
30040 return "[vdso]";
30041 +
30042 +#ifdef CONFIG_PAX_SEGMEXEC
30043 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
30044 + return "[vdso]";
30045 +#endif
30046 +
30047 return NULL;
30048 }
30049
30050 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
30051 * Check to see if the corresponding task was created in compat vdso
30052 * mode.
30053 */
30054 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
30055 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
30056 return &gate_vma;
30057 return NULL;
30058 }
30059 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
30060 index 431e875..cbb23f3 100644
30061 --- a/arch/x86/vdso/vma.c
30062 +++ b/arch/x86/vdso/vma.c
30063 @@ -16,8 +16,6 @@
30064 #include <asm/vdso.h>
30065 #include <asm/page.h>
30066
30067 -unsigned int __read_mostly vdso_enabled = 1;
30068 -
30069 extern char vdso_start[], vdso_end[];
30070 extern unsigned short vdso_sync_cpuid;
30071
30072 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
30073 * unaligned here as a result of stack start randomization.
30074 */
30075 addr = PAGE_ALIGN(addr);
30076 - addr = align_vdso_addr(addr);
30077
30078 return addr;
30079 }
30080 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
30081 unsigned size)
30082 {
30083 struct mm_struct *mm = current->mm;
30084 - unsigned long addr;
30085 + unsigned long addr = 0;
30086 int ret;
30087
30088 - if (!vdso_enabled)
30089 - return 0;
30090 -
30091 down_write(&mm->mmap_sem);
30092 +
30093 +#ifdef CONFIG_PAX_RANDMMAP
30094 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
30095 +#endif
30096 +
30097 addr = vdso_addr(mm->start_stack, size);
30098 + addr = align_vdso_addr(addr);
30099 addr = get_unmapped_area(NULL, addr, size, 0, 0);
30100 if (IS_ERR_VALUE(addr)) {
30101 ret = addr;
30102 goto up_fail;
30103 }
30104
30105 - current->mm->context.vdso = (void *)addr;
30106 + mm->context.vdso = addr;
30107
30108 ret = install_special_mapping(mm, addr, size,
30109 VM_READ|VM_EXEC|
30110 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
30111 pages);
30112 - if (ret) {
30113 - current->mm->context.vdso = NULL;
30114 - goto up_fail;
30115 - }
30116 + if (ret)
30117 + mm->context.vdso = 0;
30118
30119 up_fail:
30120 up_write(&mm->mmap_sem);
30121 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30122 vdsox32_size);
30123 }
30124 #endif
30125 -
30126 -static __init int vdso_setup(char *s)
30127 -{
30128 - vdso_enabled = simple_strtoul(s, NULL, 0);
30129 - return 0;
30130 -}
30131 -__setup("vdso=", vdso_setup);
30132 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
30133 index e014092..c76ab69 100644
30134 --- a/arch/x86/xen/enlighten.c
30135 +++ b/arch/x86/xen/enlighten.c
30136 @@ -99,8 +99,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
30137
30138 struct shared_info xen_dummy_shared_info;
30139
30140 -void *xen_initial_gdt;
30141 -
30142 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
30143 __read_mostly int xen_have_vector_callback;
30144 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
30145 @@ -495,8 +493,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
30146 {
30147 unsigned long va = dtr->address;
30148 unsigned int size = dtr->size + 1;
30149 - unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
30150 - unsigned long frames[pages];
30151 + unsigned long frames[65536 / PAGE_SIZE];
30152 int f;
30153
30154 /*
30155 @@ -544,8 +541,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
30156 {
30157 unsigned long va = dtr->address;
30158 unsigned int size = dtr->size + 1;
30159 - unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
30160 - unsigned long frames[pages];
30161 + unsigned long frames[65536 / PAGE_SIZE];
30162 int f;
30163
30164 /*
30165 @@ -938,7 +934,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
30166 return 0;
30167 }
30168
30169 -static void set_xen_basic_apic_ops(void)
30170 +static void __init set_xen_basic_apic_ops(void)
30171 {
30172 apic->read = xen_apic_read;
30173 apic->write = xen_apic_write;
30174 @@ -1244,30 +1240,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
30175 #endif
30176 };
30177
30178 -static void xen_reboot(int reason)
30179 +static __noreturn void xen_reboot(int reason)
30180 {
30181 struct sched_shutdown r = { .reason = reason };
30182
30183 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
30184 - BUG();
30185 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
30186 + BUG();
30187 }
30188
30189 -static void xen_restart(char *msg)
30190 +static __noreturn void xen_restart(char *msg)
30191 {
30192 xen_reboot(SHUTDOWN_reboot);
30193 }
30194
30195 -static void xen_emergency_restart(void)
30196 +static __noreturn void xen_emergency_restart(void)
30197 {
30198 xen_reboot(SHUTDOWN_reboot);
30199 }
30200
30201 -static void xen_machine_halt(void)
30202 +static __noreturn void xen_machine_halt(void)
30203 {
30204 xen_reboot(SHUTDOWN_poweroff);
30205 }
30206
30207 -static void xen_machine_power_off(void)
30208 +static __noreturn void xen_machine_power_off(void)
30209 {
30210 if (pm_power_off)
30211 pm_power_off();
30212 @@ -1369,7 +1365,17 @@ asmlinkage void __init xen_start_kernel(void)
30213 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
30214
30215 /* Work out if we support NX */
30216 - x86_configure_nx();
30217 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
30218 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
30219 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
30220 + unsigned l, h;
30221 +
30222 + __supported_pte_mask |= _PAGE_NX;
30223 + rdmsr(MSR_EFER, l, h);
30224 + l |= EFER_NX;
30225 + wrmsr(MSR_EFER, l, h);
30226 + }
30227 +#endif
30228
30229 xen_setup_features();
30230
30231 @@ -1398,14 +1404,7 @@ asmlinkage void __init xen_start_kernel(void)
30232 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
30233 }
30234
30235 - machine_ops = xen_machine_ops;
30236 -
30237 - /*
30238 - * The only reliable way to retain the initial address of the
30239 - * percpu gdt_page is to remember it here, so we can go and
30240 - * mark it RW later, when the initial percpu area is freed.
30241 - */
30242 - xen_initial_gdt = &per_cpu(gdt_page, 0);
30243 + memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
30244
30245 xen_smp_init();
30246
30247 @@ -1590,7 +1589,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
30248 return NOTIFY_OK;
30249 }
30250
30251 -static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
30252 +static struct notifier_block xen_hvm_cpu_notifier = {
30253 .notifier_call = xen_hvm_cpu_notify,
30254 };
30255
30256 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
30257 index 01de35c..0bda07b 100644
30258 --- a/arch/x86/xen/mmu.c
30259 +++ b/arch/x86/xen/mmu.c
30260 @@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
30261 /* L3_k[510] -> level2_kernel_pgt
30262 * L3_i[511] -> level2_fixmap_pgt */
30263 convert_pfn_mfn(level3_kernel_pgt);
30264 + convert_pfn_mfn(level3_vmalloc_start_pgt);
30265 + convert_pfn_mfn(level3_vmalloc_end_pgt);
30266 + convert_pfn_mfn(level3_vmemmap_pgt);
30267
30268 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
30269 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
30270 @@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
30271 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
30272 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
30273 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
30274 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
30275 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
30276 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
30277 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
30278 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
30279 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
30280 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
30281 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
30282
30283 @@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
30284 pv_mmu_ops.set_pud = xen_set_pud;
30285 #if PAGETABLE_LEVELS == 4
30286 pv_mmu_ops.set_pgd = xen_set_pgd;
30287 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
30288 #endif
30289
30290 /* This will work as long as patching hasn't happened yet
30291 @@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
30292 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
30293 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
30294 .set_pgd = xen_set_pgd_hyper,
30295 + .set_pgd_batched = xen_set_pgd_hyper,
30296
30297 .alloc_pud = xen_alloc_pmd_init,
30298 .release_pud = xen_release_pmd_init,
30299 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
30300 index 34bc4ce..c34aa24 100644
30301 --- a/arch/x86/xen/smp.c
30302 +++ b/arch/x86/xen/smp.c
30303 @@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
30304 {
30305 BUG_ON(smp_processor_id() != 0);
30306 native_smp_prepare_boot_cpu();
30307 -
30308 - /* We've switched to the "real" per-cpu gdt, so make sure the
30309 - old memory can be recycled */
30310 - make_lowmem_page_readwrite(xen_initial_gdt);
30311 -
30312 xen_filter_cpu_maps();
30313 xen_setup_vcpu_info_placement();
30314 }
30315 @@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
30316 gdt = get_cpu_gdt_table(cpu);
30317
30318 ctxt->flags = VGCF_IN_KERNEL;
30319 - ctxt->user_regs.ds = __USER_DS;
30320 - ctxt->user_regs.es = __USER_DS;
30321 + ctxt->user_regs.ds = __KERNEL_DS;
30322 + ctxt->user_regs.es = __KERNEL_DS;
30323 ctxt->user_regs.ss = __KERNEL_DS;
30324 #ifdef CONFIG_X86_32
30325 ctxt->user_regs.fs = __KERNEL_PERCPU;
30326 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
30327 + savesegment(gs, ctxt->user_regs.gs);
30328 #else
30329 ctxt->gs_base_kernel = per_cpu_offset(cpu);
30330 #endif
30331 @@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
30332 int rc;
30333
30334 per_cpu(current_task, cpu) = idle;
30335 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
30336 #ifdef CONFIG_X86_32
30337 irq_ctx_init(cpu);
30338 #else
30339 clear_tsk_thread_flag(idle, TIF_FORK);
30340 - per_cpu(kernel_stack, cpu) =
30341 - (unsigned long)task_stack_page(idle) -
30342 - KERNEL_STACK_OFFSET + THREAD_SIZE;
30343 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
30344 #endif
30345 xen_setup_runstate_info(cpu);
30346 xen_setup_timer(cpu);
30347 @@ -630,7 +624,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
30348
30349 void __init xen_smp_init(void)
30350 {
30351 - smp_ops = xen_smp_ops;
30352 + memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
30353 xen_fill_possible_map();
30354 xen_init_spinlocks();
30355 }
30356 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
30357 index 33ca6e4..0ded929 100644
30358 --- a/arch/x86/xen/xen-asm_32.S
30359 +++ b/arch/x86/xen/xen-asm_32.S
30360 @@ -84,14 +84,14 @@ ENTRY(xen_iret)
30361 ESP_OFFSET=4 # bytes pushed onto stack
30362
30363 /*
30364 - * Store vcpu_info pointer for easy access. Do it this way to
30365 - * avoid having to reload %fs
30366 + * Store vcpu_info pointer for easy access.
30367 */
30368 #ifdef CONFIG_SMP
30369 - GET_THREAD_INFO(%eax)
30370 - movl %ss:TI_cpu(%eax), %eax
30371 - movl %ss:__per_cpu_offset(,%eax,4), %eax
30372 - mov %ss:xen_vcpu(%eax), %eax
30373 + push %fs
30374 + mov $(__KERNEL_PERCPU), %eax
30375 + mov %eax, %fs
30376 + mov PER_CPU_VAR(xen_vcpu), %eax
30377 + pop %fs
30378 #else
30379 movl %ss:xen_vcpu, %eax
30380 #endif
30381 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
30382 index 7faed58..ba4427c 100644
30383 --- a/arch/x86/xen/xen-head.S
30384 +++ b/arch/x86/xen/xen-head.S
30385 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
30386 #ifdef CONFIG_X86_32
30387 mov %esi,xen_start_info
30388 mov $init_thread_union+THREAD_SIZE,%esp
30389 +#ifdef CONFIG_SMP
30390 + movl $cpu_gdt_table,%edi
30391 + movl $__per_cpu_load,%eax
30392 + movw %ax,__KERNEL_PERCPU + 2(%edi)
30393 + rorl $16,%eax
30394 + movb %al,__KERNEL_PERCPU + 4(%edi)
30395 + movb %ah,__KERNEL_PERCPU + 7(%edi)
30396 + movl $__per_cpu_end - 1,%eax
30397 + subl $__per_cpu_start,%eax
30398 + movw %ax,__KERNEL_PERCPU + 0(%edi)
30399 +#endif
30400 #else
30401 mov %rsi,xen_start_info
30402 mov $init_thread_union+THREAD_SIZE,%rsp
30403 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
30404 index a95b417..b6dbd0b 100644
30405 --- a/arch/x86/xen/xen-ops.h
30406 +++ b/arch/x86/xen/xen-ops.h
30407 @@ -10,8 +10,6 @@
30408 extern const char xen_hypervisor_callback[];
30409 extern const char xen_failsafe_callback[];
30410
30411 -extern void *xen_initial_gdt;
30412 -
30413 struct trap_info;
30414 void xen_copy_trap_info(struct trap_info *traps);
30415
30416 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
30417 index 525bd3d..ef888b1 100644
30418 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
30419 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
30420 @@ -119,9 +119,9 @@
30421 ----------------------------------------------------------------------*/
30422
30423 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
30424 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
30425 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
30426 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
30427 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
30428
30429 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
30430 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
30431 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
30432 index 2f33760..835e50a 100644
30433 --- a/arch/xtensa/variants/fsf/include/variant/core.h
30434 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
30435 @@ -11,6 +11,7 @@
30436 #ifndef _XTENSA_CORE_H
30437 #define _XTENSA_CORE_H
30438
30439 +#include <linux/const.h>
30440
30441 /****************************************************************************
30442 Parameters Useful for Any Code, USER or PRIVILEGED
30443 @@ -112,9 +113,9 @@
30444 ----------------------------------------------------------------------*/
30445
30446 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
30447 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
30448 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
30449 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
30450 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
30451
30452 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
30453 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
30454 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
30455 index af00795..2bb8105 100644
30456 --- a/arch/xtensa/variants/s6000/include/variant/core.h
30457 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
30458 @@ -11,6 +11,7 @@
30459 #ifndef _XTENSA_CORE_CONFIGURATION_H
30460 #define _XTENSA_CORE_CONFIGURATION_H
30461
30462 +#include <linux/const.h>
30463
30464 /****************************************************************************
30465 Parameters Useful for Any Code, USER or PRIVILEGED
30466 @@ -118,9 +119,9 @@
30467 ----------------------------------------------------------------------*/
30468
30469 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
30470 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
30471 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
30472 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
30473 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
30474
30475 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
30476 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
30477 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
30478 index 58916af..eb9dbcf6 100644
30479 --- a/block/blk-iopoll.c
30480 +++ b/block/blk-iopoll.c
30481 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
30482 }
30483 EXPORT_SYMBOL(blk_iopoll_complete);
30484
30485 -static void blk_iopoll_softirq(struct softirq_action *h)
30486 +static void blk_iopoll_softirq(void)
30487 {
30488 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
30489 int rearm = 0, budget = blk_iopoll_budget;
30490 @@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
30491 return NOTIFY_OK;
30492 }
30493
30494 -static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
30495 +static struct notifier_block blk_iopoll_cpu_notifier = {
30496 .notifier_call = blk_iopoll_cpu_notify,
30497 };
30498
30499 diff --git a/block/blk-map.c b/block/blk-map.c
30500 index 623e1cd..ca1e109 100644
30501 --- a/block/blk-map.c
30502 +++ b/block/blk-map.c
30503 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
30504 if (!len || !kbuf)
30505 return -EINVAL;
30506
30507 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
30508 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
30509 if (do_copy)
30510 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
30511 else
30512 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
30513 index 467c8de..f3628c5 100644
30514 --- a/block/blk-softirq.c
30515 +++ b/block/blk-softirq.c
30516 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
30517 * Softirq action handler - move entries to local list and loop over them
30518 * while passing them to the queue registered handler.
30519 */
30520 -static void blk_done_softirq(struct softirq_action *h)
30521 +static void blk_done_softirq(void)
30522 {
30523 struct list_head *cpu_list, local_list;
30524
30525 @@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
30526 return NOTIFY_OK;
30527 }
30528
30529 -static struct notifier_block __cpuinitdata blk_cpu_notifier = {
30530 +static struct notifier_block blk_cpu_notifier = {
30531 .notifier_call = blk_cpu_notify,
30532 };
30533
30534 diff --git a/block/bsg.c b/block/bsg.c
30535 index ff64ae3..593560c 100644
30536 --- a/block/bsg.c
30537 +++ b/block/bsg.c
30538 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
30539 struct sg_io_v4 *hdr, struct bsg_device *bd,
30540 fmode_t has_write_perm)
30541 {
30542 + unsigned char tmpcmd[sizeof(rq->__cmd)];
30543 + unsigned char *cmdptr;
30544 +
30545 if (hdr->request_len > BLK_MAX_CDB) {
30546 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
30547 if (!rq->cmd)
30548 return -ENOMEM;
30549 - }
30550 + cmdptr = rq->cmd;
30551 + } else
30552 + cmdptr = tmpcmd;
30553
30554 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
30555 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
30556 hdr->request_len))
30557 return -EFAULT;
30558
30559 + if (cmdptr != rq->cmd)
30560 + memcpy(rq->cmd, cmdptr, hdr->request_len);
30561 +
30562 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
30563 if (blk_verify_command(rq->cmd, has_write_perm))
30564 return -EPERM;
30565 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
30566 index 7c668c8..db3521c 100644
30567 --- a/block/compat_ioctl.c
30568 +++ b/block/compat_ioctl.c
30569 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
30570 err |= __get_user(f->spec1, &uf->spec1);
30571 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
30572 err |= __get_user(name, &uf->name);
30573 - f->name = compat_ptr(name);
30574 + f->name = (void __force_kernel *)compat_ptr(name);
30575 if (err) {
30576 err = -EFAULT;
30577 goto out;
30578 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
30579 index b62fb88..bdab4c4 100644
30580 --- a/block/partitions/efi.c
30581 +++ b/block/partitions/efi.c
30582 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
30583 if (!gpt)
30584 return NULL;
30585
30586 + if (!le32_to_cpu(gpt->num_partition_entries))
30587 + return NULL;
30588 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
30589 + if (!pte)
30590 + return NULL;
30591 +
30592 count = le32_to_cpu(gpt->num_partition_entries) *
30593 le32_to_cpu(gpt->sizeof_partition_entry);
30594 - if (!count)
30595 - return NULL;
30596 - pte = kzalloc(count, GFP_KERNEL);
30597 - if (!pte)
30598 - return NULL;
30599 -
30600 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
30601 (u8 *) pte,
30602 count) < count) {
30603 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
30604 index 9a87daa..fb17486 100644
30605 --- a/block/scsi_ioctl.c
30606 +++ b/block/scsi_ioctl.c
30607 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
30608 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
30609 struct sg_io_hdr *hdr, fmode_t mode)
30610 {
30611 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
30612 + unsigned char tmpcmd[sizeof(rq->__cmd)];
30613 + unsigned char *cmdptr;
30614 +
30615 + if (rq->cmd != rq->__cmd)
30616 + cmdptr = rq->cmd;
30617 + else
30618 + cmdptr = tmpcmd;
30619 +
30620 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
30621 return -EFAULT;
30622 +
30623 + if (cmdptr != rq->cmd)
30624 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
30625 +
30626 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
30627 return -EPERM;
30628
30629 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
30630 int err;
30631 unsigned int in_len, out_len, bytes, opcode, cmdlen;
30632 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
30633 + unsigned char tmpcmd[sizeof(rq->__cmd)];
30634 + unsigned char *cmdptr;
30635
30636 if (!sic)
30637 return -EINVAL;
30638 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
30639 */
30640 err = -EFAULT;
30641 rq->cmd_len = cmdlen;
30642 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
30643 +
30644 + if (rq->cmd != rq->__cmd)
30645 + cmdptr = rq->cmd;
30646 + else
30647 + cmdptr = tmpcmd;
30648 +
30649 + if (copy_from_user(cmdptr, sic->data, cmdlen))
30650 goto error;
30651
30652 + if (rq->cmd != cmdptr)
30653 + memcpy(rq->cmd, cmdptr, cmdlen);
30654 +
30655 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
30656 goto error;
30657
30658 diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
30659 index 533de95..7d4a8d2 100644
30660 --- a/crypto/ablkcipher.c
30661 +++ b/crypto/ablkcipher.c
30662 @@ -388,9 +388,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
30663 {
30664 struct crypto_report_blkcipher rblkcipher;
30665
30666 - snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher");
30667 - snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
30668 - alg->cra_ablkcipher.geniv ?: "<default>");
30669 + strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
30670 + strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
30671 + sizeof(rblkcipher.geniv));
30672
30673 rblkcipher.blocksize = alg->cra_blocksize;
30674 rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
30675 @@ -469,9 +469,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
30676 {
30677 struct crypto_report_blkcipher rblkcipher;
30678
30679 - snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher");
30680 - snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
30681 - alg->cra_ablkcipher.geniv ?: "<built-in>");
30682 + strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
30683 + strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
30684 + sizeof(rblkcipher.geniv));
30685
30686 rblkcipher.blocksize = alg->cra_blocksize;
30687 rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
30688 diff --git a/crypto/aead.c b/crypto/aead.c
30689 index 0b8121e..27bc487 100644
30690 --- a/crypto/aead.c
30691 +++ b/crypto/aead.c
30692 @@ -117,9 +117,8 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
30693 struct crypto_report_aead raead;
30694 struct aead_alg *aead = &alg->cra_aead;
30695
30696 - snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead");
30697 - snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s",
30698 - aead->geniv ?: "<built-in>");
30699 + strncpy(raead.type, "aead", sizeof(raead.type));
30700 + strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
30701
30702 raead.blocksize = alg->cra_blocksize;
30703 raead.maxauthsize = aead->maxauthsize;
30704 @@ -203,8 +202,8 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
30705 struct crypto_report_aead raead;
30706 struct aead_alg *aead = &alg->cra_aead;
30707
30708 - snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead");
30709 - snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv);
30710 + strncpy(raead.type, "nivaead", sizeof(raead.type));
30711 + strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
30712
30713 raead.blocksize = alg->cra_blocksize;
30714 raead.maxauthsize = aead->maxauthsize;
30715 diff --git a/crypto/ahash.c b/crypto/ahash.c
30716 index 3887856..793a27f 100644
30717 --- a/crypto/ahash.c
30718 +++ b/crypto/ahash.c
30719 @@ -404,7 +404,7 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
30720 {
30721 struct crypto_report_hash rhash;
30722
30723 - snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
30724 + strncpy(rhash.type, "ahash", sizeof(rhash.type));
30725
30726 rhash.blocksize = alg->cra_blocksize;
30727 rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
30728 diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
30729 index a8d85a1..c44e014 100644
30730 --- a/crypto/blkcipher.c
30731 +++ b/crypto/blkcipher.c
30732 @@ -499,9 +499,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
30733 {
30734 struct crypto_report_blkcipher rblkcipher;
30735
30736 - snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
30737 - snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
30738 - alg->cra_blkcipher.geniv ?: "<default>");
30739 + strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
30740 + strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
30741 + sizeof(rblkcipher.geniv));
30742
30743 rblkcipher.blocksize = alg->cra_blocksize;
30744 rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
30745 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
30746 index 7bdd61b..afec999 100644
30747 --- a/crypto/cryptd.c
30748 +++ b/crypto/cryptd.c
30749 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
30750
30751 struct cryptd_blkcipher_request_ctx {
30752 crypto_completion_t complete;
30753 -};
30754 +} __no_const;
30755
30756 struct cryptd_hash_ctx {
30757 struct crypto_shash *child;
30758 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
30759
30760 struct cryptd_aead_request_ctx {
30761 crypto_completion_t complete;
30762 -};
30763 +} __no_const;
30764
30765 static void cryptd_queue_worker(struct work_struct *work);
30766
30767 diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
30768 index 35d700a..dfd511f 100644
30769 --- a/crypto/crypto_user.c
30770 +++ b/crypto/crypto_user.c
30771 @@ -30,6 +30,8 @@
30772
30773 #include "internal.h"
30774
30775 +#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
30776 +
30777 static DEFINE_MUTEX(crypto_cfg_mutex);
30778
30779 /* The crypto netlink socket */
30780 @@ -75,7 +77,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
30781 {
30782 struct crypto_report_cipher rcipher;
30783
30784 - snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher");
30785 + strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
30786
30787 rcipher.blocksize = alg->cra_blocksize;
30788 rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
30789 @@ -94,8 +96,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
30790 {
30791 struct crypto_report_comp rcomp;
30792
30793 - snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
30794 -
30795 + strncpy(rcomp.type, "compression", sizeof(rcomp.type));
30796 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
30797 sizeof(struct crypto_report_comp), &rcomp))
30798 goto nla_put_failure;
30799 @@ -108,12 +109,14 @@ nla_put_failure:
30800 static int crypto_report_one(struct crypto_alg *alg,
30801 struct crypto_user_alg *ualg, struct sk_buff *skb)
30802 {
30803 - memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
30804 - memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
30805 - sizeof(ualg->cru_driver_name));
30806 - memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
30807 - CRYPTO_MAX_ALG_NAME);
30808 + strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
30809 + strncpy(ualg->cru_driver_name, alg->cra_driver_name,
30810 + sizeof(ualg->cru_driver_name));
30811 + strncpy(ualg->cru_module_name, module_name(alg->cra_module),
30812 + sizeof(ualg->cru_module_name));
30813
30814 + ualg->cru_type = 0;
30815 + ualg->cru_mask = 0;
30816 ualg->cru_flags = alg->cra_flags;
30817 ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
30818
30819 @@ -122,8 +125,7 @@ static int crypto_report_one(struct crypto_alg *alg,
30820 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
30821 struct crypto_report_larval rl;
30822
30823 - snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
30824 -
30825 + strncpy(rl.type, "larval", sizeof(rl.type));
30826 if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
30827 sizeof(struct crypto_report_larval), &rl))
30828 goto nla_put_failure;
30829 @@ -196,7 +198,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
30830 struct crypto_dump_info info;
30831 int err;
30832
30833 - if (!p->cru_driver_name)
30834 + if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
30835 + return -EINVAL;
30836 +
30837 + if (!p->cru_driver_name[0])
30838 return -EINVAL;
30839
30840 alg = crypto_alg_match(p, 1);
30841 @@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
30842 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
30843 LIST_HEAD(list);
30844
30845 + if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
30846 + return -EINVAL;
30847 +
30848 if (priority && !strlen(p->cru_driver_name))
30849 return -EINVAL;
30850
30851 @@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
30852 struct crypto_alg *alg;
30853 struct crypto_user_alg *p = nlmsg_data(nlh);
30854
30855 + if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
30856 + return -EINVAL;
30857 +
30858 alg = crypto_alg_match(p, 1);
30859 if (!alg)
30860 return -ENOENT;
30861 @@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
30862 struct crypto_user_alg *p = nlmsg_data(nlh);
30863 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
30864
30865 + if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
30866 + return -EINVAL;
30867 +
30868 if (strlen(p->cru_driver_name))
30869 exact = 1;
30870
30871 diff --git a/crypto/pcompress.c b/crypto/pcompress.c
30872 index 04e083f..7140fe7 100644
30873 --- a/crypto/pcompress.c
30874 +++ b/crypto/pcompress.c
30875 @@ -53,8 +53,7 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
30876 {
30877 struct crypto_report_comp rpcomp;
30878
30879 - snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
30880 -
30881 + strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
30882 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
30883 sizeof(struct crypto_report_comp), &rpcomp))
30884 goto nla_put_failure;
30885 diff --git a/crypto/rng.c b/crypto/rng.c
30886 index f3b7894..e0a25c2 100644
30887 --- a/crypto/rng.c
30888 +++ b/crypto/rng.c
30889 @@ -65,7 +65,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
30890 {
30891 struct crypto_report_rng rrng;
30892
30893 - snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng");
30894 + strncpy(rrng.type, "rng", sizeof(rrng.type));
30895
30896 rrng.seedsize = alg->cra_rng.seedsize;
30897
30898 diff --git a/crypto/shash.c b/crypto/shash.c
30899 index f426330f..929058a 100644
30900 --- a/crypto/shash.c
30901 +++ b/crypto/shash.c
30902 @@ -530,7 +530,8 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
30903 struct crypto_report_hash rhash;
30904 struct shash_alg *salg = __crypto_shash_alg(alg);
30905
30906 - snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash");
30907 + strncpy(rhash.type, "shash", sizeof(rhash.type));
30908 +
30909 rhash.blocksize = alg->cra_blocksize;
30910 rhash.digestsize = salg->digestsize;
30911
30912 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
30913 index e6defd8..c26a225 100644
30914 --- a/drivers/acpi/apei/cper.c
30915 +++ b/drivers/acpi/apei/cper.c
30916 @@ -38,12 +38,12 @@
30917 */
30918 u64 cper_next_record_id(void)
30919 {
30920 - static atomic64_t seq;
30921 + static atomic64_unchecked_t seq;
30922
30923 - if (!atomic64_read(&seq))
30924 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
30925 + if (!atomic64_read_unchecked(&seq))
30926 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
30927
30928 - return atomic64_inc_return(&seq);
30929 + return atomic64_inc_return_unchecked(&seq);
30930 }
30931 EXPORT_SYMBOL_GPL(cper_next_record_id);
30932
30933 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
30934 index 7586544..636a2f0 100644
30935 --- a/drivers/acpi/ec_sys.c
30936 +++ b/drivers/acpi/ec_sys.c
30937 @@ -12,6 +12,7 @@
30938 #include <linux/acpi.h>
30939 #include <linux/debugfs.h>
30940 #include <linux/module.h>
30941 +#include <linux/uaccess.h>
30942 #include "internal.h"
30943
30944 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
30945 @@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
30946 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
30947 */
30948 unsigned int size = EC_SPACE_SIZE;
30949 - u8 *data = (u8 *) buf;
30950 + u8 data;
30951 loff_t init_off = *off;
30952 int err = 0;
30953
30954 @@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
30955 size = count;
30956
30957 while (size) {
30958 - err = ec_read(*off, &data[*off - init_off]);
30959 + err = ec_read(*off, &data);
30960 if (err)
30961 return err;
30962 + if (put_user(data, &buf[*off - init_off]))
30963 + return -EFAULT;
30964 *off += 1;
30965 size--;
30966 }
30967 @@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
30968
30969 unsigned int size = count;
30970 loff_t init_off = *off;
30971 - u8 *data = (u8 *) buf;
30972 int err = 0;
30973
30974 if (*off >= EC_SPACE_SIZE)
30975 @@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
30976 }
30977
30978 while (size) {
30979 - u8 byte_write = data[*off - init_off];
30980 + u8 byte_write;
30981 + if (get_user(byte_write, &buf[*off - init_off]))
30982 + return -EFAULT;
30983 err = ec_write(*off, byte_write);
30984 if (err)
30985 return err;
30986 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
30987 index e83311b..142b5cc 100644
30988 --- a/drivers/acpi/processor_driver.c
30989 +++ b/drivers/acpi/processor_driver.c
30990 @@ -558,7 +558,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
30991 return 0;
30992 #endif
30993
30994 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
30995 + BUG_ON(pr->id >= nr_cpu_ids);
30996
30997 /*
30998 * Buggy BIOS check
30999 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
31000 index 46cd3f4..0871ad0 100644
31001 --- a/drivers/ata/libata-core.c
31002 +++ b/drivers/ata/libata-core.c
31003 @@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
31004 struct ata_port *ap;
31005 unsigned int tag;
31006
31007 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31008 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31009 ap = qc->ap;
31010
31011 qc->flags = 0;
31012 @@ -4796,7 +4796,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
31013 struct ata_port *ap;
31014 struct ata_link *link;
31015
31016 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31017 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31018 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
31019 ap = qc->ap;
31020 link = qc->dev->link;
31021 @@ -5892,6 +5892,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
31022 return;
31023
31024 spin_lock(&lock);
31025 + pax_open_kernel();
31026
31027 for (cur = ops->inherits; cur; cur = cur->inherits) {
31028 void **inherit = (void **)cur;
31029 @@ -5905,8 +5906,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
31030 if (IS_ERR(*pp))
31031 *pp = NULL;
31032
31033 - ops->inherits = NULL;
31034 + *(struct ata_port_operations **)&ops->inherits = NULL;
31035
31036 + pax_close_kernel();
31037 spin_unlock(&lock);
31038 }
31039
31040 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
31041 index 405022d..fb70e53 100644
31042 --- a/drivers/ata/pata_arasan_cf.c
31043 +++ b/drivers/ata/pata_arasan_cf.c
31044 @@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
31045 /* Handle platform specific quirks */
31046 if (pdata->quirk) {
31047 if (pdata->quirk & CF_BROKEN_PIO) {
31048 - ap->ops->set_piomode = NULL;
31049 + pax_open_kernel();
31050 + *(void **)&ap->ops->set_piomode = NULL;
31051 + pax_close_kernel();
31052 ap->pio_mask = 0;
31053 }
31054 if (pdata->quirk & CF_BROKEN_MWDMA)
31055 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
31056 index f9b983a..887b9d8 100644
31057 --- a/drivers/atm/adummy.c
31058 +++ b/drivers/atm/adummy.c
31059 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
31060 vcc->pop(vcc, skb);
31061 else
31062 dev_kfree_skb_any(skb);
31063 - atomic_inc(&vcc->stats->tx);
31064 + atomic_inc_unchecked(&vcc->stats->tx);
31065
31066 return 0;
31067 }
31068 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
31069 index 77a7480..05cde58 100644
31070 --- a/drivers/atm/ambassador.c
31071 +++ b/drivers/atm/ambassador.c
31072 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
31073 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
31074
31075 // VC layer stats
31076 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31077 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31078
31079 // free the descriptor
31080 kfree (tx_descr);
31081 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31082 dump_skb ("<<<", vc, skb);
31083
31084 // VC layer stats
31085 - atomic_inc(&atm_vcc->stats->rx);
31086 + atomic_inc_unchecked(&atm_vcc->stats->rx);
31087 __net_timestamp(skb);
31088 // end of our responsibility
31089 atm_vcc->push (atm_vcc, skb);
31090 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31091 } else {
31092 PRINTK (KERN_INFO, "dropped over-size frame");
31093 // should we count this?
31094 - atomic_inc(&atm_vcc->stats->rx_drop);
31095 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31096 }
31097
31098 } else {
31099 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
31100 }
31101
31102 if (check_area (skb->data, skb->len)) {
31103 - atomic_inc(&atm_vcc->stats->tx_err);
31104 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
31105 return -ENOMEM; // ?
31106 }
31107
31108 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
31109 index b22d71c..d6e1049 100644
31110 --- a/drivers/atm/atmtcp.c
31111 +++ b/drivers/atm/atmtcp.c
31112 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31113 if (vcc->pop) vcc->pop(vcc,skb);
31114 else dev_kfree_skb(skb);
31115 if (dev_data) return 0;
31116 - atomic_inc(&vcc->stats->tx_err);
31117 + atomic_inc_unchecked(&vcc->stats->tx_err);
31118 return -ENOLINK;
31119 }
31120 size = skb->len+sizeof(struct atmtcp_hdr);
31121 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31122 if (!new_skb) {
31123 if (vcc->pop) vcc->pop(vcc,skb);
31124 else dev_kfree_skb(skb);
31125 - atomic_inc(&vcc->stats->tx_err);
31126 + atomic_inc_unchecked(&vcc->stats->tx_err);
31127 return -ENOBUFS;
31128 }
31129 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
31130 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31131 if (vcc->pop) vcc->pop(vcc,skb);
31132 else dev_kfree_skb(skb);
31133 out_vcc->push(out_vcc,new_skb);
31134 - atomic_inc(&vcc->stats->tx);
31135 - atomic_inc(&out_vcc->stats->rx);
31136 + atomic_inc_unchecked(&vcc->stats->tx);
31137 + atomic_inc_unchecked(&out_vcc->stats->rx);
31138 return 0;
31139 }
31140
31141 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31142 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
31143 read_unlock(&vcc_sklist_lock);
31144 if (!out_vcc) {
31145 - atomic_inc(&vcc->stats->tx_err);
31146 + atomic_inc_unchecked(&vcc->stats->tx_err);
31147 goto done;
31148 }
31149 skb_pull(skb,sizeof(struct atmtcp_hdr));
31150 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31151 __net_timestamp(new_skb);
31152 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
31153 out_vcc->push(out_vcc,new_skb);
31154 - atomic_inc(&vcc->stats->tx);
31155 - atomic_inc(&out_vcc->stats->rx);
31156 + atomic_inc_unchecked(&vcc->stats->tx);
31157 + atomic_inc_unchecked(&out_vcc->stats->rx);
31158 done:
31159 if (vcc->pop) vcc->pop(vcc,skb);
31160 else dev_kfree_skb(skb);
31161 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
31162 index c1eb6fa..4c71be9 100644
31163 --- a/drivers/atm/eni.c
31164 +++ b/drivers/atm/eni.c
31165 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
31166 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
31167 vcc->dev->number);
31168 length = 0;
31169 - atomic_inc(&vcc->stats->rx_err);
31170 + atomic_inc_unchecked(&vcc->stats->rx_err);
31171 }
31172 else {
31173 length = ATM_CELL_SIZE-1; /* no HEC */
31174 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31175 size);
31176 }
31177 eff = length = 0;
31178 - atomic_inc(&vcc->stats->rx_err);
31179 + atomic_inc_unchecked(&vcc->stats->rx_err);
31180 }
31181 else {
31182 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
31183 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31184 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
31185 vcc->dev->number,vcc->vci,length,size << 2,descr);
31186 length = eff = 0;
31187 - atomic_inc(&vcc->stats->rx_err);
31188 + atomic_inc_unchecked(&vcc->stats->rx_err);
31189 }
31190 }
31191 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
31192 @@ -767,7 +767,7 @@ rx_dequeued++;
31193 vcc->push(vcc,skb);
31194 pushed++;
31195 }
31196 - atomic_inc(&vcc->stats->rx);
31197 + atomic_inc_unchecked(&vcc->stats->rx);
31198 }
31199 wake_up(&eni_dev->rx_wait);
31200 }
31201 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
31202 PCI_DMA_TODEVICE);
31203 if (vcc->pop) vcc->pop(vcc,skb);
31204 else dev_kfree_skb_irq(skb);
31205 - atomic_inc(&vcc->stats->tx);
31206 + atomic_inc_unchecked(&vcc->stats->tx);
31207 wake_up(&eni_dev->tx_wait);
31208 dma_complete++;
31209 }
31210 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
31211 index b41c948..a002b17 100644
31212 --- a/drivers/atm/firestream.c
31213 +++ b/drivers/atm/firestream.c
31214 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
31215 }
31216 }
31217
31218 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31219 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31220
31221 fs_dprintk (FS_DEBUG_TXMEM, "i");
31222 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
31223 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31224 #endif
31225 skb_put (skb, qe->p1 & 0xffff);
31226 ATM_SKB(skb)->vcc = atm_vcc;
31227 - atomic_inc(&atm_vcc->stats->rx);
31228 + atomic_inc_unchecked(&atm_vcc->stats->rx);
31229 __net_timestamp(skb);
31230 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
31231 atm_vcc->push (atm_vcc, skb);
31232 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31233 kfree (pe);
31234 }
31235 if (atm_vcc)
31236 - atomic_inc(&atm_vcc->stats->rx_drop);
31237 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31238 break;
31239 case 0x1f: /* Reassembly abort: no buffers. */
31240 /* Silently increment error counter. */
31241 if (atm_vcc)
31242 - atomic_inc(&atm_vcc->stats->rx_drop);
31243 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31244 break;
31245 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
31246 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
31247 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
31248 index 204814e..cede831 100644
31249 --- a/drivers/atm/fore200e.c
31250 +++ b/drivers/atm/fore200e.c
31251 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
31252 #endif
31253 /* check error condition */
31254 if (*entry->status & STATUS_ERROR)
31255 - atomic_inc(&vcc->stats->tx_err);
31256 + atomic_inc_unchecked(&vcc->stats->tx_err);
31257 else
31258 - atomic_inc(&vcc->stats->tx);
31259 + atomic_inc_unchecked(&vcc->stats->tx);
31260 }
31261 }
31262
31263 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31264 if (skb == NULL) {
31265 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
31266
31267 - atomic_inc(&vcc->stats->rx_drop);
31268 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31269 return -ENOMEM;
31270 }
31271
31272 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31273
31274 dev_kfree_skb_any(skb);
31275
31276 - atomic_inc(&vcc->stats->rx_drop);
31277 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31278 return -ENOMEM;
31279 }
31280
31281 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31282
31283 vcc->push(vcc, skb);
31284 - atomic_inc(&vcc->stats->rx);
31285 + atomic_inc_unchecked(&vcc->stats->rx);
31286
31287 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31288
31289 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
31290 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
31291 fore200e->atm_dev->number,
31292 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
31293 - atomic_inc(&vcc->stats->rx_err);
31294 + atomic_inc_unchecked(&vcc->stats->rx_err);
31295 }
31296 }
31297
31298 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
31299 goto retry_here;
31300 }
31301
31302 - atomic_inc(&vcc->stats->tx_err);
31303 + atomic_inc_unchecked(&vcc->stats->tx_err);
31304
31305 fore200e->tx_sat++;
31306 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
31307 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
31308 index 72b6960..cf9167a 100644
31309 --- a/drivers/atm/he.c
31310 +++ b/drivers/atm/he.c
31311 @@ -1699,7 +1699,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31312
31313 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
31314 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
31315 - atomic_inc(&vcc->stats->rx_drop);
31316 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31317 goto return_host_buffers;
31318 }
31319
31320 @@ -1726,7 +1726,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31321 RBRQ_LEN_ERR(he_dev->rbrq_head)
31322 ? "LEN_ERR" : "",
31323 vcc->vpi, vcc->vci);
31324 - atomic_inc(&vcc->stats->rx_err);
31325 + atomic_inc_unchecked(&vcc->stats->rx_err);
31326 goto return_host_buffers;
31327 }
31328
31329 @@ -1778,7 +1778,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31330 vcc->push(vcc, skb);
31331 spin_lock(&he_dev->global_lock);
31332
31333 - atomic_inc(&vcc->stats->rx);
31334 + atomic_inc_unchecked(&vcc->stats->rx);
31335
31336 return_host_buffers:
31337 ++pdus_assembled;
31338 @@ -2104,7 +2104,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
31339 tpd->vcc->pop(tpd->vcc, tpd->skb);
31340 else
31341 dev_kfree_skb_any(tpd->skb);
31342 - atomic_inc(&tpd->vcc->stats->tx_err);
31343 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
31344 }
31345 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
31346 return;
31347 @@ -2516,7 +2516,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31348 vcc->pop(vcc, skb);
31349 else
31350 dev_kfree_skb_any(skb);
31351 - atomic_inc(&vcc->stats->tx_err);
31352 + atomic_inc_unchecked(&vcc->stats->tx_err);
31353 return -EINVAL;
31354 }
31355
31356 @@ -2527,7 +2527,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31357 vcc->pop(vcc, skb);
31358 else
31359 dev_kfree_skb_any(skb);
31360 - atomic_inc(&vcc->stats->tx_err);
31361 + atomic_inc_unchecked(&vcc->stats->tx_err);
31362 return -EINVAL;
31363 }
31364 #endif
31365 @@ -2539,7 +2539,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31366 vcc->pop(vcc, skb);
31367 else
31368 dev_kfree_skb_any(skb);
31369 - atomic_inc(&vcc->stats->tx_err);
31370 + atomic_inc_unchecked(&vcc->stats->tx_err);
31371 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31372 return -ENOMEM;
31373 }
31374 @@ -2581,7 +2581,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31375 vcc->pop(vcc, skb);
31376 else
31377 dev_kfree_skb_any(skb);
31378 - atomic_inc(&vcc->stats->tx_err);
31379 + atomic_inc_unchecked(&vcc->stats->tx_err);
31380 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31381 return -ENOMEM;
31382 }
31383 @@ -2612,7 +2612,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31384 __enqueue_tpd(he_dev, tpd, cid);
31385 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31386
31387 - atomic_inc(&vcc->stats->tx);
31388 + atomic_inc_unchecked(&vcc->stats->tx);
31389
31390 return 0;
31391 }
31392 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
31393 index 1dc0519..1aadaf7 100644
31394 --- a/drivers/atm/horizon.c
31395 +++ b/drivers/atm/horizon.c
31396 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
31397 {
31398 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
31399 // VC layer stats
31400 - atomic_inc(&vcc->stats->rx);
31401 + atomic_inc_unchecked(&vcc->stats->rx);
31402 __net_timestamp(skb);
31403 // end of our responsibility
31404 vcc->push (vcc, skb);
31405 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
31406 dev->tx_iovec = NULL;
31407
31408 // VC layer stats
31409 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31410 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31411
31412 // free the skb
31413 hrz_kfree_skb (skb);
31414 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
31415 index 272f009..a18ba55 100644
31416 --- a/drivers/atm/idt77252.c
31417 +++ b/drivers/atm/idt77252.c
31418 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
31419 else
31420 dev_kfree_skb(skb);
31421
31422 - atomic_inc(&vcc->stats->tx);
31423 + atomic_inc_unchecked(&vcc->stats->tx);
31424 }
31425
31426 atomic_dec(&scq->used);
31427 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31428 if ((sb = dev_alloc_skb(64)) == NULL) {
31429 printk("%s: Can't allocate buffers for aal0.\n",
31430 card->name);
31431 - atomic_add(i, &vcc->stats->rx_drop);
31432 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
31433 break;
31434 }
31435 if (!atm_charge(vcc, sb->truesize)) {
31436 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
31437 card->name);
31438 - atomic_add(i - 1, &vcc->stats->rx_drop);
31439 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
31440 dev_kfree_skb(sb);
31441 break;
31442 }
31443 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31444 ATM_SKB(sb)->vcc = vcc;
31445 __net_timestamp(sb);
31446 vcc->push(vcc, sb);
31447 - atomic_inc(&vcc->stats->rx);
31448 + atomic_inc_unchecked(&vcc->stats->rx);
31449
31450 cell += ATM_CELL_PAYLOAD;
31451 }
31452 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31453 "(CDC: %08x)\n",
31454 card->name, len, rpp->len, readl(SAR_REG_CDC));
31455 recycle_rx_pool_skb(card, rpp);
31456 - atomic_inc(&vcc->stats->rx_err);
31457 + atomic_inc_unchecked(&vcc->stats->rx_err);
31458 return;
31459 }
31460 if (stat & SAR_RSQE_CRC) {
31461 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
31462 recycle_rx_pool_skb(card, rpp);
31463 - atomic_inc(&vcc->stats->rx_err);
31464 + atomic_inc_unchecked(&vcc->stats->rx_err);
31465 return;
31466 }
31467 if (skb_queue_len(&rpp->queue) > 1) {
31468 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31469 RXPRINTK("%s: Can't alloc RX skb.\n",
31470 card->name);
31471 recycle_rx_pool_skb(card, rpp);
31472 - atomic_inc(&vcc->stats->rx_err);
31473 + atomic_inc_unchecked(&vcc->stats->rx_err);
31474 return;
31475 }
31476 if (!atm_charge(vcc, skb->truesize)) {
31477 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31478 __net_timestamp(skb);
31479
31480 vcc->push(vcc, skb);
31481 - atomic_inc(&vcc->stats->rx);
31482 + atomic_inc_unchecked(&vcc->stats->rx);
31483
31484 return;
31485 }
31486 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31487 __net_timestamp(skb);
31488
31489 vcc->push(vcc, skb);
31490 - atomic_inc(&vcc->stats->rx);
31491 + atomic_inc_unchecked(&vcc->stats->rx);
31492
31493 if (skb->truesize > SAR_FB_SIZE_3)
31494 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
31495 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
31496 if (vcc->qos.aal != ATM_AAL0) {
31497 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
31498 card->name, vpi, vci);
31499 - atomic_inc(&vcc->stats->rx_drop);
31500 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31501 goto drop;
31502 }
31503
31504 if ((sb = dev_alloc_skb(64)) == NULL) {
31505 printk("%s: Can't allocate buffers for AAL0.\n",
31506 card->name);
31507 - atomic_inc(&vcc->stats->rx_err);
31508 + atomic_inc_unchecked(&vcc->stats->rx_err);
31509 goto drop;
31510 }
31511
31512 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
31513 ATM_SKB(sb)->vcc = vcc;
31514 __net_timestamp(sb);
31515 vcc->push(vcc, sb);
31516 - atomic_inc(&vcc->stats->rx);
31517 + atomic_inc_unchecked(&vcc->stats->rx);
31518
31519 drop:
31520 skb_pull(queue, 64);
31521 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31522
31523 if (vc == NULL) {
31524 printk("%s: NULL connection in send().\n", card->name);
31525 - atomic_inc(&vcc->stats->tx_err);
31526 + atomic_inc_unchecked(&vcc->stats->tx_err);
31527 dev_kfree_skb(skb);
31528 return -EINVAL;
31529 }
31530 if (!test_bit(VCF_TX, &vc->flags)) {
31531 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
31532 - atomic_inc(&vcc->stats->tx_err);
31533 + atomic_inc_unchecked(&vcc->stats->tx_err);
31534 dev_kfree_skb(skb);
31535 return -EINVAL;
31536 }
31537 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31538 break;
31539 default:
31540 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
31541 - atomic_inc(&vcc->stats->tx_err);
31542 + atomic_inc_unchecked(&vcc->stats->tx_err);
31543 dev_kfree_skb(skb);
31544 return -EINVAL;
31545 }
31546
31547 if (skb_shinfo(skb)->nr_frags != 0) {
31548 printk("%s: No scatter-gather yet.\n", card->name);
31549 - atomic_inc(&vcc->stats->tx_err);
31550 + atomic_inc_unchecked(&vcc->stats->tx_err);
31551 dev_kfree_skb(skb);
31552 return -EINVAL;
31553 }
31554 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31555
31556 err = queue_skb(card, vc, skb, oam);
31557 if (err) {
31558 - atomic_inc(&vcc->stats->tx_err);
31559 + atomic_inc_unchecked(&vcc->stats->tx_err);
31560 dev_kfree_skb(skb);
31561 return err;
31562 }
31563 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
31564 skb = dev_alloc_skb(64);
31565 if (!skb) {
31566 printk("%s: Out of memory in send_oam().\n", card->name);
31567 - atomic_inc(&vcc->stats->tx_err);
31568 + atomic_inc_unchecked(&vcc->stats->tx_err);
31569 return -ENOMEM;
31570 }
31571 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
31572 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
31573 index 4217f29..88f547a 100644
31574 --- a/drivers/atm/iphase.c
31575 +++ b/drivers/atm/iphase.c
31576 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
31577 status = (u_short) (buf_desc_ptr->desc_mode);
31578 if (status & (RX_CER | RX_PTE | RX_OFL))
31579 {
31580 - atomic_inc(&vcc->stats->rx_err);
31581 + atomic_inc_unchecked(&vcc->stats->rx_err);
31582 IF_ERR(printk("IA: bad packet, dropping it");)
31583 if (status & RX_CER) {
31584 IF_ERR(printk(" cause: packet CRC error\n");)
31585 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
31586 len = dma_addr - buf_addr;
31587 if (len > iadev->rx_buf_sz) {
31588 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
31589 - atomic_inc(&vcc->stats->rx_err);
31590 + atomic_inc_unchecked(&vcc->stats->rx_err);
31591 goto out_free_desc;
31592 }
31593
31594 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31595 ia_vcc = INPH_IA_VCC(vcc);
31596 if (ia_vcc == NULL)
31597 {
31598 - atomic_inc(&vcc->stats->rx_err);
31599 + atomic_inc_unchecked(&vcc->stats->rx_err);
31600 atm_return(vcc, skb->truesize);
31601 dev_kfree_skb_any(skb);
31602 goto INCR_DLE;
31603 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31604 if ((length > iadev->rx_buf_sz) || (length >
31605 (skb->len - sizeof(struct cpcs_trailer))))
31606 {
31607 - atomic_inc(&vcc->stats->rx_err);
31608 + atomic_inc_unchecked(&vcc->stats->rx_err);
31609 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
31610 length, skb->len);)
31611 atm_return(vcc, skb->truesize);
31612 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31613
31614 IF_RX(printk("rx_dle_intr: skb push");)
31615 vcc->push(vcc,skb);
31616 - atomic_inc(&vcc->stats->rx);
31617 + atomic_inc_unchecked(&vcc->stats->rx);
31618 iadev->rx_pkt_cnt++;
31619 }
31620 INCR_DLE:
31621 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
31622 {
31623 struct k_sonet_stats *stats;
31624 stats = &PRIV(_ia_dev[board])->sonet_stats;
31625 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
31626 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
31627 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
31628 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
31629 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
31630 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
31631 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
31632 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
31633 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
31634 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
31635 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
31636 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
31637 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
31638 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
31639 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
31640 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
31641 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
31642 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
31643 }
31644 ia_cmds.status = 0;
31645 break;
31646 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31647 if ((desc == 0) || (desc > iadev->num_tx_desc))
31648 {
31649 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
31650 - atomic_inc(&vcc->stats->tx);
31651 + atomic_inc_unchecked(&vcc->stats->tx);
31652 if (vcc->pop)
31653 vcc->pop(vcc, skb);
31654 else
31655 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31656 ATM_DESC(skb) = vcc->vci;
31657 skb_queue_tail(&iadev->tx_dma_q, skb);
31658
31659 - atomic_inc(&vcc->stats->tx);
31660 + atomic_inc_unchecked(&vcc->stats->tx);
31661 iadev->tx_pkt_cnt++;
31662 /* Increment transaction counter */
31663 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
31664
31665 #if 0
31666 /* add flow control logic */
31667 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
31668 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
31669 if (iavcc->vc_desc_cnt > 10) {
31670 vcc->tx_quota = vcc->tx_quota * 3 / 4;
31671 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
31672 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
31673 index fa7d701..1e404c7 100644
31674 --- a/drivers/atm/lanai.c
31675 +++ b/drivers/atm/lanai.c
31676 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
31677 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
31678 lanai_endtx(lanai, lvcc);
31679 lanai_free_skb(lvcc->tx.atmvcc, skb);
31680 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
31681 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
31682 }
31683
31684 /* Try to fill the buffer - don't call unless there is backlog */
31685 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
31686 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
31687 __net_timestamp(skb);
31688 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
31689 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
31690 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
31691 out:
31692 lvcc->rx.buf.ptr = end;
31693 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
31694 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31695 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
31696 "vcc %d\n", lanai->number, (unsigned int) s, vci);
31697 lanai->stats.service_rxnotaal5++;
31698 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31699 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31700 return 0;
31701 }
31702 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
31703 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31704 int bytes;
31705 read_unlock(&vcc_sklist_lock);
31706 DPRINTK("got trashed rx pdu on vci %d\n", vci);
31707 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31708 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31709 lvcc->stats.x.aal5.service_trash++;
31710 bytes = (SERVICE_GET_END(s) * 16) -
31711 (((unsigned long) lvcc->rx.buf.ptr) -
31712 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31713 }
31714 if (s & SERVICE_STREAM) {
31715 read_unlock(&vcc_sklist_lock);
31716 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31717 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31718 lvcc->stats.x.aal5.service_stream++;
31719 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
31720 "PDU on VCI %d!\n", lanai->number, vci);
31721 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31722 return 0;
31723 }
31724 DPRINTK("got rx crc error on vci %d\n", vci);
31725 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31726 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31727 lvcc->stats.x.aal5.service_rxcrc++;
31728 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
31729 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
31730 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
31731 index ed1d2b7..8cffc1f 100644
31732 --- a/drivers/atm/nicstar.c
31733 +++ b/drivers/atm/nicstar.c
31734 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31735 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
31736 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
31737 card->index);
31738 - atomic_inc(&vcc->stats->tx_err);
31739 + atomic_inc_unchecked(&vcc->stats->tx_err);
31740 dev_kfree_skb_any(skb);
31741 return -EINVAL;
31742 }
31743 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31744 if (!vc->tx) {
31745 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
31746 card->index);
31747 - atomic_inc(&vcc->stats->tx_err);
31748 + atomic_inc_unchecked(&vcc->stats->tx_err);
31749 dev_kfree_skb_any(skb);
31750 return -EINVAL;
31751 }
31752 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31753 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
31754 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
31755 card->index);
31756 - atomic_inc(&vcc->stats->tx_err);
31757 + atomic_inc_unchecked(&vcc->stats->tx_err);
31758 dev_kfree_skb_any(skb);
31759 return -EINVAL;
31760 }
31761
31762 if (skb_shinfo(skb)->nr_frags != 0) {
31763 printk("nicstar%d: No scatter-gather yet.\n", card->index);
31764 - atomic_inc(&vcc->stats->tx_err);
31765 + atomic_inc_unchecked(&vcc->stats->tx_err);
31766 dev_kfree_skb_any(skb);
31767 return -EINVAL;
31768 }
31769 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31770 }
31771
31772 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
31773 - atomic_inc(&vcc->stats->tx_err);
31774 + atomic_inc_unchecked(&vcc->stats->tx_err);
31775 dev_kfree_skb_any(skb);
31776 return -EIO;
31777 }
31778 - atomic_inc(&vcc->stats->tx);
31779 + atomic_inc_unchecked(&vcc->stats->tx);
31780
31781 return 0;
31782 }
31783 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31784 printk
31785 ("nicstar%d: Can't allocate buffers for aal0.\n",
31786 card->index);
31787 - atomic_add(i, &vcc->stats->rx_drop);
31788 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
31789 break;
31790 }
31791 if (!atm_charge(vcc, sb->truesize)) {
31792 RXPRINTK
31793 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
31794 card->index);
31795 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
31796 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
31797 dev_kfree_skb_any(sb);
31798 break;
31799 }
31800 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31801 ATM_SKB(sb)->vcc = vcc;
31802 __net_timestamp(sb);
31803 vcc->push(vcc, sb);
31804 - atomic_inc(&vcc->stats->rx);
31805 + atomic_inc_unchecked(&vcc->stats->rx);
31806 cell += ATM_CELL_PAYLOAD;
31807 }
31808
31809 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31810 if (iovb == NULL) {
31811 printk("nicstar%d: Out of iovec buffers.\n",
31812 card->index);
31813 - atomic_inc(&vcc->stats->rx_drop);
31814 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31815 recycle_rx_buf(card, skb);
31816 return;
31817 }
31818 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31819 small or large buffer itself. */
31820 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
31821 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
31822 - atomic_inc(&vcc->stats->rx_err);
31823 + atomic_inc_unchecked(&vcc->stats->rx_err);
31824 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
31825 NS_MAX_IOVECS);
31826 NS_PRV_IOVCNT(iovb) = 0;
31827 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31828 ("nicstar%d: Expected a small buffer, and this is not one.\n",
31829 card->index);
31830 which_list(card, skb);
31831 - atomic_inc(&vcc->stats->rx_err);
31832 + atomic_inc_unchecked(&vcc->stats->rx_err);
31833 recycle_rx_buf(card, skb);
31834 vc->rx_iov = NULL;
31835 recycle_iov_buf(card, iovb);
31836 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31837 ("nicstar%d: Expected a large buffer, and this is not one.\n",
31838 card->index);
31839 which_list(card, skb);
31840 - atomic_inc(&vcc->stats->rx_err);
31841 + atomic_inc_unchecked(&vcc->stats->rx_err);
31842 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
31843 NS_PRV_IOVCNT(iovb));
31844 vc->rx_iov = NULL;
31845 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31846 printk(" - PDU size mismatch.\n");
31847 else
31848 printk(".\n");
31849 - atomic_inc(&vcc->stats->rx_err);
31850 + atomic_inc_unchecked(&vcc->stats->rx_err);
31851 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
31852 NS_PRV_IOVCNT(iovb));
31853 vc->rx_iov = NULL;
31854 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31855 /* skb points to a small buffer */
31856 if (!atm_charge(vcc, skb->truesize)) {
31857 push_rxbufs(card, skb);
31858 - atomic_inc(&vcc->stats->rx_drop);
31859 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31860 } else {
31861 skb_put(skb, len);
31862 dequeue_sm_buf(card, skb);
31863 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31864 ATM_SKB(skb)->vcc = vcc;
31865 __net_timestamp(skb);
31866 vcc->push(vcc, skb);
31867 - atomic_inc(&vcc->stats->rx);
31868 + atomic_inc_unchecked(&vcc->stats->rx);
31869 }
31870 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
31871 struct sk_buff *sb;
31872 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31873 if (len <= NS_SMBUFSIZE) {
31874 if (!atm_charge(vcc, sb->truesize)) {
31875 push_rxbufs(card, sb);
31876 - atomic_inc(&vcc->stats->rx_drop);
31877 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31878 } else {
31879 skb_put(sb, len);
31880 dequeue_sm_buf(card, sb);
31881 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31882 ATM_SKB(sb)->vcc = vcc;
31883 __net_timestamp(sb);
31884 vcc->push(vcc, sb);
31885 - atomic_inc(&vcc->stats->rx);
31886 + atomic_inc_unchecked(&vcc->stats->rx);
31887 }
31888
31889 push_rxbufs(card, skb);
31890 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31891
31892 if (!atm_charge(vcc, skb->truesize)) {
31893 push_rxbufs(card, skb);
31894 - atomic_inc(&vcc->stats->rx_drop);
31895 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31896 } else {
31897 dequeue_lg_buf(card, skb);
31898 #ifdef NS_USE_DESTRUCTORS
31899 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31900 ATM_SKB(skb)->vcc = vcc;
31901 __net_timestamp(skb);
31902 vcc->push(vcc, skb);
31903 - atomic_inc(&vcc->stats->rx);
31904 + atomic_inc_unchecked(&vcc->stats->rx);
31905 }
31906
31907 push_rxbufs(card, sb);
31908 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31909 printk
31910 ("nicstar%d: Out of huge buffers.\n",
31911 card->index);
31912 - atomic_inc(&vcc->stats->rx_drop);
31913 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31914 recycle_iovec_rx_bufs(card,
31915 (struct iovec *)
31916 iovb->data,
31917 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31918 card->hbpool.count++;
31919 } else
31920 dev_kfree_skb_any(hb);
31921 - atomic_inc(&vcc->stats->rx_drop);
31922 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31923 } else {
31924 /* Copy the small buffer to the huge buffer */
31925 sb = (struct sk_buff *)iov->iov_base;
31926 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
31927 #endif /* NS_USE_DESTRUCTORS */
31928 __net_timestamp(hb);
31929 vcc->push(vcc, hb);
31930 - atomic_inc(&vcc->stats->rx);
31931 + atomic_inc_unchecked(&vcc->stats->rx);
31932 }
31933 }
31934
31935 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
31936 index 0474a89..06ea4a1 100644
31937 --- a/drivers/atm/solos-pci.c
31938 +++ b/drivers/atm/solos-pci.c
31939 @@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
31940 }
31941 atm_charge(vcc, skb->truesize);
31942 vcc->push(vcc, skb);
31943 - atomic_inc(&vcc->stats->rx);
31944 + atomic_inc_unchecked(&vcc->stats->rx);
31945 break;
31946
31947 case PKT_STATUS:
31948 @@ -1117,7 +1117,7 @@ static uint32_t fpga_tx(struct solos_card *card)
31949 vcc = SKB_CB(oldskb)->vcc;
31950
31951 if (vcc) {
31952 - atomic_inc(&vcc->stats->tx);
31953 + atomic_inc_unchecked(&vcc->stats->tx);
31954 solos_pop(vcc, oldskb);
31955 } else {
31956 dev_kfree_skb_irq(oldskb);
31957 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
31958 index 0215934..ce9f5b1 100644
31959 --- a/drivers/atm/suni.c
31960 +++ b/drivers/atm/suni.c
31961 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
31962
31963
31964 #define ADD_LIMITED(s,v) \
31965 - atomic_add((v),&stats->s); \
31966 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
31967 + atomic_add_unchecked((v),&stats->s); \
31968 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
31969
31970
31971 static void suni_hz(unsigned long from_timer)
31972 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
31973 index 5120a96..e2572bd 100644
31974 --- a/drivers/atm/uPD98402.c
31975 +++ b/drivers/atm/uPD98402.c
31976 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
31977 struct sonet_stats tmp;
31978 int error = 0;
31979
31980 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31981 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31982 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
31983 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
31984 if (zero && !error) {
31985 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
31986
31987
31988 #define ADD_LIMITED(s,v) \
31989 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
31990 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
31991 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31992 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
31993 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
31994 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31995
31996
31997 static void stat_event(struct atm_dev *dev)
31998 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
31999 if (reason & uPD98402_INT_PFM) stat_event(dev);
32000 if (reason & uPD98402_INT_PCO) {
32001 (void) GET(PCOCR); /* clear interrupt cause */
32002 - atomic_add(GET(HECCT),
32003 + atomic_add_unchecked(GET(HECCT),
32004 &PRIV(dev)->sonet_stats.uncorr_hcs);
32005 }
32006 if ((reason & uPD98402_INT_RFO) &&
32007 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
32008 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
32009 uPD98402_INT_LOS),PIMR); /* enable them */
32010 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
32011 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
32012 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
32013 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
32014 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
32015 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
32016 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
32017 return 0;
32018 }
32019
32020 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
32021 index 969c3c2..9b72956 100644
32022 --- a/drivers/atm/zatm.c
32023 +++ b/drivers/atm/zatm.c
32024 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32025 }
32026 if (!size) {
32027 dev_kfree_skb_irq(skb);
32028 - if (vcc) atomic_inc(&vcc->stats->rx_err);
32029 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
32030 continue;
32031 }
32032 if (!atm_charge(vcc,skb->truesize)) {
32033 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32034 skb->len = size;
32035 ATM_SKB(skb)->vcc = vcc;
32036 vcc->push(vcc,skb);
32037 - atomic_inc(&vcc->stats->rx);
32038 + atomic_inc_unchecked(&vcc->stats->rx);
32039 }
32040 zout(pos & 0xffff,MTA(mbx));
32041 #if 0 /* probably a stupid idea */
32042 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
32043 skb_queue_head(&zatm_vcc->backlog,skb);
32044 break;
32045 }
32046 - atomic_inc(&vcc->stats->tx);
32047 + atomic_inc_unchecked(&vcc->stats->tx);
32048 wake_up(&zatm_vcc->tx_wait);
32049 }
32050
32051 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
32052 index 17cf7ca..7e553e1 100644
32053 --- a/drivers/base/devtmpfs.c
32054 +++ b/drivers/base/devtmpfs.c
32055 @@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
32056 if (!thread)
32057 return 0;
32058
32059 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
32060 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
32061 if (err)
32062 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
32063 else
32064 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
32065 index e6ee5e8..98ad7fc 100644
32066 --- a/drivers/base/power/wakeup.c
32067 +++ b/drivers/base/power/wakeup.c
32068 @@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
32069 * They need to be modified together atomically, so it's better to use one
32070 * atomic variable to hold them both.
32071 */
32072 -static atomic_t combined_event_count = ATOMIC_INIT(0);
32073 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
32074
32075 #define IN_PROGRESS_BITS (sizeof(int) * 4)
32076 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
32077
32078 static void split_counters(unsigned int *cnt, unsigned int *inpr)
32079 {
32080 - unsigned int comb = atomic_read(&combined_event_count);
32081 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
32082
32083 *cnt = (comb >> IN_PROGRESS_BITS);
32084 *inpr = comb & MAX_IN_PROGRESS;
32085 @@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
32086 ws->start_prevent_time = ws->last_time;
32087
32088 /* Increment the counter of events in progress. */
32089 - cec = atomic_inc_return(&combined_event_count);
32090 + cec = atomic_inc_return_unchecked(&combined_event_count);
32091
32092 trace_wakeup_source_activate(ws->name, cec);
32093 }
32094 @@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
32095 * Increment the counter of registered wakeup events and decrement the
32096 * couter of wakeup events in progress simultaneously.
32097 */
32098 - cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
32099 + cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
32100 trace_wakeup_source_deactivate(ws->name, cec);
32101
32102 split_counters(&cnt, &inpr);
32103 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
32104 index ade58bc..867143d 100644
32105 --- a/drivers/block/cciss.c
32106 +++ b/drivers/block/cciss.c
32107 @@ -1196,6 +1196,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
32108 int err;
32109 u32 cp;
32110
32111 + memset(&arg64, 0, sizeof(arg64));
32112 +
32113 err = 0;
32114 err |=
32115 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
32116 @@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
32117 while (!list_empty(&h->reqQ)) {
32118 c = list_entry(h->reqQ.next, CommandList_struct, list);
32119 /* can't do anything if fifo is full */
32120 - if ((h->access.fifo_full(h))) {
32121 + if ((h->access->fifo_full(h))) {
32122 dev_warn(&h->pdev->dev, "fifo full\n");
32123 break;
32124 }
32125 @@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
32126 h->Qdepth--;
32127
32128 /* Tell the controller execute command */
32129 - h->access.submit_command(h, c);
32130 + h->access->submit_command(h, c);
32131
32132 /* Put job onto the completed Q */
32133 addQ(&h->cmpQ, c);
32134 @@ -3441,17 +3443,17 @@ startio:
32135
32136 static inline unsigned long get_next_completion(ctlr_info_t *h)
32137 {
32138 - return h->access.command_completed(h);
32139 + return h->access->command_completed(h);
32140 }
32141
32142 static inline int interrupt_pending(ctlr_info_t *h)
32143 {
32144 - return h->access.intr_pending(h);
32145 + return h->access->intr_pending(h);
32146 }
32147
32148 static inline long interrupt_not_for_us(ctlr_info_t *h)
32149 {
32150 - return ((h->access.intr_pending(h) == 0) ||
32151 + return ((h->access->intr_pending(h) == 0) ||
32152 (h->interrupts_enabled == 0));
32153 }
32154
32155 @@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
32156 u32 a;
32157
32158 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
32159 - return h->access.command_completed(h);
32160 + return h->access->command_completed(h);
32161
32162 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
32163 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
32164 @@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
32165 trans_support & CFGTBL_Trans_use_short_tags);
32166
32167 /* Change the access methods to the performant access methods */
32168 - h->access = SA5_performant_access;
32169 + h->access = &SA5_performant_access;
32170 h->transMethod = CFGTBL_Trans_Performant;
32171
32172 return;
32173 @@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
32174 if (prod_index < 0)
32175 return -ENODEV;
32176 h->product_name = products[prod_index].product_name;
32177 - h->access = *(products[prod_index].access);
32178 + h->access = products[prod_index].access;
32179
32180 if (cciss_board_disabled(h)) {
32181 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
32182 @@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
32183 }
32184
32185 /* make sure the board interrupts are off */
32186 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
32187 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
32188 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
32189 if (rc)
32190 goto clean2;
32191 @@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
32192 * fake ones to scoop up any residual completions.
32193 */
32194 spin_lock_irqsave(&h->lock, flags);
32195 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
32196 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
32197 spin_unlock_irqrestore(&h->lock, flags);
32198 free_irq(h->intr[h->intr_mode], h);
32199 rc = cciss_request_irq(h, cciss_msix_discard_completions,
32200 @@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
32201 dev_info(&h->pdev->dev, "Board READY.\n");
32202 dev_info(&h->pdev->dev,
32203 "Waiting for stale completions to drain.\n");
32204 - h->access.set_intr_mask(h, CCISS_INTR_ON);
32205 + h->access->set_intr_mask(h, CCISS_INTR_ON);
32206 msleep(10000);
32207 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
32208 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
32209
32210 rc = controller_reset_failed(h->cfgtable);
32211 if (rc)
32212 @@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
32213 cciss_scsi_setup(h);
32214
32215 /* Turn the interrupts on so we can service requests */
32216 - h->access.set_intr_mask(h, CCISS_INTR_ON);
32217 + h->access->set_intr_mask(h, CCISS_INTR_ON);
32218
32219 /* Get the firmware version */
32220 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
32221 @@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
32222 kfree(flush_buf);
32223 if (return_code != IO_OK)
32224 dev_warn(&h->pdev->dev, "Error flushing cache\n");
32225 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
32226 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
32227 free_irq(h->intr[h->intr_mode], h);
32228 }
32229
32230 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
32231 index 7fda30e..eb5dfe0 100644
32232 --- a/drivers/block/cciss.h
32233 +++ b/drivers/block/cciss.h
32234 @@ -101,7 +101,7 @@ struct ctlr_info
32235 /* information about each logical volume */
32236 drive_info_struct *drv[CISS_MAX_LUN];
32237
32238 - struct access_method access;
32239 + struct access_method *access;
32240
32241 /* queue and queue Info */
32242 struct list_head reqQ;
32243 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
32244 index 3f08713..56a586a 100644
32245 --- a/drivers/block/cpqarray.c
32246 +++ b/drivers/block/cpqarray.c
32247 @@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
32248 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
32249 goto Enomem4;
32250 }
32251 - hba[i]->access.set_intr_mask(hba[i], 0);
32252 + hba[i]->access->set_intr_mask(hba[i], 0);
32253 if (request_irq(hba[i]->intr, do_ida_intr,
32254 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
32255 {
32256 @@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
32257 add_timer(&hba[i]->timer);
32258
32259 /* Enable IRQ now that spinlock and rate limit timer are set up */
32260 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32261 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32262
32263 for(j=0; j<NWD; j++) {
32264 struct gendisk *disk = ida_gendisk[i][j];
32265 @@ -694,7 +694,7 @@ DBGINFO(
32266 for(i=0; i<NR_PRODUCTS; i++) {
32267 if (board_id == products[i].board_id) {
32268 c->product_name = products[i].product_name;
32269 - c->access = *(products[i].access);
32270 + c->access = products[i].access;
32271 break;
32272 }
32273 }
32274 @@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
32275 hba[ctlr]->intr = intr;
32276 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
32277 hba[ctlr]->product_name = products[j].product_name;
32278 - hba[ctlr]->access = *(products[j].access);
32279 + hba[ctlr]->access = products[j].access;
32280 hba[ctlr]->ctlr = ctlr;
32281 hba[ctlr]->board_id = board_id;
32282 hba[ctlr]->pci_dev = NULL; /* not PCI */
32283 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
32284
32285 while((c = h->reqQ) != NULL) {
32286 /* Can't do anything if we're busy */
32287 - if (h->access.fifo_full(h) == 0)
32288 + if (h->access->fifo_full(h) == 0)
32289 return;
32290
32291 /* Get the first entry from the request Q */
32292 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
32293 h->Qdepth--;
32294
32295 /* Tell the controller to do our bidding */
32296 - h->access.submit_command(h, c);
32297 + h->access->submit_command(h, c);
32298
32299 /* Get onto the completion Q */
32300 addQ(&h->cmpQ, c);
32301 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32302 unsigned long flags;
32303 __u32 a,a1;
32304
32305 - istat = h->access.intr_pending(h);
32306 + istat = h->access->intr_pending(h);
32307 /* Is this interrupt for us? */
32308 if (istat == 0)
32309 return IRQ_NONE;
32310 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32311 */
32312 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
32313 if (istat & FIFO_NOT_EMPTY) {
32314 - while((a = h->access.command_completed(h))) {
32315 + while((a = h->access->command_completed(h))) {
32316 a1 = a; a &= ~3;
32317 if ((c = h->cmpQ) == NULL)
32318 {
32319 @@ -1449,11 +1449,11 @@ static int sendcmd(
32320 /*
32321 * Disable interrupt
32322 */
32323 - info_p->access.set_intr_mask(info_p, 0);
32324 + info_p->access->set_intr_mask(info_p, 0);
32325 /* Make sure there is room in the command FIFO */
32326 /* Actually it should be completely empty at this time. */
32327 for (i = 200000; i > 0; i--) {
32328 - temp = info_p->access.fifo_full(info_p);
32329 + temp = info_p->access->fifo_full(info_p);
32330 if (temp != 0) {
32331 break;
32332 }
32333 @@ -1466,7 +1466,7 @@ DBG(
32334 /*
32335 * Send the cmd
32336 */
32337 - info_p->access.submit_command(info_p, c);
32338 + info_p->access->submit_command(info_p, c);
32339 complete = pollcomplete(ctlr);
32340
32341 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
32342 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
32343 * we check the new geometry. Then turn interrupts back on when
32344 * we're done.
32345 */
32346 - host->access.set_intr_mask(host, 0);
32347 + host->access->set_intr_mask(host, 0);
32348 getgeometry(ctlr);
32349 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
32350 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
32351
32352 for(i=0; i<NWD; i++) {
32353 struct gendisk *disk = ida_gendisk[ctlr][i];
32354 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
32355 /* Wait (up to 2 seconds) for a command to complete */
32356
32357 for (i = 200000; i > 0; i--) {
32358 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
32359 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
32360 if (done == 0) {
32361 udelay(10); /* a short fixed delay */
32362 } else
32363 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
32364 index be73e9d..7fbf140 100644
32365 --- a/drivers/block/cpqarray.h
32366 +++ b/drivers/block/cpqarray.h
32367 @@ -99,7 +99,7 @@ struct ctlr_info {
32368 drv_info_t drv[NWD];
32369 struct proc_dir_entry *proc;
32370
32371 - struct access_method access;
32372 + struct access_method *access;
32373
32374 cmdlist_t *reqQ;
32375 cmdlist_t *cmpQ;
32376 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
32377 index 6b51afa..17e1191 100644
32378 --- a/drivers/block/drbd/drbd_int.h
32379 +++ b/drivers/block/drbd/drbd_int.h
32380 @@ -582,7 +582,7 @@ struct drbd_epoch {
32381 struct drbd_tconn *tconn;
32382 struct list_head list;
32383 unsigned int barrier_nr;
32384 - atomic_t epoch_size; /* increased on every request added. */
32385 + atomic_unchecked_t epoch_size; /* increased on every request added. */
32386 atomic_t active; /* increased on every req. added, and dec on every finished. */
32387 unsigned long flags;
32388 };
32389 @@ -1011,7 +1011,7 @@ struct drbd_conf {
32390 int al_tr_cycle;
32391 int al_tr_pos; /* position of the next transaction in the journal */
32392 wait_queue_head_t seq_wait;
32393 - atomic_t packet_seq;
32394 + atomic_unchecked_t packet_seq;
32395 unsigned int peer_seq;
32396 spinlock_t peer_seq_lock;
32397 unsigned int minor;
32398 @@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
32399 char __user *uoptval;
32400 int err;
32401
32402 - uoptval = (char __user __force *)optval;
32403 + uoptval = (char __force_user *)optval;
32404
32405 set_fs(KERNEL_DS);
32406 if (level == SOL_SOCKET)
32407 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
32408 index 8c13eeb..217adee 100644
32409 --- a/drivers/block/drbd/drbd_main.c
32410 +++ b/drivers/block/drbd/drbd_main.c
32411 @@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
32412 p->sector = sector;
32413 p->block_id = block_id;
32414 p->blksize = blksize;
32415 - p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
32416 + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
32417 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
32418 }
32419
32420 @@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
32421 return -EIO;
32422 p->sector = cpu_to_be64(req->i.sector);
32423 p->block_id = (unsigned long)req;
32424 - p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
32425 + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
32426 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
32427 if (mdev->state.conn >= C_SYNC_SOURCE &&
32428 mdev->state.conn <= C_PAUSED_SYNC_T)
32429 @@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
32430 {
32431 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
32432
32433 - if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
32434 - conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
32435 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
32436 + conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
32437 kfree(tconn->current_epoch);
32438
32439 idr_destroy(&tconn->volumes);
32440 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
32441 index a9eccfc..68e4533 100644
32442 --- a/drivers/block/drbd/drbd_receiver.c
32443 +++ b/drivers/block/drbd/drbd_receiver.c
32444 @@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
32445 {
32446 int err;
32447
32448 - atomic_set(&mdev->packet_seq, 0);
32449 + atomic_set_unchecked(&mdev->packet_seq, 0);
32450 mdev->peer_seq = 0;
32451
32452 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
32453 @@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
32454 do {
32455 next_epoch = NULL;
32456
32457 - epoch_size = atomic_read(&epoch->epoch_size);
32458 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
32459
32460 switch (ev & ~EV_CLEANUP) {
32461 case EV_PUT:
32462 @@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
32463 rv = FE_DESTROYED;
32464 } else {
32465 epoch->flags = 0;
32466 - atomic_set(&epoch->epoch_size, 0);
32467 + atomic_set_unchecked(&epoch->epoch_size, 0);
32468 /* atomic_set(&epoch->active, 0); is already zero */
32469 if (rv == FE_STILL_LIVE)
32470 rv = FE_RECYCLED;
32471 @@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
32472 conn_wait_active_ee_empty(tconn);
32473 drbd_flush(tconn);
32474
32475 - if (atomic_read(&tconn->current_epoch->epoch_size)) {
32476 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
32477 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
32478 if (epoch)
32479 break;
32480 @@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
32481 }
32482
32483 epoch->flags = 0;
32484 - atomic_set(&epoch->epoch_size, 0);
32485 + atomic_set_unchecked(&epoch->epoch_size, 0);
32486 atomic_set(&epoch->active, 0);
32487
32488 spin_lock(&tconn->epoch_lock);
32489 - if (atomic_read(&tconn->current_epoch->epoch_size)) {
32490 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
32491 list_add(&epoch->list, &tconn->current_epoch->list);
32492 tconn->current_epoch = epoch;
32493 tconn->epochs++;
32494 @@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
32495
32496 err = wait_for_and_update_peer_seq(mdev, peer_seq);
32497 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
32498 - atomic_inc(&tconn->current_epoch->epoch_size);
32499 + atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
32500 err2 = drbd_drain_block(mdev, pi->size);
32501 if (!err)
32502 err = err2;
32503 @@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
32504
32505 spin_lock(&tconn->epoch_lock);
32506 peer_req->epoch = tconn->current_epoch;
32507 - atomic_inc(&peer_req->epoch->epoch_size);
32508 + atomic_inc_unchecked(&peer_req->epoch->epoch_size);
32509 atomic_inc(&peer_req->epoch->active);
32510 spin_unlock(&tconn->epoch_lock);
32511
32512 @@ -4466,7 +4466,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
32513 if (!list_empty(&tconn->current_epoch->list))
32514 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
32515 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
32516 - atomic_set(&tconn->current_epoch->epoch_size, 0);
32517 + atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
32518 tconn->send.seen_any_write_yet = false;
32519
32520 conn_info(tconn, "Connection closed\n");
32521 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
32522 index ae12512..37fa397 100644
32523 --- a/drivers/block/loop.c
32524 +++ b/drivers/block/loop.c
32525 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
32526 mm_segment_t old_fs = get_fs();
32527
32528 set_fs(get_ds());
32529 - bw = file->f_op->write(file, buf, len, &pos);
32530 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
32531 set_fs(old_fs);
32532 if (likely(bw == len))
32533 return 0;
32534 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
32535 index d620b44..587561e 100644
32536 --- a/drivers/cdrom/cdrom.c
32537 +++ b/drivers/cdrom/cdrom.c
32538 @@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
32539 ENSURE(reset, CDC_RESET);
32540 ENSURE(generic_packet, CDC_GENERIC_PACKET);
32541 cdi->mc_flags = 0;
32542 - cdo->n_minors = 0;
32543 cdi->options = CDO_USE_FFLAGS;
32544
32545 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
32546 @@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
32547 else
32548 cdi->cdda_method = CDDA_OLD;
32549
32550 - if (!cdo->generic_packet)
32551 - cdo->generic_packet = cdrom_dummy_generic_packet;
32552 + if (!cdo->generic_packet) {
32553 + pax_open_kernel();
32554 + *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
32555 + pax_close_kernel();
32556 + }
32557
32558 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
32559 mutex_lock(&cdrom_mutex);
32560 @@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
32561 if (cdi->exit)
32562 cdi->exit(cdi);
32563
32564 - cdi->ops->n_minors--;
32565 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
32566 }
32567
32568 diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
32569 index d59cdcb..11afddf 100644
32570 --- a/drivers/cdrom/gdrom.c
32571 +++ b/drivers/cdrom/gdrom.c
32572 @@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
32573 .audio_ioctl = gdrom_audio_ioctl,
32574 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
32575 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
32576 - .n_minors = 1,
32577 };
32578
32579 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
32580 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
32581 index 72bedad..8181ce1 100644
32582 --- a/drivers/char/Kconfig
32583 +++ b/drivers/char/Kconfig
32584 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
32585
32586 config DEVKMEM
32587 bool "/dev/kmem virtual device support"
32588 - default y
32589 + default n
32590 + depends on !GRKERNSEC_KMEM
32591 help
32592 Say Y here if you want to support the /dev/kmem device. The
32593 /dev/kmem device is rarely used, but can be used for certain
32594 @@ -581,6 +582,7 @@ config DEVPORT
32595 bool
32596 depends on !M68K
32597 depends on ISA || PCI
32598 + depends on !GRKERNSEC_KMEM
32599 default y
32600
32601 source "drivers/s390/char/Kconfig"
32602 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
32603 index 2e04433..22afc64 100644
32604 --- a/drivers/char/agp/frontend.c
32605 +++ b/drivers/char/agp/frontend.c
32606 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
32607 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
32608 return -EFAULT;
32609
32610 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
32611 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
32612 return -EFAULT;
32613
32614 client = agp_find_client_by_pid(reserve.pid);
32615 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
32616 index 21cb980..f15107c 100644
32617 --- a/drivers/char/genrtc.c
32618 +++ b/drivers/char/genrtc.c
32619 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
32620 switch (cmd) {
32621
32622 case RTC_PLL_GET:
32623 + memset(&pll, 0, sizeof(pll));
32624 if (get_rtc_pll(&pll))
32625 return -EINVAL;
32626 else
32627 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
32628 index fe6d4be..89f32100 100644
32629 --- a/drivers/char/hpet.c
32630 +++ b/drivers/char/hpet.c
32631 @@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
32632 }
32633
32634 static int
32635 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
32636 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
32637 struct hpet_info *info)
32638 {
32639 struct hpet_timer __iomem *timer;
32640 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
32641 index 053201b0..8335cce 100644
32642 --- a/drivers/char/ipmi/ipmi_msghandler.c
32643 +++ b/drivers/char/ipmi/ipmi_msghandler.c
32644 @@ -420,7 +420,7 @@ struct ipmi_smi {
32645 struct proc_dir_entry *proc_dir;
32646 char proc_dir_name[10];
32647
32648 - atomic_t stats[IPMI_NUM_STATS];
32649 + atomic_unchecked_t stats[IPMI_NUM_STATS];
32650
32651 /*
32652 * run_to_completion duplicate of smb_info, smi_info
32653 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
32654
32655
32656 #define ipmi_inc_stat(intf, stat) \
32657 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
32658 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
32659 #define ipmi_get_stat(intf, stat) \
32660 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
32661 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
32662
32663 static int is_lan_addr(struct ipmi_addr *addr)
32664 {
32665 @@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
32666 INIT_LIST_HEAD(&intf->cmd_rcvrs);
32667 init_waitqueue_head(&intf->waitq);
32668 for (i = 0; i < IPMI_NUM_STATS; i++)
32669 - atomic_set(&intf->stats[i], 0);
32670 + atomic_set_unchecked(&intf->stats[i], 0);
32671
32672 intf->proc_dir = NULL;
32673
32674 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
32675 index 1c7fdcd..4899100 100644
32676 --- a/drivers/char/ipmi/ipmi_si_intf.c
32677 +++ b/drivers/char/ipmi/ipmi_si_intf.c
32678 @@ -275,7 +275,7 @@ struct smi_info {
32679 unsigned char slave_addr;
32680
32681 /* Counters and things for the proc filesystem. */
32682 - atomic_t stats[SI_NUM_STATS];
32683 + atomic_unchecked_t stats[SI_NUM_STATS];
32684
32685 struct task_struct *thread;
32686
32687 @@ -284,9 +284,9 @@ struct smi_info {
32688 };
32689
32690 #define smi_inc_stat(smi, stat) \
32691 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
32692 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
32693 #define smi_get_stat(smi, stat) \
32694 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
32695 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
32696
32697 #define SI_MAX_PARMS 4
32698
32699 @@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
32700 atomic_set(&new_smi->req_events, 0);
32701 new_smi->run_to_completion = 0;
32702 for (i = 0; i < SI_NUM_STATS; i++)
32703 - atomic_set(&new_smi->stats[i], 0);
32704 + atomic_set_unchecked(&new_smi->stats[i], 0);
32705
32706 new_smi->interrupt_disabled = 1;
32707 atomic_set(&new_smi->stop_operation, 0);
32708 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
32709 index c6fa3bc..4ca3e42 100644
32710 --- a/drivers/char/mem.c
32711 +++ b/drivers/char/mem.c
32712 @@ -18,6 +18,7 @@
32713 #include <linux/raw.h>
32714 #include <linux/tty.h>
32715 #include <linux/capability.h>
32716 +#include <linux/security.h>
32717 #include <linux/ptrace.h>
32718 #include <linux/device.h>
32719 #include <linux/highmem.h>
32720 @@ -37,6 +38,10 @@
32721
32722 #define DEVPORT_MINOR 4
32723
32724 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
32725 +extern const struct file_operations grsec_fops;
32726 +#endif
32727 +
32728 static inline unsigned long size_inside_page(unsigned long start,
32729 unsigned long size)
32730 {
32731 @@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32732
32733 while (cursor < to) {
32734 if (!devmem_is_allowed(pfn)) {
32735 +#ifdef CONFIG_GRKERNSEC_KMEM
32736 + gr_handle_mem_readwrite(from, to);
32737 +#else
32738 printk(KERN_INFO
32739 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
32740 current->comm, from, to);
32741 +#endif
32742 return 0;
32743 }
32744 cursor += PAGE_SIZE;
32745 @@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32746 }
32747 return 1;
32748 }
32749 +#elif defined(CONFIG_GRKERNSEC_KMEM)
32750 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32751 +{
32752 + return 0;
32753 +}
32754 #else
32755 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32756 {
32757 @@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
32758
32759 while (count > 0) {
32760 unsigned long remaining;
32761 + char *temp;
32762
32763 sz = size_inside_page(p, count);
32764
32765 @@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
32766 if (!ptr)
32767 return -EFAULT;
32768
32769 - remaining = copy_to_user(buf, ptr, sz);
32770 +#ifdef CONFIG_PAX_USERCOPY
32771 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
32772 + if (!temp) {
32773 + unxlate_dev_mem_ptr(p, ptr);
32774 + return -ENOMEM;
32775 + }
32776 + memcpy(temp, ptr, sz);
32777 +#else
32778 + temp = ptr;
32779 +#endif
32780 +
32781 + remaining = copy_to_user(buf, temp, sz);
32782 +
32783 +#ifdef CONFIG_PAX_USERCOPY
32784 + kfree(temp);
32785 +#endif
32786 +
32787 unxlate_dev_mem_ptr(p, ptr);
32788 if (remaining)
32789 return -EFAULT;
32790 @@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
32791 size_t count, loff_t *ppos)
32792 {
32793 unsigned long p = *ppos;
32794 - ssize_t low_count, read, sz;
32795 + ssize_t low_count, read, sz, err = 0;
32796 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
32797 - int err = 0;
32798
32799 read = 0;
32800 if (p < (unsigned long) high_memory) {
32801 @@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
32802 }
32803 #endif
32804 while (low_count > 0) {
32805 + char *temp;
32806 +
32807 sz = size_inside_page(p, low_count);
32808
32809 /*
32810 @@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
32811 */
32812 kbuf = xlate_dev_kmem_ptr((char *)p);
32813
32814 - if (copy_to_user(buf, kbuf, sz))
32815 +#ifdef CONFIG_PAX_USERCOPY
32816 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
32817 + if (!temp)
32818 + return -ENOMEM;
32819 + memcpy(temp, kbuf, sz);
32820 +#else
32821 + temp = kbuf;
32822 +#endif
32823 +
32824 + err = copy_to_user(buf, temp, sz);
32825 +
32826 +#ifdef CONFIG_PAX_USERCOPY
32827 + kfree(temp);
32828 +#endif
32829 +
32830 + if (err)
32831 return -EFAULT;
32832 buf += sz;
32833 p += sz;
32834 @@ -833,6 +880,9 @@ static const struct memdev {
32835 #ifdef CONFIG_CRASH_DUMP
32836 [12] = { "oldmem", 0, &oldmem_fops, NULL },
32837 #endif
32838 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
32839 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
32840 +#endif
32841 };
32842
32843 static int memory_open(struct inode *inode, struct file *filp)
32844 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
32845 index 9df78e2..01ba9ae 100644
32846 --- a/drivers/char/nvram.c
32847 +++ b/drivers/char/nvram.c
32848 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
32849
32850 spin_unlock_irq(&rtc_lock);
32851
32852 - if (copy_to_user(buf, contents, tmp - contents))
32853 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
32854 return -EFAULT;
32855
32856 *ppos = i;
32857 diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
32858 index b66eaa0..2619d1b 100644
32859 --- a/drivers/char/pcmcia/synclink_cs.c
32860 +++ b/drivers/char/pcmcia/synclink_cs.c
32861 @@ -2348,9 +2348,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
32862
32863 if (debug_level >= DEBUG_LEVEL_INFO)
32864 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
32865 - __FILE__,__LINE__, info->device_name, port->count);
32866 + __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
32867
32868 - WARN_ON(!port->count);
32869 + WARN_ON(!atomic_read(&port->count));
32870
32871 if (tty_port_close_start(port, tty, filp) == 0)
32872 goto cleanup;
32873 @@ -2368,7 +2368,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
32874 cleanup:
32875 if (debug_level >= DEBUG_LEVEL_INFO)
32876 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
32877 - tty->driver->name, port->count);
32878 + tty->driver->name, atomic_read(&port->count));
32879 }
32880
32881 /* Wait until the transmitter is empty.
32882 @@ -2510,7 +2510,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
32883
32884 if (debug_level >= DEBUG_LEVEL_INFO)
32885 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
32886 - __FILE__,__LINE__,tty->driver->name, port->count);
32887 + __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
32888
32889 /* If port is closing, signal caller to try again */
32890 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
32891 @@ -2530,11 +2530,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
32892 goto cleanup;
32893 }
32894 spin_lock(&port->lock);
32895 - port->count++;
32896 + atomic_inc(&port->count);
32897 spin_unlock(&port->lock);
32898 spin_unlock_irqrestore(&info->netlock, flags);
32899
32900 - if (port->count == 1) {
32901 + if (atomic_read(&port->count) == 1) {
32902 /* 1st open on this device, init hardware */
32903 retval = startup(info, tty);
32904 if (retval < 0)
32905 @@ -3889,7 +3889,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
32906 unsigned short new_crctype;
32907
32908 /* return error if TTY interface open */
32909 - if (info->port.count)
32910 + if (atomic_read(&info->port.count))
32911 return -EBUSY;
32912
32913 switch (encoding)
32914 @@ -3992,7 +3992,7 @@ static int hdlcdev_open(struct net_device *dev)
32915
32916 /* arbitrate between network and tty opens */
32917 spin_lock_irqsave(&info->netlock, flags);
32918 - if (info->port.count != 0 || info->netcount != 0) {
32919 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
32920 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
32921 spin_unlock_irqrestore(&info->netlock, flags);
32922 return -EBUSY;
32923 @@ -4081,7 +4081,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32924 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
32925
32926 /* return error if TTY interface open */
32927 - if (info->port.count)
32928 + if (atomic_read(&info->port.count))
32929 return -EBUSY;
32930
32931 if (cmd != SIOCWANDEV)
32932 diff --git a/drivers/char/random.c b/drivers/char/random.c
32933 index 85e81ec..bce8b97 100644
32934 --- a/drivers/char/random.c
32935 +++ b/drivers/char/random.c
32936 @@ -272,8 +272,13 @@
32937 /*
32938 * Configuration information
32939 */
32940 +#ifdef CONFIG_GRKERNSEC_RANDNET
32941 +#define INPUT_POOL_WORDS 512
32942 +#define OUTPUT_POOL_WORDS 128
32943 +#else
32944 #define INPUT_POOL_WORDS 128
32945 #define OUTPUT_POOL_WORDS 32
32946 +#endif
32947 #define SEC_XFER_SIZE 512
32948 #define EXTRACT_SIZE 10
32949
32950 @@ -313,10 +318,17 @@ static struct poolinfo {
32951 int poolwords;
32952 int tap1, tap2, tap3, tap4, tap5;
32953 } poolinfo_table[] = {
32954 +#ifdef CONFIG_GRKERNSEC_RANDNET
32955 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
32956 + { 512, 411, 308, 208, 104, 1 },
32957 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
32958 + { 128, 103, 76, 51, 25, 1 },
32959 +#else
32960 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
32961 { 128, 103, 76, 51, 25, 1 },
32962 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
32963 { 32, 26, 20, 14, 7, 1 },
32964 +#endif
32965 #if 0
32966 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
32967 { 2048, 1638, 1231, 819, 411, 1 },
32968 @@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
32969 input_rotate += i ? 7 : 14;
32970 }
32971
32972 - ACCESS_ONCE(r->input_rotate) = input_rotate;
32973 - ACCESS_ONCE(r->add_ptr) = i;
32974 + ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
32975 + ACCESS_ONCE_RW(r->add_ptr) = i;
32976 smp_wmb();
32977
32978 if (out)
32979 @@ -1020,7 +1032,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
32980
32981 extract_buf(r, tmp);
32982 i = min_t(int, nbytes, EXTRACT_SIZE);
32983 - if (copy_to_user(buf, tmp, i)) {
32984 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
32985 ret = -EFAULT;
32986 break;
32987 }
32988 @@ -1356,7 +1368,7 @@ EXPORT_SYMBOL(generate_random_uuid);
32989 #include <linux/sysctl.h>
32990
32991 static int min_read_thresh = 8, min_write_thresh;
32992 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
32993 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
32994 static int max_write_thresh = INPUT_POOL_WORDS * 32;
32995 static char sysctl_bootid[16];
32996
32997 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
32998 index d780295..b29f3a8 100644
32999 --- a/drivers/char/sonypi.c
33000 +++ b/drivers/char/sonypi.c
33001 @@ -54,6 +54,7 @@
33002
33003 #include <asm/uaccess.h>
33004 #include <asm/io.h>
33005 +#include <asm/local.h>
33006
33007 #include <linux/sonypi.h>
33008
33009 @@ -490,7 +491,7 @@ static struct sonypi_device {
33010 spinlock_t fifo_lock;
33011 wait_queue_head_t fifo_proc_list;
33012 struct fasync_struct *fifo_async;
33013 - int open_count;
33014 + local_t open_count;
33015 int model;
33016 struct input_dev *input_jog_dev;
33017 struct input_dev *input_key_dev;
33018 @@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
33019 static int sonypi_misc_release(struct inode *inode, struct file *file)
33020 {
33021 mutex_lock(&sonypi_device.lock);
33022 - sonypi_device.open_count--;
33023 + local_dec(&sonypi_device.open_count);
33024 mutex_unlock(&sonypi_device.lock);
33025 return 0;
33026 }
33027 @@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
33028 {
33029 mutex_lock(&sonypi_device.lock);
33030 /* Flush input queue on first open */
33031 - if (!sonypi_device.open_count)
33032 + if (!local_read(&sonypi_device.open_count))
33033 kfifo_reset(&sonypi_device.fifo);
33034 - sonypi_device.open_count++;
33035 + local_inc(&sonypi_device.open_count);
33036 mutex_unlock(&sonypi_device.lock);
33037
33038 return 0;
33039 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
33040 index 93211df..c7805f7 100644
33041 --- a/drivers/char/tpm/tpm.c
33042 +++ b/drivers/char/tpm/tpm.c
33043 @@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
33044 chip->vendor.req_complete_val)
33045 goto out_recv;
33046
33047 - if ((status == chip->vendor.req_canceled)) {
33048 + if (status == chip->vendor.req_canceled) {
33049 dev_err(chip->dev, "Operation Canceled\n");
33050 rc = -ECANCELED;
33051 goto out;
33052 diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
33053 index 56051d0..11cf3b7 100644
33054 --- a/drivers/char/tpm/tpm_acpi.c
33055 +++ b/drivers/char/tpm/tpm_acpi.c
33056 @@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
33057 virt = acpi_os_map_memory(start, len);
33058 if (!virt) {
33059 kfree(log->bios_event_log);
33060 + log->bios_event_log = NULL;
33061 printk("%s: ERROR - Unable to map memory\n", __func__);
33062 return -EIO;
33063 }
33064
33065 - memcpy_fromio(log->bios_event_log, virt, len);
33066 + memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
33067
33068 acpi_os_unmap_memory(virt, len);
33069 return 0;
33070 diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
33071 index 84ddc55..1d32f1e 100644
33072 --- a/drivers/char/tpm/tpm_eventlog.c
33073 +++ b/drivers/char/tpm/tpm_eventlog.c
33074 @@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
33075 event = addr;
33076
33077 if ((event->event_type == 0 && event->event_size == 0) ||
33078 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
33079 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
33080 return NULL;
33081
33082 return addr;
33083 @@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
33084 return NULL;
33085
33086 if ((event->event_type == 0 && event->event_size == 0) ||
33087 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
33088 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
33089 return NULL;
33090
33091 (*pos)++;
33092 @@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
33093 int i;
33094
33095 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
33096 - seq_putc(m, data[i]);
33097 + if (!seq_putc(m, data[i]))
33098 + return -EFAULT;
33099
33100 return 0;
33101 }
33102 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
33103 index ee4dbea..69c817b 100644
33104 --- a/drivers/char/virtio_console.c
33105 +++ b/drivers/char/virtio_console.c
33106 @@ -681,7 +681,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
33107 if (to_user) {
33108 ssize_t ret;
33109
33110 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
33111 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
33112 if (ret)
33113 return -EFAULT;
33114 } else {
33115 @@ -780,7 +780,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
33116 if (!port_has_data(port) && !port->host_connected)
33117 return 0;
33118
33119 - return fill_readbuf(port, ubuf, count, true);
33120 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
33121 }
33122
33123 static int wait_port_writable(struct port *port, bool nonblock)
33124 diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
33125 index 8ae1a61..9c00613 100644
33126 --- a/drivers/clocksource/arm_generic.c
33127 +++ b/drivers/clocksource/arm_generic.c
33128 @@ -181,7 +181,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
33129 return NOTIFY_OK;
33130 }
33131
33132 -static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
33133 +static struct notifier_block arch_timer_cpu_nb = {
33134 .notifier_call = arch_timer_cpu_notify,
33135 };
33136
33137 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
33138 index 1f93dbd..edf95ff 100644
33139 --- a/drivers/cpufreq/cpufreq.c
33140 +++ b/drivers/cpufreq/cpufreq.c
33141 @@ -1843,7 +1843,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
33142 return NOTIFY_OK;
33143 }
33144
33145 -static struct notifier_block __refdata cpufreq_cpu_notifier = {
33146 +static struct notifier_block cpufreq_cpu_notifier = {
33147 .notifier_call = cpufreq_cpu_callback,
33148 };
33149
33150 diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
33151 index 9d7732b..0b1a793 100644
33152 --- a/drivers/cpufreq/cpufreq_stats.c
33153 +++ b/drivers/cpufreq/cpufreq_stats.c
33154 @@ -340,7 +340,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
33155 }
33156
33157 /* priority=1 so this will get called before cpufreq_remove_dev */
33158 -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
33159 +static struct notifier_block cpufreq_stat_cpu_notifier = {
33160 .notifier_call = cpufreq_stat_cpu_callback,
33161 .priority = 1,
33162 };
33163 diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
33164 index b70709b..1d8d02a 100644
33165 --- a/drivers/dma/sh/shdma.c
33166 +++ b/drivers/dma/sh/shdma.c
33167 @@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
33168 return ret;
33169 }
33170
33171 -static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
33172 +static struct notifier_block sh_dmae_nmi_notifier = {
33173 .notifier_call = sh_dmae_nmi_handler,
33174
33175 /* Run before NMI debug handler and KGDB */
33176 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
33177 index 0056c4d..725934f 100644
33178 --- a/drivers/edac/edac_pci_sysfs.c
33179 +++ b/drivers/edac/edac_pci_sysfs.c
33180 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
33181 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
33182 static int edac_pci_poll_msec = 1000; /* one second workq period */
33183
33184 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
33185 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
33186 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
33187 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
33188
33189 static struct kobject *edac_pci_top_main_kobj;
33190 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
33191 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33192 edac_printk(KERN_CRIT, EDAC_PCI,
33193 "Signaled System Error on %s\n",
33194 pci_name(dev));
33195 - atomic_inc(&pci_nonparity_count);
33196 + atomic_inc_unchecked(&pci_nonparity_count);
33197 }
33198
33199 if (status & (PCI_STATUS_PARITY)) {
33200 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33201 "Master Data Parity Error on %s\n",
33202 pci_name(dev));
33203
33204 - atomic_inc(&pci_parity_count);
33205 + atomic_inc_unchecked(&pci_parity_count);
33206 }
33207
33208 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33209 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33210 "Detected Parity Error on %s\n",
33211 pci_name(dev));
33212
33213 - atomic_inc(&pci_parity_count);
33214 + atomic_inc_unchecked(&pci_parity_count);
33215 }
33216 }
33217
33218 @@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33219 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
33220 "Signaled System Error on %s\n",
33221 pci_name(dev));
33222 - atomic_inc(&pci_nonparity_count);
33223 + atomic_inc_unchecked(&pci_nonparity_count);
33224 }
33225
33226 if (status & (PCI_STATUS_PARITY)) {
33227 @@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33228 "Master Data Parity Error on "
33229 "%s\n", pci_name(dev));
33230
33231 - atomic_inc(&pci_parity_count);
33232 + atomic_inc_unchecked(&pci_parity_count);
33233 }
33234
33235 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33236 @@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33237 "Detected Parity Error on %s\n",
33238 pci_name(dev));
33239
33240 - atomic_inc(&pci_parity_count);
33241 + atomic_inc_unchecked(&pci_parity_count);
33242 }
33243 }
33244 }
33245 @@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
33246 if (!check_pci_errors)
33247 return;
33248
33249 - before_count = atomic_read(&pci_parity_count);
33250 + before_count = atomic_read_unchecked(&pci_parity_count);
33251
33252 /* scan all PCI devices looking for a Parity Error on devices and
33253 * bridges.
33254 @@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
33255 /* Only if operator has selected panic on PCI Error */
33256 if (edac_pci_get_panic_on_pe()) {
33257 /* If the count is different 'after' from 'before' */
33258 - if (before_count != atomic_read(&pci_parity_count))
33259 + if (before_count != atomic_read_unchecked(&pci_parity_count))
33260 panic("EDAC: PCI Parity Error");
33261 }
33262 }
33263 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
33264 index 6796799..99e8377 100644
33265 --- a/drivers/edac/mce_amd.h
33266 +++ b/drivers/edac/mce_amd.h
33267 @@ -78,7 +78,7 @@ extern const char * const ii_msgs[];
33268 struct amd_decoder_ops {
33269 bool (*mc0_mce)(u16, u8);
33270 bool (*mc1_mce)(u16, u8);
33271 -};
33272 +} __no_const;
33273
33274 void amd_report_gart_errors(bool);
33275 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
33276 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
33277 index 57ea7f4..789e3c3 100644
33278 --- a/drivers/firewire/core-card.c
33279 +++ b/drivers/firewire/core-card.c
33280 @@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
33281
33282 void fw_core_remove_card(struct fw_card *card)
33283 {
33284 - struct fw_card_driver dummy_driver = dummy_driver_template;
33285 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
33286
33287 card->driver->update_phy_reg(card, 4,
33288 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
33289 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
33290 index f8d2287..5aaf4db 100644
33291 --- a/drivers/firewire/core-cdev.c
33292 +++ b/drivers/firewire/core-cdev.c
33293 @@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
33294 int ret;
33295
33296 if ((request->channels == 0 && request->bandwidth == 0) ||
33297 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
33298 - request->bandwidth < 0)
33299 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
33300 return -EINVAL;
33301
33302 r = kmalloc(sizeof(*r), GFP_KERNEL);
33303 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
33304 index 28a94c7..58da63a 100644
33305 --- a/drivers/firewire/core-transaction.c
33306 +++ b/drivers/firewire/core-transaction.c
33307 @@ -38,6 +38,7 @@
33308 #include <linux/timer.h>
33309 #include <linux/types.h>
33310 #include <linux/workqueue.h>
33311 +#include <linux/sched.h>
33312
33313 #include <asm/byteorder.h>
33314
33315 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
33316 index 515a42c..5ecf3ba 100644
33317 --- a/drivers/firewire/core.h
33318 +++ b/drivers/firewire/core.h
33319 @@ -111,6 +111,7 @@ struct fw_card_driver {
33320
33321 int (*stop_iso)(struct fw_iso_context *ctx);
33322 };
33323 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
33324
33325 void fw_card_initialize(struct fw_card *card,
33326 const struct fw_card_driver *driver, struct device *device);
33327 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
33328 index 982f1f5..d21e5da 100644
33329 --- a/drivers/firmware/dmi_scan.c
33330 +++ b/drivers/firmware/dmi_scan.c
33331 @@ -491,11 +491,6 @@ void __init dmi_scan_machine(void)
33332 }
33333 }
33334 else {
33335 - /*
33336 - * no iounmap() for that ioremap(); it would be a no-op, but
33337 - * it's so early in setup that sucker gets confused into doing
33338 - * what it shouldn't if we actually call it.
33339 - */
33340 p = dmi_ioremap(0xF0000, 0x10000);
33341 if (p == NULL)
33342 goto error;
33343 @@ -770,7 +765,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
33344 if (buf == NULL)
33345 return -1;
33346
33347 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
33348 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
33349
33350 iounmap(buf);
33351 return 0;
33352 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
33353 index bcb201c..f9782e5 100644
33354 --- a/drivers/firmware/efivars.c
33355 +++ b/drivers/firmware/efivars.c
33356 @@ -133,7 +133,7 @@ struct efivar_attribute {
33357 };
33358
33359 static struct efivars __efivars;
33360 -static struct efivar_operations ops;
33361 +static efivar_operations_no_const ops __read_only;
33362
33363 #define PSTORE_EFI_ATTRIBUTES \
33364 (EFI_VARIABLE_NON_VOLATILE | \
33365 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
33366 index 9902732..64b62dd 100644
33367 --- a/drivers/gpio/gpio-vr41xx.c
33368 +++ b/drivers/gpio/gpio-vr41xx.c
33369 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
33370 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
33371 maskl, pendl, maskh, pendh);
33372
33373 - atomic_inc(&irq_err_count);
33374 + atomic_inc_unchecked(&irq_err_count);
33375
33376 return -EINVAL;
33377 }
33378 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
33379 index 7b2d378..cc947ea 100644
33380 --- a/drivers/gpu/drm/drm_crtc_helper.c
33381 +++ b/drivers/gpu/drm/drm_crtc_helper.c
33382 @@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
33383 struct drm_crtc *tmp;
33384 int crtc_mask = 1;
33385
33386 - WARN(!crtc, "checking null crtc?\n");
33387 + BUG_ON(!crtc);
33388
33389 dev = crtc->dev;
33390
33391 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
33392 index be174ca..0bcbb71 100644
33393 --- a/drivers/gpu/drm/drm_drv.c
33394 +++ b/drivers/gpu/drm/drm_drv.c
33395 @@ -307,7 +307,7 @@ module_exit(drm_core_exit);
33396 /**
33397 * Copy and IOCTL return string to user space
33398 */
33399 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
33400 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
33401 {
33402 int len;
33403
33404 @@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
33405 return -ENODEV;
33406
33407 atomic_inc(&dev->ioctl_count);
33408 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
33409 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
33410 ++file_priv->ioctl_count;
33411
33412 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
33413 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
33414 index 133b413..fd68225 100644
33415 --- a/drivers/gpu/drm/drm_fops.c
33416 +++ b/drivers/gpu/drm/drm_fops.c
33417 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
33418 }
33419
33420 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
33421 - atomic_set(&dev->counts[i], 0);
33422 + atomic_set_unchecked(&dev->counts[i], 0);
33423
33424 dev->sigdata.lock = NULL;
33425
33426 @@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
33427 if (drm_device_is_unplugged(dev))
33428 return -ENODEV;
33429
33430 - if (!dev->open_count++)
33431 + if (local_inc_return(&dev->open_count) == 1)
33432 need_setup = 1;
33433 mutex_lock(&dev->struct_mutex);
33434 old_mapping = dev->dev_mapping;
33435 @@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
33436 retcode = drm_open_helper(inode, filp, dev);
33437 if (retcode)
33438 goto err_undo;
33439 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
33440 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
33441 if (need_setup) {
33442 retcode = drm_setup(dev);
33443 if (retcode)
33444 @@ -164,7 +164,7 @@ err_undo:
33445 iput(container_of(dev->dev_mapping, struct inode, i_data));
33446 dev->dev_mapping = old_mapping;
33447 mutex_unlock(&dev->struct_mutex);
33448 - dev->open_count--;
33449 + local_dec(&dev->open_count);
33450 return retcode;
33451 }
33452 EXPORT_SYMBOL(drm_open);
33453 @@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
33454
33455 mutex_lock(&drm_global_mutex);
33456
33457 - DRM_DEBUG("open_count = %d\n", dev->open_count);
33458 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
33459
33460 if (dev->driver->preclose)
33461 dev->driver->preclose(dev, file_priv);
33462 @@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
33463 * Begin inline drm_release
33464 */
33465
33466 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
33467 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
33468 task_pid_nr(current),
33469 (long)old_encode_dev(file_priv->minor->device),
33470 - dev->open_count);
33471 + local_read(&dev->open_count));
33472
33473 /* Release any auth tokens that might point to this file_priv,
33474 (do that under the drm_global_mutex) */
33475 @@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
33476 * End inline drm_release
33477 */
33478
33479 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
33480 - if (!--dev->open_count) {
33481 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
33482 + if (local_dec_and_test(&dev->open_count)) {
33483 if (atomic_read(&dev->ioctl_count)) {
33484 DRM_ERROR("Device busy: %d\n",
33485 atomic_read(&dev->ioctl_count));
33486 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
33487 index f731116..629842c 100644
33488 --- a/drivers/gpu/drm/drm_global.c
33489 +++ b/drivers/gpu/drm/drm_global.c
33490 @@ -36,7 +36,7 @@
33491 struct drm_global_item {
33492 struct mutex mutex;
33493 void *object;
33494 - int refcount;
33495 + atomic_t refcount;
33496 };
33497
33498 static struct drm_global_item glob[DRM_GLOBAL_NUM];
33499 @@ -49,7 +49,7 @@ void drm_global_init(void)
33500 struct drm_global_item *item = &glob[i];
33501 mutex_init(&item->mutex);
33502 item->object = NULL;
33503 - item->refcount = 0;
33504 + atomic_set(&item->refcount, 0);
33505 }
33506 }
33507
33508 @@ -59,7 +59,7 @@ void drm_global_release(void)
33509 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
33510 struct drm_global_item *item = &glob[i];
33511 BUG_ON(item->object != NULL);
33512 - BUG_ON(item->refcount != 0);
33513 + BUG_ON(atomic_read(&item->refcount) != 0);
33514 }
33515 }
33516
33517 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
33518 void *object;
33519
33520 mutex_lock(&item->mutex);
33521 - if (item->refcount == 0) {
33522 + if (atomic_read(&item->refcount) == 0) {
33523 item->object = kzalloc(ref->size, GFP_KERNEL);
33524 if (unlikely(item->object == NULL)) {
33525 ret = -ENOMEM;
33526 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
33527 goto out_err;
33528
33529 }
33530 - ++item->refcount;
33531 + atomic_inc(&item->refcount);
33532 ref->object = item->object;
33533 object = item->object;
33534 mutex_unlock(&item->mutex);
33535 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
33536 struct drm_global_item *item = &glob[ref->global_type];
33537
33538 mutex_lock(&item->mutex);
33539 - BUG_ON(item->refcount == 0);
33540 + BUG_ON(atomic_read(&item->refcount) == 0);
33541 BUG_ON(ref->object != item->object);
33542 - if (--item->refcount == 0) {
33543 + if (atomic_dec_and_test(&item->refcount)) {
33544 ref->release(ref);
33545 item->object = NULL;
33546 }
33547 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
33548 index d4b20ce..77a8d41 100644
33549 --- a/drivers/gpu/drm/drm_info.c
33550 +++ b/drivers/gpu/drm/drm_info.c
33551 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
33552 struct drm_local_map *map;
33553 struct drm_map_list *r_list;
33554
33555 - /* Hardcoded from _DRM_FRAME_BUFFER,
33556 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
33557 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
33558 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
33559 + static const char * const types[] = {
33560 + [_DRM_FRAME_BUFFER] = "FB",
33561 + [_DRM_REGISTERS] = "REG",
33562 + [_DRM_SHM] = "SHM",
33563 + [_DRM_AGP] = "AGP",
33564 + [_DRM_SCATTER_GATHER] = "SG",
33565 + [_DRM_CONSISTENT] = "PCI",
33566 + [_DRM_GEM] = "GEM" };
33567 const char *type;
33568 int i;
33569
33570 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
33571 map = r_list->map;
33572 if (!map)
33573 continue;
33574 - if (map->type < 0 || map->type > 5)
33575 + if (map->type >= ARRAY_SIZE(types))
33576 type = "??";
33577 else
33578 type = types[map->type];
33579 @@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
33580 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
33581 vma->vm_flags & VM_LOCKED ? 'l' : '-',
33582 vma->vm_flags & VM_IO ? 'i' : '-',
33583 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33584 + 0);
33585 +#else
33586 vma->vm_pgoff);
33587 +#endif
33588
33589 #if defined(__i386__)
33590 pgprot = pgprot_val(vma->vm_page_prot);
33591 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
33592 index 2f4c434..764794b 100644
33593 --- a/drivers/gpu/drm/drm_ioc32.c
33594 +++ b/drivers/gpu/drm/drm_ioc32.c
33595 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
33596 request = compat_alloc_user_space(nbytes);
33597 if (!access_ok(VERIFY_WRITE, request, nbytes))
33598 return -EFAULT;
33599 - list = (struct drm_buf_desc *) (request + 1);
33600 + list = (struct drm_buf_desc __user *) (request + 1);
33601
33602 if (__put_user(count, &request->count)
33603 || __put_user(list, &request->list))
33604 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
33605 request = compat_alloc_user_space(nbytes);
33606 if (!access_ok(VERIFY_WRITE, request, nbytes))
33607 return -EFAULT;
33608 - list = (struct drm_buf_pub *) (request + 1);
33609 + list = (struct drm_buf_pub __user *) (request + 1);
33610
33611 if (__put_user(count, &request->count)
33612 || __put_user(list, &request->list))
33613 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
33614 index e77bd8b..1571b85 100644
33615 --- a/drivers/gpu/drm/drm_ioctl.c
33616 +++ b/drivers/gpu/drm/drm_ioctl.c
33617 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
33618 stats->data[i].value =
33619 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
33620 else
33621 - stats->data[i].value = atomic_read(&dev->counts[i]);
33622 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
33623 stats->data[i].type = dev->types[i];
33624 }
33625
33626 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
33627 index d752c96..fe08455 100644
33628 --- a/drivers/gpu/drm/drm_lock.c
33629 +++ b/drivers/gpu/drm/drm_lock.c
33630 @@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33631 if (drm_lock_take(&master->lock, lock->context)) {
33632 master->lock.file_priv = file_priv;
33633 master->lock.lock_time = jiffies;
33634 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
33635 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
33636 break; /* Got lock */
33637 }
33638
33639 @@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33640 return -EINVAL;
33641 }
33642
33643 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
33644 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
33645
33646 if (drm_lock_free(&master->lock, lock->context)) {
33647 /* FIXME: Should really bail out here. */
33648 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
33649 index 200e104..59facda 100644
33650 --- a/drivers/gpu/drm/drm_stub.c
33651 +++ b/drivers/gpu/drm/drm_stub.c
33652 @@ -516,7 +516,7 @@ void drm_unplug_dev(struct drm_device *dev)
33653
33654 drm_device_set_unplugged(dev);
33655
33656 - if (dev->open_count == 0) {
33657 + if (local_read(&dev->open_count) == 0) {
33658 drm_put_dev(dev);
33659 }
33660 mutex_unlock(&drm_global_mutex);
33661 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
33662 index 004ecdf..db1f6e0 100644
33663 --- a/drivers/gpu/drm/i810/i810_dma.c
33664 +++ b/drivers/gpu/drm/i810/i810_dma.c
33665 @@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
33666 dma->buflist[vertex->idx],
33667 vertex->discard, vertex->used);
33668
33669 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33670 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33671 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33672 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33673 sarea_priv->last_enqueue = dev_priv->counter - 1;
33674 sarea_priv->last_dispatch = (int)hw_status[5];
33675
33676 @@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
33677 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
33678 mc->last_render);
33679
33680 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33681 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33682 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33683 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33684 sarea_priv->last_enqueue = dev_priv->counter - 1;
33685 sarea_priv->last_dispatch = (int)hw_status[5];
33686
33687 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
33688 index 6e0acad..93c8289 100644
33689 --- a/drivers/gpu/drm/i810/i810_drv.h
33690 +++ b/drivers/gpu/drm/i810/i810_drv.h
33691 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
33692 int page_flipping;
33693
33694 wait_queue_head_t irq_queue;
33695 - atomic_t irq_received;
33696 - atomic_t irq_emitted;
33697 + atomic_unchecked_t irq_received;
33698 + atomic_unchecked_t irq_emitted;
33699
33700 int front_offset;
33701 } drm_i810_private_t;
33702 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33703 index 8a7c48b..72effc2 100644
33704 --- a/drivers/gpu/drm/i915/i915_debugfs.c
33705 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
33706 @@ -496,7 +496,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33707 I915_READ(GTIMR));
33708 }
33709 seq_printf(m, "Interrupts received: %d\n",
33710 - atomic_read(&dev_priv->irq_received));
33711 + atomic_read_unchecked(&dev_priv->irq_received));
33712 for_each_ring(ring, dev_priv, i) {
33713 if (IS_GEN6(dev) || IS_GEN7(dev)) {
33714 seq_printf(m,
33715 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
33716 index 99daa89..84ebd44 100644
33717 --- a/drivers/gpu/drm/i915/i915_dma.c
33718 +++ b/drivers/gpu/drm/i915/i915_dma.c
33719 @@ -1253,7 +1253,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
33720 bool can_switch;
33721
33722 spin_lock(&dev->count_lock);
33723 - can_switch = (dev->open_count == 0);
33724 + can_switch = (local_read(&dev->open_count) == 0);
33725 spin_unlock(&dev->count_lock);
33726 return can_switch;
33727 }
33728 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33729 index 7339a4b..445aaba 100644
33730 --- a/drivers/gpu/drm/i915/i915_drv.h
33731 +++ b/drivers/gpu/drm/i915/i915_drv.h
33732 @@ -656,7 +656,7 @@ typedef struct drm_i915_private {
33733 drm_dma_handle_t *status_page_dmah;
33734 struct resource mch_res;
33735
33736 - atomic_t irq_received;
33737 + atomic_unchecked_t irq_received;
33738
33739 /* protects the irq masks */
33740 spinlock_t irq_lock;
33741 @@ -1102,7 +1102,7 @@ struct drm_i915_gem_object {
33742 * will be page flipped away on the next vblank. When it
33743 * reaches 0, dev_priv->pending_flip_queue will be woken up.
33744 */
33745 - atomic_t pending_flip;
33746 + atomic_unchecked_t pending_flip;
33747 };
33748 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
33749
33750 @@ -1633,7 +1633,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
33751 struct drm_i915_private *dev_priv, unsigned port);
33752 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
33753 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
33754 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
33755 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
33756 {
33757 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
33758 }
33759 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
33760 index 26d08bb..fccb984 100644
33761 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
33762 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
33763 @@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
33764 i915_gem_clflush_object(obj);
33765
33766 if (obj->base.pending_write_domain)
33767 - flips |= atomic_read(&obj->pending_flip);
33768 + flips |= atomic_read_unchecked(&obj->pending_flip);
33769
33770 flush_domains |= obj->base.write_domain;
33771 }
33772 @@ -703,9 +703,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
33773
33774 static int
33775 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
33776 - int count)
33777 + unsigned int count)
33778 {
33779 - int i;
33780 + unsigned int i;
33781
33782 for (i = 0; i < count; i++) {
33783 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
33784 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33785 index fe84338..a863190 100644
33786 --- a/drivers/gpu/drm/i915/i915_irq.c
33787 +++ b/drivers/gpu/drm/i915/i915_irq.c
33788 @@ -535,7 +535,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
33789 u32 pipe_stats[I915_MAX_PIPES];
33790 bool blc_event;
33791
33792 - atomic_inc(&dev_priv->irq_received);
33793 + atomic_inc_unchecked(&dev_priv->irq_received);
33794
33795 while (true) {
33796 iir = I915_READ(VLV_IIR);
33797 @@ -688,7 +688,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
33798 irqreturn_t ret = IRQ_NONE;
33799 int i;
33800
33801 - atomic_inc(&dev_priv->irq_received);
33802 + atomic_inc_unchecked(&dev_priv->irq_received);
33803
33804 /* disable master interrupt before clearing iir */
33805 de_ier = I915_READ(DEIER);
33806 @@ -760,7 +760,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
33807 int ret = IRQ_NONE;
33808 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
33809
33810 - atomic_inc(&dev_priv->irq_received);
33811 + atomic_inc_unchecked(&dev_priv->irq_received);
33812
33813 /* disable master interrupt before clearing iir */
33814 de_ier = I915_READ(DEIER);
33815 @@ -1787,7 +1787,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
33816 {
33817 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33818
33819 - atomic_set(&dev_priv->irq_received, 0);
33820 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33821
33822 I915_WRITE(HWSTAM, 0xeffe);
33823
33824 @@ -1813,7 +1813,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
33825 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33826 int pipe;
33827
33828 - atomic_set(&dev_priv->irq_received, 0);
33829 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33830
33831 /* VLV magic */
33832 I915_WRITE(VLV_IMR, 0);
33833 @@ -2108,7 +2108,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
33834 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33835 int pipe;
33836
33837 - atomic_set(&dev_priv->irq_received, 0);
33838 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33839
33840 for_each_pipe(pipe)
33841 I915_WRITE(PIPESTAT(pipe), 0);
33842 @@ -2159,7 +2159,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
33843 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
33844 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
33845
33846 - atomic_inc(&dev_priv->irq_received);
33847 + atomic_inc_unchecked(&dev_priv->irq_received);
33848
33849 iir = I915_READ16(IIR);
33850 if (iir == 0)
33851 @@ -2244,7 +2244,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
33852 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33853 int pipe;
33854
33855 - atomic_set(&dev_priv->irq_received, 0);
33856 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33857
33858 if (I915_HAS_HOTPLUG(dev)) {
33859 I915_WRITE(PORT_HOTPLUG_EN, 0);
33860 @@ -2339,7 +2339,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
33861 };
33862 int pipe, ret = IRQ_NONE;
33863
33864 - atomic_inc(&dev_priv->irq_received);
33865 + atomic_inc_unchecked(&dev_priv->irq_received);
33866
33867 iir = I915_READ(IIR);
33868 do {
33869 @@ -2465,7 +2465,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
33870 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33871 int pipe;
33872
33873 - atomic_set(&dev_priv->irq_received, 0);
33874 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33875
33876 I915_WRITE(PORT_HOTPLUG_EN, 0);
33877 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
33878 @@ -2572,7 +2572,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
33879 int irq_received;
33880 int ret = IRQ_NONE, pipe;
33881
33882 - atomic_inc(&dev_priv->irq_received);
33883 + atomic_inc_unchecked(&dev_priv->irq_received);
33884
33885 iir = I915_READ(IIR);
33886
33887 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
33888 index 80aa1fc..1ede041 100644
33889 --- a/drivers/gpu/drm/i915/intel_display.c
33890 +++ b/drivers/gpu/drm/i915/intel_display.c
33891 @@ -2255,7 +2255,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
33892
33893 wait_event(dev_priv->pending_flip_queue,
33894 atomic_read(&dev_priv->mm.wedged) ||
33895 - atomic_read(&obj->pending_flip) == 0);
33896 + atomic_read_unchecked(&obj->pending_flip) == 0);
33897
33898 /* Big Hammer, we also need to ensure that any pending
33899 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
33900 @@ -7122,8 +7122,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
33901
33902 obj = work->old_fb_obj;
33903
33904 - atomic_clear_mask(1 << intel_crtc->plane,
33905 - &obj->pending_flip.counter);
33906 + atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
33907 wake_up(&dev_priv->pending_flip_queue);
33908
33909 queue_work(dev_priv->wq, &work->work);
33910 @@ -7490,7 +7489,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
33911 /* Block clients from rendering to the new back buffer until
33912 * the flip occurs and the object is no longer visible.
33913 */
33914 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
33915 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
33916 atomic_inc(&intel_crtc->unpin_work_count);
33917
33918 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
33919 @@ -7507,7 +7506,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
33920
33921 cleanup_pending:
33922 atomic_dec(&intel_crtc->unpin_work_count);
33923 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
33924 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
33925 drm_gem_object_unreference(&work->old_fb_obj->base);
33926 drm_gem_object_unreference(&obj->base);
33927 mutex_unlock(&dev->struct_mutex);
33928 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33929 index 54558a0..2d97005 100644
33930 --- a/drivers/gpu/drm/mga/mga_drv.h
33931 +++ b/drivers/gpu/drm/mga/mga_drv.h
33932 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33933 u32 clear_cmd;
33934 u32 maccess;
33935
33936 - atomic_t vbl_received; /**< Number of vblanks received. */
33937 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33938 wait_queue_head_t fence_queue;
33939 - atomic_t last_fence_retired;
33940 + atomic_unchecked_t last_fence_retired;
33941 u32 next_fence_to_post;
33942
33943 unsigned int fb_cpp;
33944 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33945 index 598c281..60d590e 100644
33946 --- a/drivers/gpu/drm/mga/mga_irq.c
33947 +++ b/drivers/gpu/drm/mga/mga_irq.c
33948 @@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33949 if (crtc != 0)
33950 return 0;
33951
33952 - return atomic_read(&dev_priv->vbl_received);
33953 + return atomic_read_unchecked(&dev_priv->vbl_received);
33954 }
33955
33956
33957 @@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33958 /* VBLANK interrupt */
33959 if (status & MGA_VLINEPEN) {
33960 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33961 - atomic_inc(&dev_priv->vbl_received);
33962 + atomic_inc_unchecked(&dev_priv->vbl_received);
33963 drm_handle_vblank(dev, 0);
33964 handled = 1;
33965 }
33966 @@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33967 if ((prim_start & ~0x03) != (prim_end & ~0x03))
33968 MGA_WRITE(MGA_PRIMEND, prim_end);
33969
33970 - atomic_inc(&dev_priv->last_fence_retired);
33971 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
33972 DRM_WAKEUP(&dev_priv->fence_queue);
33973 handled = 1;
33974 }
33975 @@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
33976 * using fences.
33977 */
33978 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33979 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33980 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33981 - *sequence) <= (1 << 23)));
33982
33983 *sequence = cur_fence;
33984 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
33985 index 865eddf..62c4cc3 100644
33986 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
33987 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
33988 @@ -1015,7 +1015,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
33989 struct bit_table {
33990 const char id;
33991 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
33992 -};
33993 +} __no_const;
33994
33995 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
33996
33997 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
33998 index aa89eb9..d45d38b 100644
33999 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h
34000 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
34001 @@ -80,7 +80,7 @@ struct nouveau_drm {
34002 struct drm_global_reference mem_global_ref;
34003 struct ttm_bo_global_ref bo_global_ref;
34004 struct ttm_bo_device bdev;
34005 - atomic_t validate_sequence;
34006 + atomic_unchecked_t validate_sequence;
34007 int (*move)(struct nouveau_channel *,
34008 struct ttm_buffer_object *,
34009 struct ttm_mem_reg *, struct ttm_mem_reg *);
34010 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
34011 index cdb83ac..27f0a16 100644
34012 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h
34013 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
34014 @@ -43,7 +43,7 @@ struct nouveau_fence_priv {
34015 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
34016 struct nouveau_channel *);
34017 u32 (*read)(struct nouveau_channel *);
34018 -};
34019 +} __no_const;
34020
34021 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
34022
34023 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
34024 index 8bf695c..9fbc90a 100644
34025 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
34026 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
34027 @@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
34028 int trycnt = 0;
34029 int ret, i;
34030
34031 - sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
34032 + sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
34033 retry:
34034 if (++trycnt > 100000) {
34035 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
34036 diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
34037 index 25d3495..d81aaf6 100644
34038 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c
34039 +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
34040 @@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
34041 bool can_switch;
34042
34043 spin_lock(&dev->count_lock);
34044 - can_switch = (dev->open_count == 0);
34045 + can_switch = (local_read(&dev->open_count) == 0);
34046 spin_unlock(&dev->count_lock);
34047 return can_switch;
34048 }
34049 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
34050 index d4660cf..70dbe65 100644
34051 --- a/drivers/gpu/drm/r128/r128_cce.c
34052 +++ b/drivers/gpu/drm/r128/r128_cce.c
34053 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
34054
34055 /* GH: Simple idle check.
34056 */
34057 - atomic_set(&dev_priv->idle_count, 0);
34058 + atomic_set_unchecked(&dev_priv->idle_count, 0);
34059
34060 /* We don't support anything other than bus-mastering ring mode,
34061 * but the ring can be in either AGP or PCI space for the ring
34062 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
34063 index 930c71b..499aded 100644
34064 --- a/drivers/gpu/drm/r128/r128_drv.h
34065 +++ b/drivers/gpu/drm/r128/r128_drv.h
34066 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
34067 int is_pci;
34068 unsigned long cce_buffers_offset;
34069
34070 - atomic_t idle_count;
34071 + atomic_unchecked_t idle_count;
34072
34073 int page_flipping;
34074 int current_page;
34075 u32 crtc_offset;
34076 u32 crtc_offset_cntl;
34077
34078 - atomic_t vbl_received;
34079 + atomic_unchecked_t vbl_received;
34080
34081 u32 color_fmt;
34082 unsigned int front_offset;
34083 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
34084 index 2ea4f09..d391371 100644
34085 --- a/drivers/gpu/drm/r128/r128_irq.c
34086 +++ b/drivers/gpu/drm/r128/r128_irq.c
34087 @@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
34088 if (crtc != 0)
34089 return 0;
34090
34091 - return atomic_read(&dev_priv->vbl_received);
34092 + return atomic_read_unchecked(&dev_priv->vbl_received);
34093 }
34094
34095 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
34096 @@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
34097 /* VBLANK interrupt */
34098 if (status & R128_CRTC_VBLANK_INT) {
34099 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
34100 - atomic_inc(&dev_priv->vbl_received);
34101 + atomic_inc_unchecked(&dev_priv->vbl_received);
34102 drm_handle_vblank(dev, 0);
34103 return IRQ_HANDLED;
34104 }
34105 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
34106 index 19bb7e6..de7e2a2 100644
34107 --- a/drivers/gpu/drm/r128/r128_state.c
34108 +++ b/drivers/gpu/drm/r128/r128_state.c
34109 @@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
34110
34111 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
34112 {
34113 - if (atomic_read(&dev_priv->idle_count) == 0)
34114 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
34115 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
34116 else
34117 - atomic_set(&dev_priv->idle_count, 0);
34118 + atomic_set_unchecked(&dev_priv->idle_count, 0);
34119 }
34120
34121 #endif
34122 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
34123 index 5a82b6b..9e69c73 100644
34124 --- a/drivers/gpu/drm/radeon/mkregtable.c
34125 +++ b/drivers/gpu/drm/radeon/mkregtable.c
34126 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
34127 regex_t mask_rex;
34128 regmatch_t match[4];
34129 char buf[1024];
34130 - size_t end;
34131 + long end;
34132 int len;
34133 int done = 0;
34134 int r;
34135 unsigned o;
34136 struct offset *offset;
34137 char last_reg_s[10];
34138 - int last_reg;
34139 + unsigned long last_reg;
34140
34141 if (regcomp
34142 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
34143 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
34144 index 0d6562b..a154330 100644
34145 --- a/drivers/gpu/drm/radeon/radeon_device.c
34146 +++ b/drivers/gpu/drm/radeon/radeon_device.c
34147 @@ -969,7 +969,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
34148 bool can_switch;
34149
34150 spin_lock(&dev->count_lock);
34151 - can_switch = (dev->open_count == 0);
34152 + can_switch = (local_read(&dev->open_count) == 0);
34153 spin_unlock(&dev->count_lock);
34154 return can_switch;
34155 }
34156 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
34157 index e7fdf16..f4f6490 100644
34158 --- a/drivers/gpu/drm/radeon/radeon_drv.h
34159 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
34160 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
34161
34162 /* SW interrupt */
34163 wait_queue_head_t swi_queue;
34164 - atomic_t swi_emitted;
34165 + atomic_unchecked_t swi_emitted;
34166 int vblank_crtc;
34167 uint32_t irq_enable_reg;
34168 uint32_t r500_disp_irq_reg;
34169 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
34170 index c180df8..cd80dd2d 100644
34171 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
34172 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
34173 @@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
34174 request = compat_alloc_user_space(sizeof(*request));
34175 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
34176 || __put_user(req32.param, &request->param)
34177 - || __put_user((void __user *)(unsigned long)req32.value,
34178 + || __put_user((unsigned long)req32.value,
34179 &request->value))
34180 return -EFAULT;
34181
34182 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
34183 index e771033..a0bc6b3 100644
34184 --- a/drivers/gpu/drm/radeon/radeon_irq.c
34185 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
34186 @@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
34187 unsigned int ret;
34188 RING_LOCALS;
34189
34190 - atomic_inc(&dev_priv->swi_emitted);
34191 - ret = atomic_read(&dev_priv->swi_emitted);
34192 + atomic_inc_unchecked(&dev_priv->swi_emitted);
34193 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
34194
34195 BEGIN_RING(4);
34196 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
34197 @@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
34198 drm_radeon_private_t *dev_priv =
34199 (drm_radeon_private_t *) dev->dev_private;
34200
34201 - atomic_set(&dev_priv->swi_emitted, 0);
34202 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
34203 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
34204
34205 dev->max_vblank_count = 0x001fffff;
34206 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
34207 index 8e9057b..af6dacb 100644
34208 --- a/drivers/gpu/drm/radeon/radeon_state.c
34209 +++ b/drivers/gpu/drm/radeon/radeon_state.c
34210 @@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
34211 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
34212 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
34213
34214 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
34215 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
34216 sarea_priv->nbox * sizeof(depth_boxes[0])))
34217 return -EFAULT;
34218
34219 @@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
34220 {
34221 drm_radeon_private_t *dev_priv = dev->dev_private;
34222 drm_radeon_getparam_t *param = data;
34223 - int value;
34224 + int value = 0;
34225
34226 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
34227
34228 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
34229 index 93f760e..33d9839 100644
34230 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
34231 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
34232 @@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
34233 man->size = size >> PAGE_SHIFT;
34234 }
34235
34236 -static struct vm_operations_struct radeon_ttm_vm_ops;
34237 +static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
34238 static const struct vm_operations_struct *ttm_vm_ops = NULL;
34239
34240 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34241 @@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
34242 }
34243 if (unlikely(ttm_vm_ops == NULL)) {
34244 ttm_vm_ops = vma->vm_ops;
34245 + pax_open_kernel();
34246 radeon_ttm_vm_ops = *ttm_vm_ops;
34247 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
34248 + pax_close_kernel();
34249 }
34250 vma->vm_ops = &radeon_ttm_vm_ops;
34251 return 0;
34252 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
34253 index 5706d2a..17aedaa 100644
34254 --- a/drivers/gpu/drm/radeon/rs690.c
34255 +++ b/drivers/gpu/drm/radeon/rs690.c
34256 @@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
34257 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
34258 rdev->pm.sideport_bandwidth.full)
34259 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
34260 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
34261 + read_delay_latency.full = dfixed_const(800 * 1000);
34262 read_delay_latency.full = dfixed_div(read_delay_latency,
34263 rdev->pm.igp_sideport_mclk);
34264 + a.full = dfixed_const(370);
34265 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
34266 } else {
34267 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
34268 rdev->pm.k8_bandwidth.full)
34269 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
34270 index bd2a3b4..122d9ad 100644
34271 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
34272 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
34273 @@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
34274 static int ttm_pool_mm_shrink(struct shrinker *shrink,
34275 struct shrink_control *sc)
34276 {
34277 - static atomic_t start_pool = ATOMIC_INIT(0);
34278 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
34279 unsigned i;
34280 - unsigned pool_offset = atomic_add_return(1, &start_pool);
34281 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
34282 struct ttm_page_pool *pool;
34283 int shrink_pages = sc->nr_to_scan;
34284
34285 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
34286 index 893a650..6190d3b 100644
34287 --- a/drivers/gpu/drm/via/via_drv.h
34288 +++ b/drivers/gpu/drm/via/via_drv.h
34289 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
34290 typedef uint32_t maskarray_t[5];
34291
34292 typedef struct drm_via_irq {
34293 - atomic_t irq_received;
34294 + atomic_unchecked_t irq_received;
34295 uint32_t pending_mask;
34296 uint32_t enable_mask;
34297 wait_queue_head_t irq_queue;
34298 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
34299 struct timeval last_vblank;
34300 int last_vblank_valid;
34301 unsigned usec_per_vblank;
34302 - atomic_t vbl_received;
34303 + atomic_unchecked_t vbl_received;
34304 drm_via_state_t hc_state;
34305 char pci_buf[VIA_PCI_BUF_SIZE];
34306 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
34307 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
34308 index ac98964..5dbf512 100644
34309 --- a/drivers/gpu/drm/via/via_irq.c
34310 +++ b/drivers/gpu/drm/via/via_irq.c
34311 @@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
34312 if (crtc != 0)
34313 return 0;
34314
34315 - return atomic_read(&dev_priv->vbl_received);
34316 + return atomic_read_unchecked(&dev_priv->vbl_received);
34317 }
34318
34319 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34320 @@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34321
34322 status = VIA_READ(VIA_REG_INTERRUPT);
34323 if (status & VIA_IRQ_VBLANK_PENDING) {
34324 - atomic_inc(&dev_priv->vbl_received);
34325 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
34326 + atomic_inc_unchecked(&dev_priv->vbl_received);
34327 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
34328 do_gettimeofday(&cur_vblank);
34329 if (dev_priv->last_vblank_valid) {
34330 dev_priv->usec_per_vblank =
34331 @@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34332 dev_priv->last_vblank = cur_vblank;
34333 dev_priv->last_vblank_valid = 1;
34334 }
34335 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
34336 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
34337 DRM_DEBUG("US per vblank is: %u\n",
34338 dev_priv->usec_per_vblank);
34339 }
34340 @@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34341
34342 for (i = 0; i < dev_priv->num_irqs; ++i) {
34343 if (status & cur_irq->pending_mask) {
34344 - atomic_inc(&cur_irq->irq_received);
34345 + atomic_inc_unchecked(&cur_irq->irq_received);
34346 DRM_WAKEUP(&cur_irq->irq_queue);
34347 handled = 1;
34348 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
34349 @@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
34350 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34351 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
34352 masks[irq][4]));
34353 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
34354 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
34355 } else {
34356 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34357 (((cur_irq_sequence =
34358 - atomic_read(&cur_irq->irq_received)) -
34359 + atomic_read_unchecked(&cur_irq->irq_received)) -
34360 *sequence) <= (1 << 23)));
34361 }
34362 *sequence = cur_irq_sequence;
34363 @@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
34364 }
34365
34366 for (i = 0; i < dev_priv->num_irqs; ++i) {
34367 - atomic_set(&cur_irq->irq_received, 0);
34368 + atomic_set_unchecked(&cur_irq->irq_received, 0);
34369 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
34370 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
34371 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
34372 @@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
34373 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
34374 case VIA_IRQ_RELATIVE:
34375 irqwait->request.sequence +=
34376 - atomic_read(&cur_irq->irq_received);
34377 + atomic_read_unchecked(&cur_irq->irq_received);
34378 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
34379 case VIA_IRQ_ABSOLUTE:
34380 break;
34381 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
34382 index 13aeda7..4a952d1 100644
34383 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
34384 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
34385 @@ -290,7 +290,7 @@ struct vmw_private {
34386 * Fencing and IRQs.
34387 */
34388
34389 - atomic_t marker_seq;
34390 + atomic_unchecked_t marker_seq;
34391 wait_queue_head_t fence_queue;
34392 wait_queue_head_t fifo_queue;
34393 int fence_queue_waiters; /* Protected by hw_mutex */
34394 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
34395 index 3eb1486..0a47ee9 100644
34396 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
34397 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
34398 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
34399 (unsigned int) min,
34400 (unsigned int) fifo->capabilities);
34401
34402 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
34403 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
34404 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
34405 vmw_marker_queue_init(&fifo->marker_queue);
34406 return vmw_fifo_send_fence(dev_priv, &dummy);
34407 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
34408 if (reserveable)
34409 iowrite32(bytes, fifo_mem +
34410 SVGA_FIFO_RESERVED);
34411 - return fifo_mem + (next_cmd >> 2);
34412 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
34413 } else {
34414 need_bounce = true;
34415 }
34416 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
34417
34418 fm = vmw_fifo_reserve(dev_priv, bytes);
34419 if (unlikely(fm == NULL)) {
34420 - *seqno = atomic_read(&dev_priv->marker_seq);
34421 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
34422 ret = -ENOMEM;
34423 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
34424 false, 3*HZ);
34425 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
34426 }
34427
34428 do {
34429 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
34430 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
34431 } while (*seqno == 0);
34432
34433 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
34434 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
34435 index 4640adb..e1384ed 100644
34436 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
34437 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
34438 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
34439 * emitted. Then the fence is stale and signaled.
34440 */
34441
34442 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
34443 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
34444 > VMW_FENCE_WRAP);
34445
34446 return ret;
34447 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
34448
34449 if (fifo_idle)
34450 down_read(&fifo_state->rwsem);
34451 - signal_seq = atomic_read(&dev_priv->marker_seq);
34452 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
34453 ret = 0;
34454
34455 for (;;) {
34456 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
34457 index 8a8725c..afed796 100644
34458 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
34459 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
34460 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
34461 while (!vmw_lag_lt(queue, us)) {
34462 spin_lock(&queue->lock);
34463 if (list_empty(&queue->head))
34464 - seqno = atomic_read(&dev_priv->marker_seq);
34465 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
34466 else {
34467 marker = list_first_entry(&queue->head,
34468 struct vmw_marker, head);
34469 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
34470 index ceb3040..6160c5c 100644
34471 --- a/drivers/hid/hid-core.c
34472 +++ b/drivers/hid/hid-core.c
34473 @@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
34474
34475 int hid_add_device(struct hid_device *hdev)
34476 {
34477 - static atomic_t id = ATOMIC_INIT(0);
34478 + static atomic_unchecked_t id = ATOMIC_INIT(0);
34479 int ret;
34480
34481 if (WARN_ON(hdev->status & HID_STAT_ADDED))
34482 @@ -2276,7 +2276,7 @@ int hid_add_device(struct hid_device *hdev)
34483 /* XXX hack, any other cleaner solution after the driver core
34484 * is converted to allow more than 20 bytes as the device name? */
34485 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
34486 - hdev->vendor, hdev->product, atomic_inc_return(&id));
34487 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
34488
34489 hid_debug_register(hdev, dev_name(&hdev->dev));
34490 ret = device_add(&hdev->dev);
34491 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
34492 index eec3291..8ed706b 100644
34493 --- a/drivers/hid/hid-wiimote-debug.c
34494 +++ b/drivers/hid/hid-wiimote-debug.c
34495 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
34496 else if (size == 0)
34497 return -EIO;
34498
34499 - if (copy_to_user(u, buf, size))
34500 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
34501 return -EFAULT;
34502
34503 *off += size;
34504 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
34505 index 773a2f2..7ce08bc 100644
34506 --- a/drivers/hv/channel.c
34507 +++ b/drivers/hv/channel.c
34508 @@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
34509 int ret = 0;
34510 int t;
34511
34512 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
34513 - atomic_inc(&vmbus_connection.next_gpadl_handle);
34514 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
34515 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
34516
34517 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
34518 if (ret)
34519 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
34520 index 3648f8f..30ef30d 100644
34521 --- a/drivers/hv/hv.c
34522 +++ b/drivers/hv/hv.c
34523 @@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
34524 u64 output_address = (output) ? virt_to_phys(output) : 0;
34525 u32 output_address_hi = output_address >> 32;
34526 u32 output_address_lo = output_address & 0xFFFFFFFF;
34527 - void *hypercall_page = hv_context.hypercall_page;
34528 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
34529
34530 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
34531 "=a"(hv_status_lo) : "d" (control_hi),
34532 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
34533 index d8d1fad..b91caf7 100644
34534 --- a/drivers/hv/hyperv_vmbus.h
34535 +++ b/drivers/hv/hyperv_vmbus.h
34536 @@ -594,7 +594,7 @@ enum vmbus_connect_state {
34537 struct vmbus_connection {
34538 enum vmbus_connect_state conn_state;
34539
34540 - atomic_t next_gpadl_handle;
34541 + atomic_unchecked_t next_gpadl_handle;
34542
34543 /*
34544 * Represents channel interrupts. Each bit position represents a
34545 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
34546 index 8e1a9ec..4687821 100644
34547 --- a/drivers/hv/vmbus_drv.c
34548 +++ b/drivers/hv/vmbus_drv.c
34549 @@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
34550 {
34551 int ret = 0;
34552
34553 - static atomic_t device_num = ATOMIC_INIT(0);
34554 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34555
34556 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
34557 - atomic_inc_return(&device_num));
34558 + atomic_inc_return_unchecked(&device_num));
34559
34560 child_device_obj->device.bus = &hv_bus;
34561 child_device_obj->device.parent = &hv_acpi_dev->dev;
34562 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
34563 index d64923d..72591e8 100644
34564 --- a/drivers/hwmon/coretemp.c
34565 +++ b/drivers/hwmon/coretemp.c
34566 @@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
34567 return NOTIFY_OK;
34568 }
34569
34570 -static struct notifier_block coretemp_cpu_notifier __refdata = {
34571 +static struct notifier_block coretemp_cpu_notifier = {
34572 .notifier_call = coretemp_cpu_callback,
34573 };
34574
34575 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
34576 index 1c85d39..55ed3cf 100644
34577 --- a/drivers/hwmon/sht15.c
34578 +++ b/drivers/hwmon/sht15.c
34579 @@ -169,7 +169,7 @@ struct sht15_data {
34580 int supply_uV;
34581 bool supply_uV_valid;
34582 struct work_struct update_supply_work;
34583 - atomic_t interrupt_handled;
34584 + atomic_unchecked_t interrupt_handled;
34585 };
34586
34587 /**
34588 @@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
34589 return ret;
34590
34591 gpio_direction_input(data->pdata->gpio_data);
34592 - atomic_set(&data->interrupt_handled, 0);
34593 + atomic_set_unchecked(&data->interrupt_handled, 0);
34594
34595 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34596 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34597 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34598 /* Only relevant if the interrupt hasn't occurred. */
34599 - if (!atomic_read(&data->interrupt_handled))
34600 + if (!atomic_read_unchecked(&data->interrupt_handled))
34601 schedule_work(&data->read_work);
34602 }
34603 ret = wait_event_timeout(data->wait_queue,
34604 @@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34605
34606 /* First disable the interrupt */
34607 disable_irq_nosync(irq);
34608 - atomic_inc(&data->interrupt_handled);
34609 + atomic_inc_unchecked(&data->interrupt_handled);
34610 /* Then schedule a reading work struct */
34611 if (data->state != SHT15_READING_NOTHING)
34612 schedule_work(&data->read_work);
34613 @@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34614 * If not, then start the interrupt again - care here as could
34615 * have gone low in meantime so verify it hasn't!
34616 */
34617 - atomic_set(&data->interrupt_handled, 0);
34618 + atomic_set_unchecked(&data->interrupt_handled, 0);
34619 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34620 /* If still not occurred or another handler was scheduled */
34621 if (gpio_get_value(data->pdata->gpio_data)
34622 - || atomic_read(&data->interrupt_handled))
34623 + || atomic_read_unchecked(&data->interrupt_handled))
34624 return;
34625 }
34626
34627 diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
34628 index 76f157b..9c0db1b 100644
34629 --- a/drivers/hwmon/via-cputemp.c
34630 +++ b/drivers/hwmon/via-cputemp.c
34631 @@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
34632 return NOTIFY_OK;
34633 }
34634
34635 -static struct notifier_block via_cputemp_cpu_notifier __refdata = {
34636 +static struct notifier_block via_cputemp_cpu_notifier = {
34637 .notifier_call = via_cputemp_cpu_callback,
34638 };
34639
34640 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34641 index 378fcb5..5e91fa8 100644
34642 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
34643 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34644 @@ -43,7 +43,7 @@
34645 extern struct i2c_adapter amd756_smbus;
34646
34647 static struct i2c_adapter *s4882_adapter;
34648 -static struct i2c_algorithm *s4882_algo;
34649 +static i2c_algorithm_no_const *s4882_algo;
34650
34651 /* Wrapper access functions for multiplexed SMBus */
34652 static DEFINE_MUTEX(amd756_lock);
34653 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34654 index 29015eb..af2d8e9 100644
34655 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34656 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34657 @@ -41,7 +41,7 @@
34658 extern struct i2c_adapter *nforce2_smbus;
34659
34660 static struct i2c_adapter *s4985_adapter;
34661 -static struct i2c_algorithm *s4985_algo;
34662 +static i2c_algorithm_no_const *s4985_algo;
34663
34664 /* Wrapper access functions for multiplexed SMBus */
34665 static DEFINE_MUTEX(nforce2_lock);
34666 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34667 index 8126824..55a2798 100644
34668 --- a/drivers/ide/ide-cd.c
34669 +++ b/drivers/ide/ide-cd.c
34670 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34671 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34672 if ((unsigned long)buf & alignment
34673 || blk_rq_bytes(rq) & q->dma_pad_mask
34674 - || object_is_on_stack(buf))
34675 + || object_starts_on_stack(buf))
34676 drive->dma = 0;
34677 }
34678 }
34679 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34680 index 394fea2..c833880 100644
34681 --- a/drivers/infiniband/core/cm.c
34682 +++ b/drivers/infiniband/core/cm.c
34683 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34684
34685 struct cm_counter_group {
34686 struct kobject obj;
34687 - atomic_long_t counter[CM_ATTR_COUNT];
34688 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34689 };
34690
34691 struct cm_counter_attribute {
34692 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34693 struct ib_mad_send_buf *msg = NULL;
34694 int ret;
34695
34696 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34697 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34698 counter[CM_REQ_COUNTER]);
34699
34700 /* Quick state check to discard duplicate REQs. */
34701 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34702 if (!cm_id_priv)
34703 return;
34704
34705 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34706 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34707 counter[CM_REP_COUNTER]);
34708 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34709 if (ret)
34710 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
34711 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34712 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34713 spin_unlock_irq(&cm_id_priv->lock);
34714 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34715 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34716 counter[CM_RTU_COUNTER]);
34717 goto out;
34718 }
34719 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
34720 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34721 dreq_msg->local_comm_id);
34722 if (!cm_id_priv) {
34723 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34724 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34725 counter[CM_DREQ_COUNTER]);
34726 cm_issue_drep(work->port, work->mad_recv_wc);
34727 return -EINVAL;
34728 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
34729 case IB_CM_MRA_REP_RCVD:
34730 break;
34731 case IB_CM_TIMEWAIT:
34732 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34733 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34734 counter[CM_DREQ_COUNTER]);
34735 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34736 goto unlock;
34737 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
34738 cm_free_msg(msg);
34739 goto deref;
34740 case IB_CM_DREQ_RCVD:
34741 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34742 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34743 counter[CM_DREQ_COUNTER]);
34744 goto unlock;
34745 default:
34746 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
34747 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34748 cm_id_priv->msg, timeout)) {
34749 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34750 - atomic_long_inc(&work->port->
34751 + atomic_long_inc_unchecked(&work->port->
34752 counter_group[CM_RECV_DUPLICATES].
34753 counter[CM_MRA_COUNTER]);
34754 goto out;
34755 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
34756 break;
34757 case IB_CM_MRA_REQ_RCVD:
34758 case IB_CM_MRA_REP_RCVD:
34759 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34760 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34761 counter[CM_MRA_COUNTER]);
34762 /* fall through */
34763 default:
34764 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
34765 case IB_CM_LAP_IDLE:
34766 break;
34767 case IB_CM_MRA_LAP_SENT:
34768 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34769 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34770 counter[CM_LAP_COUNTER]);
34771 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34772 goto unlock;
34773 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
34774 cm_free_msg(msg);
34775 goto deref;
34776 case IB_CM_LAP_RCVD:
34777 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34778 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34779 counter[CM_LAP_COUNTER]);
34780 goto unlock;
34781 default:
34782 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
34783 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
34784 if (cur_cm_id_priv) {
34785 spin_unlock_irq(&cm.lock);
34786 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34787 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34788 counter[CM_SIDR_REQ_COUNTER]);
34789 goto out; /* Duplicate message. */
34790 }
34791 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
34792 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
34793 msg->retries = 1;
34794
34795 - atomic_long_add(1 + msg->retries,
34796 + atomic_long_add_unchecked(1 + msg->retries,
34797 &port->counter_group[CM_XMIT].counter[attr_index]);
34798 if (msg->retries)
34799 - atomic_long_add(msg->retries,
34800 + atomic_long_add_unchecked(msg->retries,
34801 &port->counter_group[CM_XMIT_RETRIES].
34802 counter[attr_index]);
34803
34804 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
34805 }
34806
34807 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
34808 - atomic_long_inc(&port->counter_group[CM_RECV].
34809 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
34810 counter[attr_id - CM_ATTR_ID_OFFSET]);
34811
34812 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
34813 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
34814 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
34815
34816 return sprintf(buf, "%ld\n",
34817 - atomic_long_read(&group->counter[cm_attr->index]));
34818 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
34819 }
34820
34821 static const struct sysfs_ops cm_counter_ops = {
34822 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
34823 index 176c8f9..2627b62 100644
34824 --- a/drivers/infiniband/core/fmr_pool.c
34825 +++ b/drivers/infiniband/core/fmr_pool.c
34826 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
34827
34828 struct task_struct *thread;
34829
34830 - atomic_t req_ser;
34831 - atomic_t flush_ser;
34832 + atomic_unchecked_t req_ser;
34833 + atomic_unchecked_t flush_ser;
34834
34835 wait_queue_head_t force_wait;
34836 };
34837 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34838 struct ib_fmr_pool *pool = pool_ptr;
34839
34840 do {
34841 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
34842 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
34843 ib_fmr_batch_release(pool);
34844
34845 - atomic_inc(&pool->flush_ser);
34846 + atomic_inc_unchecked(&pool->flush_ser);
34847 wake_up_interruptible(&pool->force_wait);
34848
34849 if (pool->flush_function)
34850 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34851 }
34852
34853 set_current_state(TASK_INTERRUPTIBLE);
34854 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
34855 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
34856 !kthread_should_stop())
34857 schedule();
34858 __set_current_state(TASK_RUNNING);
34859 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
34860 pool->dirty_watermark = params->dirty_watermark;
34861 pool->dirty_len = 0;
34862 spin_lock_init(&pool->pool_lock);
34863 - atomic_set(&pool->req_ser, 0);
34864 - atomic_set(&pool->flush_ser, 0);
34865 + atomic_set_unchecked(&pool->req_ser, 0);
34866 + atomic_set_unchecked(&pool->flush_ser, 0);
34867 init_waitqueue_head(&pool->force_wait);
34868
34869 pool->thread = kthread_run(ib_fmr_cleanup_thread,
34870 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
34871 }
34872 spin_unlock_irq(&pool->pool_lock);
34873
34874 - serial = atomic_inc_return(&pool->req_ser);
34875 + serial = atomic_inc_return_unchecked(&pool->req_ser);
34876 wake_up_process(pool->thread);
34877
34878 if (wait_event_interruptible(pool->force_wait,
34879 - atomic_read(&pool->flush_ser) - serial >= 0))
34880 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
34881 return -EINTR;
34882
34883 return 0;
34884 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
34885 } else {
34886 list_add_tail(&fmr->list, &pool->dirty_list);
34887 if (++pool->dirty_len >= pool->dirty_watermark) {
34888 - atomic_inc(&pool->req_ser);
34889 + atomic_inc_unchecked(&pool->req_ser);
34890 wake_up_process(pool->thread);
34891 }
34892 }
34893 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
34894 index afd8179..598063f 100644
34895 --- a/drivers/infiniband/hw/cxgb4/mem.c
34896 +++ b/drivers/infiniband/hw/cxgb4/mem.c
34897 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
34898 int err;
34899 struct fw_ri_tpte tpt;
34900 u32 stag_idx;
34901 - static atomic_t key;
34902 + static atomic_unchecked_t key;
34903
34904 if (c4iw_fatal_error(rdev))
34905 return -EIO;
34906 @@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
34907 if (rdev->stats.stag.cur > rdev->stats.stag.max)
34908 rdev->stats.stag.max = rdev->stats.stag.cur;
34909 mutex_unlock(&rdev->stats.lock);
34910 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
34911 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
34912 }
34913 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
34914 __func__, stag_state, type, pdid, stag_idx);
34915 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
34916 index 79b3dbc..96e5fcc 100644
34917 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
34918 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
34919 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
34920 struct ib_atomic_eth *ateth;
34921 struct ipath_ack_entry *e;
34922 u64 vaddr;
34923 - atomic64_t *maddr;
34924 + atomic64_unchecked_t *maddr;
34925 u64 sdata;
34926 u32 rkey;
34927 u8 next;
34928 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
34929 IB_ACCESS_REMOTE_ATOMIC)))
34930 goto nack_acc_unlck;
34931 /* Perform atomic OP and save result. */
34932 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
34933 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
34934 sdata = be64_to_cpu(ateth->swap_data);
34935 e = &qp->s_ack_queue[qp->r_head_ack_queue];
34936 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
34937 - (u64) atomic64_add_return(sdata, maddr) - sdata :
34938 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
34939 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
34940 be64_to_cpu(ateth->compare_data),
34941 sdata);
34942 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
34943 index 1f95bba..9530f87 100644
34944 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
34945 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
34946 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
34947 unsigned long flags;
34948 struct ib_wc wc;
34949 u64 sdata;
34950 - atomic64_t *maddr;
34951 + atomic64_unchecked_t *maddr;
34952 enum ib_wc_status send_status;
34953
34954 /*
34955 @@ -382,11 +382,11 @@ again:
34956 IB_ACCESS_REMOTE_ATOMIC)))
34957 goto acc_err;
34958 /* Perform atomic OP and save result. */
34959 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
34960 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
34961 sdata = wqe->wr.wr.atomic.compare_add;
34962 *(u64 *) sqp->s_sge.sge.vaddr =
34963 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
34964 - (u64) atomic64_add_return(sdata, maddr) - sdata :
34965 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
34966 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
34967 sdata, wqe->wr.wr.atomic.swap);
34968 goto send_comp;
34969 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
34970 index 5b152a3..c1f3e83 100644
34971 --- a/drivers/infiniband/hw/nes/nes.c
34972 +++ b/drivers/infiniband/hw/nes/nes.c
34973 @@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
34974 LIST_HEAD(nes_adapter_list);
34975 static LIST_HEAD(nes_dev_list);
34976
34977 -atomic_t qps_destroyed;
34978 +atomic_unchecked_t qps_destroyed;
34979
34980 static unsigned int ee_flsh_adapter;
34981 static unsigned int sysfs_nonidx_addr;
34982 @@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
34983 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
34984 struct nes_adapter *nesadapter = nesdev->nesadapter;
34985
34986 - atomic_inc(&qps_destroyed);
34987 + atomic_inc_unchecked(&qps_destroyed);
34988
34989 /* Free the control structures */
34990
34991 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
34992 index 33cc589..3bd6538 100644
34993 --- a/drivers/infiniband/hw/nes/nes.h
34994 +++ b/drivers/infiniband/hw/nes/nes.h
34995 @@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
34996 extern unsigned int wqm_quanta;
34997 extern struct list_head nes_adapter_list;
34998
34999 -extern atomic_t cm_connects;
35000 -extern atomic_t cm_accepts;
35001 -extern atomic_t cm_disconnects;
35002 -extern atomic_t cm_closes;
35003 -extern atomic_t cm_connecteds;
35004 -extern atomic_t cm_connect_reqs;
35005 -extern atomic_t cm_rejects;
35006 -extern atomic_t mod_qp_timouts;
35007 -extern atomic_t qps_created;
35008 -extern atomic_t qps_destroyed;
35009 -extern atomic_t sw_qps_destroyed;
35010 +extern atomic_unchecked_t cm_connects;
35011 +extern atomic_unchecked_t cm_accepts;
35012 +extern atomic_unchecked_t cm_disconnects;
35013 +extern atomic_unchecked_t cm_closes;
35014 +extern atomic_unchecked_t cm_connecteds;
35015 +extern atomic_unchecked_t cm_connect_reqs;
35016 +extern atomic_unchecked_t cm_rejects;
35017 +extern atomic_unchecked_t mod_qp_timouts;
35018 +extern atomic_unchecked_t qps_created;
35019 +extern atomic_unchecked_t qps_destroyed;
35020 +extern atomic_unchecked_t sw_qps_destroyed;
35021 extern u32 mh_detected;
35022 extern u32 mh_pauses_sent;
35023 extern u32 cm_packets_sent;
35024 @@ -196,16 +196,16 @@ extern u32 cm_packets_created;
35025 extern u32 cm_packets_received;
35026 extern u32 cm_packets_dropped;
35027 extern u32 cm_packets_retrans;
35028 -extern atomic_t cm_listens_created;
35029 -extern atomic_t cm_listens_destroyed;
35030 +extern atomic_unchecked_t cm_listens_created;
35031 +extern atomic_unchecked_t cm_listens_destroyed;
35032 extern u32 cm_backlog_drops;
35033 -extern atomic_t cm_loopbacks;
35034 -extern atomic_t cm_nodes_created;
35035 -extern atomic_t cm_nodes_destroyed;
35036 -extern atomic_t cm_accel_dropped_pkts;
35037 -extern atomic_t cm_resets_recvd;
35038 -extern atomic_t pau_qps_created;
35039 -extern atomic_t pau_qps_destroyed;
35040 +extern atomic_unchecked_t cm_loopbacks;
35041 +extern atomic_unchecked_t cm_nodes_created;
35042 +extern atomic_unchecked_t cm_nodes_destroyed;
35043 +extern atomic_unchecked_t cm_accel_dropped_pkts;
35044 +extern atomic_unchecked_t cm_resets_recvd;
35045 +extern atomic_unchecked_t pau_qps_created;
35046 +extern atomic_unchecked_t pau_qps_destroyed;
35047
35048 extern u32 int_mod_timer_init;
35049 extern u32 int_mod_cq_depth_256;
35050 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35051 index 22ea67e..dcbe3bc 100644
35052 --- a/drivers/infiniband/hw/nes/nes_cm.c
35053 +++ b/drivers/infiniband/hw/nes/nes_cm.c
35054 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
35055 u32 cm_packets_retrans;
35056 u32 cm_packets_created;
35057 u32 cm_packets_received;
35058 -atomic_t cm_listens_created;
35059 -atomic_t cm_listens_destroyed;
35060 +atomic_unchecked_t cm_listens_created;
35061 +atomic_unchecked_t cm_listens_destroyed;
35062 u32 cm_backlog_drops;
35063 -atomic_t cm_loopbacks;
35064 -atomic_t cm_nodes_created;
35065 -atomic_t cm_nodes_destroyed;
35066 -atomic_t cm_accel_dropped_pkts;
35067 -atomic_t cm_resets_recvd;
35068 +atomic_unchecked_t cm_loopbacks;
35069 +atomic_unchecked_t cm_nodes_created;
35070 +atomic_unchecked_t cm_nodes_destroyed;
35071 +atomic_unchecked_t cm_accel_dropped_pkts;
35072 +atomic_unchecked_t cm_resets_recvd;
35073
35074 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
35075 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
35076 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
35077
35078 static struct nes_cm_core *g_cm_core;
35079
35080 -atomic_t cm_connects;
35081 -atomic_t cm_accepts;
35082 -atomic_t cm_disconnects;
35083 -atomic_t cm_closes;
35084 -atomic_t cm_connecteds;
35085 -atomic_t cm_connect_reqs;
35086 -atomic_t cm_rejects;
35087 +atomic_unchecked_t cm_connects;
35088 +atomic_unchecked_t cm_accepts;
35089 +atomic_unchecked_t cm_disconnects;
35090 +atomic_unchecked_t cm_closes;
35091 +atomic_unchecked_t cm_connecteds;
35092 +atomic_unchecked_t cm_connect_reqs;
35093 +atomic_unchecked_t cm_rejects;
35094
35095 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
35096 {
35097 @@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
35098 kfree(listener);
35099 listener = NULL;
35100 ret = 0;
35101 - atomic_inc(&cm_listens_destroyed);
35102 + atomic_inc_unchecked(&cm_listens_destroyed);
35103 } else {
35104 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
35105 }
35106 @@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35107 cm_node->rem_mac);
35108
35109 add_hte_node(cm_core, cm_node);
35110 - atomic_inc(&cm_nodes_created);
35111 + atomic_inc_unchecked(&cm_nodes_created);
35112
35113 return cm_node;
35114 }
35115 @@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35116 }
35117
35118 atomic_dec(&cm_core->node_cnt);
35119 - atomic_inc(&cm_nodes_destroyed);
35120 + atomic_inc_unchecked(&cm_nodes_destroyed);
35121 nesqp = cm_node->nesqp;
35122 if (nesqp) {
35123 nesqp->cm_node = NULL;
35124 @@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35125
35126 static void drop_packet(struct sk_buff *skb)
35127 {
35128 - atomic_inc(&cm_accel_dropped_pkts);
35129 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35130 dev_kfree_skb_any(skb);
35131 }
35132
35133 @@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35134 {
35135
35136 int reset = 0; /* whether to send reset in case of err.. */
35137 - atomic_inc(&cm_resets_recvd);
35138 + atomic_inc_unchecked(&cm_resets_recvd);
35139 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35140 " refcnt=%d\n", cm_node, cm_node->state,
35141 atomic_read(&cm_node->ref_count));
35142 @@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35143 rem_ref_cm_node(cm_node->cm_core, cm_node);
35144 return NULL;
35145 }
35146 - atomic_inc(&cm_loopbacks);
35147 + atomic_inc_unchecked(&cm_loopbacks);
35148 loopbackremotenode->loopbackpartner = cm_node;
35149 loopbackremotenode->tcp_cntxt.rcv_wscale =
35150 NES_CM_DEFAULT_RCV_WND_SCALE;
35151 @@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35152 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
35153 else {
35154 rem_ref_cm_node(cm_core, cm_node);
35155 - atomic_inc(&cm_accel_dropped_pkts);
35156 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35157 dev_kfree_skb_any(skb);
35158 }
35159 break;
35160 @@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35161
35162 if ((cm_id) && (cm_id->event_handler)) {
35163 if (issue_disconn) {
35164 - atomic_inc(&cm_disconnects);
35165 + atomic_inc_unchecked(&cm_disconnects);
35166 cm_event.event = IW_CM_EVENT_DISCONNECT;
35167 cm_event.status = disconn_status;
35168 cm_event.local_addr = cm_id->local_addr;
35169 @@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35170 }
35171
35172 if (issue_close) {
35173 - atomic_inc(&cm_closes);
35174 + atomic_inc_unchecked(&cm_closes);
35175 nes_disconnect(nesqp, 1);
35176
35177 cm_id->provider_data = nesqp;
35178 @@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35179
35180 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35181 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35182 - atomic_inc(&cm_accepts);
35183 + atomic_inc_unchecked(&cm_accepts);
35184
35185 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35186 netdev_refcnt_read(nesvnic->netdev));
35187 @@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35188 struct nes_cm_core *cm_core;
35189 u8 *start_buff;
35190
35191 - atomic_inc(&cm_rejects);
35192 + atomic_inc_unchecked(&cm_rejects);
35193 cm_node = (struct nes_cm_node *)cm_id->provider_data;
35194 loopback = cm_node->loopbackpartner;
35195 cm_core = cm_node->cm_core;
35196 @@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35197 ntohl(cm_id->local_addr.sin_addr.s_addr),
35198 ntohs(cm_id->local_addr.sin_port));
35199
35200 - atomic_inc(&cm_connects);
35201 + atomic_inc_unchecked(&cm_connects);
35202 nesqp->active_conn = 1;
35203
35204 /* cache the cm_id in the qp */
35205 @@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
35206 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
35207 return err;
35208 }
35209 - atomic_inc(&cm_listens_created);
35210 + atomic_inc_unchecked(&cm_listens_created);
35211 }
35212
35213 cm_id->add_ref(cm_id);
35214 @@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35215
35216 if (nesqp->destroyed)
35217 return;
35218 - atomic_inc(&cm_connecteds);
35219 + atomic_inc_unchecked(&cm_connecteds);
35220 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35221 " local port 0x%04X. jiffies = %lu.\n",
35222 nesqp->hwqp.qp_id,
35223 @@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35224
35225 cm_id->add_ref(cm_id);
35226 ret = cm_id->event_handler(cm_id, &cm_event);
35227 - atomic_inc(&cm_closes);
35228 + atomic_inc_unchecked(&cm_closes);
35229 cm_event.event = IW_CM_EVENT_CLOSE;
35230 cm_event.status = 0;
35231 cm_event.provider_data = cm_id->provider_data;
35232 @@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35233 return;
35234 cm_id = cm_node->cm_id;
35235
35236 - atomic_inc(&cm_connect_reqs);
35237 + atomic_inc_unchecked(&cm_connect_reqs);
35238 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35239 cm_node, cm_id, jiffies);
35240
35241 @@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35242 return;
35243 cm_id = cm_node->cm_id;
35244
35245 - atomic_inc(&cm_connect_reqs);
35246 + atomic_inc_unchecked(&cm_connect_reqs);
35247 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35248 cm_node, cm_id, jiffies);
35249
35250 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
35251 index 4166452..fc952c3 100644
35252 --- a/drivers/infiniband/hw/nes/nes_mgt.c
35253 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
35254 @@ -40,8 +40,8 @@
35255 #include "nes.h"
35256 #include "nes_mgt.h"
35257
35258 -atomic_t pau_qps_created;
35259 -atomic_t pau_qps_destroyed;
35260 +atomic_unchecked_t pau_qps_created;
35261 +atomic_unchecked_t pau_qps_destroyed;
35262
35263 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
35264 {
35265 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
35266 {
35267 struct sk_buff *skb;
35268 unsigned long flags;
35269 - atomic_inc(&pau_qps_destroyed);
35270 + atomic_inc_unchecked(&pau_qps_destroyed);
35271
35272 /* Free packets that have not yet been forwarded */
35273 /* Lock is acquired by skb_dequeue when removing the skb */
35274 @@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
35275 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
35276 skb_queue_head_init(&nesqp->pau_list);
35277 spin_lock_init(&nesqp->pau_lock);
35278 - atomic_inc(&pau_qps_created);
35279 + atomic_inc_unchecked(&pau_qps_created);
35280 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
35281 }
35282
35283 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35284 index 9542e16..a008c40 100644
35285 --- a/drivers/infiniband/hw/nes/nes_nic.c
35286 +++ b/drivers/infiniband/hw/nes/nes_nic.c
35287 @@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35288 target_stat_values[++index] = mh_detected;
35289 target_stat_values[++index] = mh_pauses_sent;
35290 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35291 - target_stat_values[++index] = atomic_read(&cm_connects);
35292 - target_stat_values[++index] = atomic_read(&cm_accepts);
35293 - target_stat_values[++index] = atomic_read(&cm_disconnects);
35294 - target_stat_values[++index] = atomic_read(&cm_connecteds);
35295 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35296 - target_stat_values[++index] = atomic_read(&cm_rejects);
35297 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35298 - target_stat_values[++index] = atomic_read(&qps_created);
35299 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35300 - target_stat_values[++index] = atomic_read(&qps_destroyed);
35301 - target_stat_values[++index] = atomic_read(&cm_closes);
35302 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35303 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35304 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35305 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35306 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35307 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35308 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35309 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35310 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35311 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35312 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35313 target_stat_values[++index] = cm_packets_sent;
35314 target_stat_values[++index] = cm_packets_bounced;
35315 target_stat_values[++index] = cm_packets_created;
35316 target_stat_values[++index] = cm_packets_received;
35317 target_stat_values[++index] = cm_packets_dropped;
35318 target_stat_values[++index] = cm_packets_retrans;
35319 - target_stat_values[++index] = atomic_read(&cm_listens_created);
35320 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
35321 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
35322 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
35323 target_stat_values[++index] = cm_backlog_drops;
35324 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
35325 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
35326 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35327 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35328 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35329 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35330 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35331 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35332 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35333 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35334 target_stat_values[++index] = nesadapter->free_4kpbl;
35335 target_stat_values[++index] = nesadapter->free_256pbl;
35336 target_stat_values[++index] = int_mod_timer_init;
35337 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
35338 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
35339 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
35340 - target_stat_values[++index] = atomic_read(&pau_qps_created);
35341 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
35342 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
35343 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
35344 }
35345
35346 /**
35347 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35348 index 07e4fba..685f041 100644
35349 --- a/drivers/infiniband/hw/nes/nes_verbs.c
35350 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
35351 @@ -46,9 +46,9 @@
35352
35353 #include <rdma/ib_umem.h>
35354
35355 -atomic_t mod_qp_timouts;
35356 -atomic_t qps_created;
35357 -atomic_t sw_qps_destroyed;
35358 +atomic_unchecked_t mod_qp_timouts;
35359 +atomic_unchecked_t qps_created;
35360 +atomic_unchecked_t sw_qps_destroyed;
35361
35362 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35363
35364 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35365 if (init_attr->create_flags)
35366 return ERR_PTR(-EINVAL);
35367
35368 - atomic_inc(&qps_created);
35369 + atomic_inc_unchecked(&qps_created);
35370 switch (init_attr->qp_type) {
35371 case IB_QPT_RC:
35372 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35373 @@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35374 struct iw_cm_event cm_event;
35375 int ret = 0;
35376
35377 - atomic_inc(&sw_qps_destroyed);
35378 + atomic_inc_unchecked(&sw_qps_destroyed);
35379 nesqp->destroyed = 1;
35380
35381 /* Blow away the connection if it exists. */
35382 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
35383 index 4d11575..3e890e5 100644
35384 --- a/drivers/infiniband/hw/qib/qib.h
35385 +++ b/drivers/infiniband/hw/qib/qib.h
35386 @@ -51,6 +51,7 @@
35387 #include <linux/completion.h>
35388 #include <linux/kref.h>
35389 #include <linux/sched.h>
35390 +#include <linux/slab.h>
35391
35392 #include "qib_common.h"
35393 #include "qib_verbs.h"
35394 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35395 index da739d9..da1c7f4 100644
35396 --- a/drivers/input/gameport/gameport.c
35397 +++ b/drivers/input/gameport/gameport.c
35398 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
35399 */
35400 static void gameport_init_port(struct gameport *gameport)
35401 {
35402 - static atomic_t gameport_no = ATOMIC_INIT(0);
35403 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35404
35405 __module_get(THIS_MODULE);
35406
35407 mutex_init(&gameport->drv_mutex);
35408 device_initialize(&gameport->dev);
35409 dev_set_name(&gameport->dev, "gameport%lu",
35410 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
35411 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35412 gameport->dev.bus = &gameport_bus;
35413 gameport->dev.release = gameport_release_port;
35414 if (gameport->parent)
35415 diff --git a/drivers/input/input.c b/drivers/input/input.c
35416 index c044699..174d71a 100644
35417 --- a/drivers/input/input.c
35418 +++ b/drivers/input/input.c
35419 @@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
35420 */
35421 int input_register_device(struct input_dev *dev)
35422 {
35423 - static atomic_t input_no = ATOMIC_INIT(0);
35424 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35425 struct input_devres *devres = NULL;
35426 struct input_handler *handler;
35427 unsigned int packet_size;
35428 @@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
35429 dev->setkeycode = input_default_setkeycode;
35430
35431 dev_set_name(&dev->dev, "input%ld",
35432 - (unsigned long) atomic_inc_return(&input_no) - 1);
35433 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35434
35435 error = device_add(&dev->dev);
35436 if (error)
35437 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35438 index 04c69af..5f92d00 100644
35439 --- a/drivers/input/joystick/sidewinder.c
35440 +++ b/drivers/input/joystick/sidewinder.c
35441 @@ -30,6 +30,7 @@
35442 #include <linux/kernel.h>
35443 #include <linux/module.h>
35444 #include <linux/slab.h>
35445 +#include <linux/sched.h>
35446 #include <linux/init.h>
35447 #include <linux/input.h>
35448 #include <linux/gameport.h>
35449 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35450 index d6cbfe9..6225402 100644
35451 --- a/drivers/input/joystick/xpad.c
35452 +++ b/drivers/input/joystick/xpad.c
35453 @@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35454
35455 static int xpad_led_probe(struct usb_xpad *xpad)
35456 {
35457 - static atomic_t led_seq = ATOMIC_INIT(0);
35458 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35459 long led_no;
35460 struct xpad_led *led;
35461 struct led_classdev *led_cdev;
35462 @@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35463 if (!led)
35464 return -ENOMEM;
35465
35466 - led_no = (long)atomic_inc_return(&led_seq) - 1;
35467 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35468
35469 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35470 led->xpad = xpad;
35471 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
35472 index 4c842c3..590b0bf 100644
35473 --- a/drivers/input/mousedev.c
35474 +++ b/drivers/input/mousedev.c
35475 @@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
35476
35477 spin_unlock_irq(&client->packet_lock);
35478
35479 - if (copy_to_user(buffer, data, count))
35480 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
35481 return -EFAULT;
35482
35483 return count;
35484 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35485 index 25fc597..558bf3b 100644
35486 --- a/drivers/input/serio/serio.c
35487 +++ b/drivers/input/serio/serio.c
35488 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
35489 */
35490 static void serio_init_port(struct serio *serio)
35491 {
35492 - static atomic_t serio_no = ATOMIC_INIT(0);
35493 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35494
35495 __module_get(THIS_MODULE);
35496
35497 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
35498 mutex_init(&serio->drv_mutex);
35499 device_initialize(&serio->dev);
35500 dev_set_name(&serio->dev, "serio%ld",
35501 - (long)atomic_inc_return(&serio_no) - 1);
35502 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
35503 serio->dev.bus = &serio_bus;
35504 serio->dev.release = serio_release_port;
35505 serio->dev.groups = serio_device_attr_groups;
35506 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
35507 index 89562a8..218999b 100644
35508 --- a/drivers/isdn/capi/capi.c
35509 +++ b/drivers/isdn/capi/capi.c
35510 @@ -81,8 +81,8 @@ struct capiminor {
35511
35512 struct capi20_appl *ap;
35513 u32 ncci;
35514 - atomic_t datahandle;
35515 - atomic_t msgid;
35516 + atomic_unchecked_t datahandle;
35517 + atomic_unchecked_t msgid;
35518
35519 struct tty_port port;
35520 int ttyinstop;
35521 @@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
35522 capimsg_setu16(s, 2, mp->ap->applid);
35523 capimsg_setu8 (s, 4, CAPI_DATA_B3);
35524 capimsg_setu8 (s, 5, CAPI_RESP);
35525 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
35526 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
35527 capimsg_setu32(s, 8, mp->ncci);
35528 capimsg_setu16(s, 12, datahandle);
35529 }
35530 @@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
35531 mp->outbytes -= len;
35532 spin_unlock_bh(&mp->outlock);
35533
35534 - datahandle = atomic_inc_return(&mp->datahandle);
35535 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
35536 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
35537 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
35538 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
35539 capimsg_setu16(skb->data, 2, mp->ap->applid);
35540 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
35541 capimsg_setu8 (skb->data, 5, CAPI_REQ);
35542 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
35543 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
35544 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
35545 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
35546 capimsg_setu16(skb->data, 16, len); /* Data length */
35547 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35548 index 67abf3f..076b3a6 100644
35549 --- a/drivers/isdn/gigaset/interface.c
35550 +++ b/drivers/isdn/gigaset/interface.c
35551 @@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35552 }
35553 tty->driver_data = cs;
35554
35555 - ++cs->port.count;
35556 + atomic_inc(&cs->port.count);
35557
35558 - if (cs->port.count == 1) {
35559 + if (atomic_read(&cs->port.count) == 1) {
35560 tty_port_tty_set(&cs->port, tty);
35561 tty->low_latency = 1;
35562 }
35563 @@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35564
35565 if (!cs->connected)
35566 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35567 - else if (!cs->port.count)
35568 + else if (!atomic_read(&cs->port.count))
35569 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35570 - else if (!--cs->port.count)
35571 + else if (!atomic_dec_return(&cs->port.count))
35572 tty_port_tty_set(&cs->port, NULL);
35573
35574 mutex_unlock(&cs->mutex);
35575 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35576 index 821f7ac..28d4030 100644
35577 --- a/drivers/isdn/hardware/avm/b1.c
35578 +++ b/drivers/isdn/hardware/avm/b1.c
35579 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
35580 }
35581 if (left) {
35582 if (t4file->user) {
35583 - if (copy_from_user(buf, dp, left))
35584 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35585 return -EFAULT;
35586 } else {
35587 memcpy(buf, dp, left);
35588 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
35589 }
35590 if (left) {
35591 if (config->user) {
35592 - if (copy_from_user(buf, dp, left))
35593 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35594 return -EFAULT;
35595 } else {
35596 memcpy(buf, dp, left);
35597 diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
35598 index e09dc8a..15e2efb 100644
35599 --- a/drivers/isdn/i4l/isdn_tty.c
35600 +++ b/drivers/isdn/i4l/isdn_tty.c
35601 @@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
35602
35603 #ifdef ISDN_DEBUG_MODEM_OPEN
35604 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
35605 - port->count);
35606 + atomic_read(&port->count));
35607 #endif
35608 - port->count++;
35609 + atomic_inc(&port->count);
35610 port->tty = tty;
35611 /*
35612 * Start up serial port
35613 @@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
35614 #endif
35615 return;
35616 }
35617 - if ((tty->count == 1) && (port->count != 1)) {
35618 + if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
35619 /*
35620 * Uh, oh. tty->count is 1, which means that the tty
35621 * structure will be freed. Info->count should always
35622 @@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
35623 * serial port won't be shutdown.
35624 */
35625 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
35626 - "info->count is %d\n", port->count);
35627 - port->count = 1;
35628 + "info->count is %d\n", atomic_read(&port->count));
35629 + atomic_set(&port->count, 1);
35630 }
35631 - if (--port->count < 0) {
35632 + if (atomic_dec_return(&port->count) < 0) {
35633 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
35634 - info->line, port->count);
35635 - port->count = 0;
35636 + info->line, atomic_read(&port->count));
35637 + atomic_set(&port->count, 0);
35638 }
35639 - if (port->count) {
35640 + if (atomic_read(&port->count)) {
35641 #ifdef ISDN_DEBUG_MODEM_OPEN
35642 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
35643 #endif
35644 @@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
35645 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
35646 return;
35647 isdn_tty_shutdown(info);
35648 - port->count = 0;
35649 + atomic_set(&port->count, 0);
35650 port->flags &= ~ASYNC_NORMAL_ACTIVE;
35651 port->tty = NULL;
35652 wake_up_interruptible(&port->open_wait);
35653 @@ -1975,7 +1975,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
35654 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
35655 modem_info *info = &dev->mdm.info[i];
35656
35657 - if (info->port.count == 0)
35658 + if (atomic_read(&info->port.count) == 0)
35659 continue;
35660 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
35661 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
35662 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35663 index e74df7c..03a03ba 100644
35664 --- a/drivers/isdn/icn/icn.c
35665 +++ b/drivers/isdn/icn/icn.c
35666 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
35667 if (count > len)
35668 count = len;
35669 if (user) {
35670 - if (copy_from_user(msg, buf, count))
35671 + if (count > sizeof msg || copy_from_user(msg, buf, count))
35672 return -EFAULT;
35673 } else
35674 memcpy(msg, buf, count);
35675 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
35676 index a5ebc00..982886f 100644
35677 --- a/drivers/lguest/core.c
35678 +++ b/drivers/lguest/core.c
35679 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
35680 * it's worked so far. The end address needs +1 because __get_vm_area
35681 * allocates an extra guard page, so we need space for that.
35682 */
35683 +
35684 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
35685 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35686 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
35687 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35688 +#else
35689 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35690 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
35691 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35692 +#endif
35693 +
35694 if (!switcher_vma) {
35695 err = -ENOMEM;
35696 printk("lguest: could not map switcher pages high\n");
35697 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
35698 * Now the Switcher is mapped at the right address, we can't fail!
35699 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
35700 */
35701 - memcpy(switcher_vma->addr, start_switcher_text,
35702 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
35703 end_switcher_text - start_switcher_text);
35704
35705 printk(KERN_INFO "lguest: mapped switcher at %p\n",
35706 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
35707 index 4af12e1..0e89afe 100644
35708 --- a/drivers/lguest/x86/core.c
35709 +++ b/drivers/lguest/x86/core.c
35710 @@ -59,7 +59,7 @@ static struct {
35711 /* Offset from where switcher.S was compiled to where we've copied it */
35712 static unsigned long switcher_offset(void)
35713 {
35714 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
35715 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
35716 }
35717
35718 /* This cpu's struct lguest_pages. */
35719 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
35720 * These copies are pretty cheap, so we do them unconditionally: */
35721 /* Save the current Host top-level page directory.
35722 */
35723 +
35724 +#ifdef CONFIG_PAX_PER_CPU_PGD
35725 + pages->state.host_cr3 = read_cr3();
35726 +#else
35727 pages->state.host_cr3 = __pa(current->mm->pgd);
35728 +#endif
35729 +
35730 /*
35731 * Set up the Guest's page tables to see this CPU's pages (and no
35732 * other CPU's pages).
35733 @@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
35734 * compiled-in switcher code and the high-mapped copy we just made.
35735 */
35736 for (i = 0; i < IDT_ENTRIES; i++)
35737 - default_idt_entries[i] += switcher_offset();
35738 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
35739
35740 /*
35741 * Set up the Switcher's per-cpu areas.
35742 @@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
35743 * it will be undisturbed when we switch. To change %cs and jump we
35744 * need this structure to feed to Intel's "lcall" instruction.
35745 */
35746 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
35747 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
35748 lguest_entry.segment = LGUEST_CS;
35749
35750 /*
35751 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
35752 index 40634b0..4f5855e 100644
35753 --- a/drivers/lguest/x86/switcher_32.S
35754 +++ b/drivers/lguest/x86/switcher_32.S
35755 @@ -87,6 +87,7 @@
35756 #include <asm/page.h>
35757 #include <asm/segment.h>
35758 #include <asm/lguest.h>
35759 +#include <asm/processor-flags.h>
35760
35761 // We mark the start of the code to copy
35762 // It's placed in .text tho it's never run here
35763 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
35764 // Changes type when we load it: damn Intel!
35765 // For after we switch over our page tables
35766 // That entry will be read-only: we'd crash.
35767 +
35768 +#ifdef CONFIG_PAX_KERNEXEC
35769 + mov %cr0, %edx
35770 + xor $X86_CR0_WP, %edx
35771 + mov %edx, %cr0
35772 +#endif
35773 +
35774 movl $(GDT_ENTRY_TSS*8), %edx
35775 ltr %dx
35776
35777 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
35778 // Let's clear it again for our return.
35779 // The GDT descriptor of the Host
35780 // Points to the table after two "size" bytes
35781 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
35782 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
35783 // Clear "used" from type field (byte 5, bit 2)
35784 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
35785 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
35786 +
35787 +#ifdef CONFIG_PAX_KERNEXEC
35788 + mov %cr0, %eax
35789 + xor $X86_CR0_WP, %eax
35790 + mov %eax, %cr0
35791 +#endif
35792
35793 // Once our page table's switched, the Guest is live!
35794 // The Host fades as we run this final step.
35795 @@ -295,13 +309,12 @@ deliver_to_host:
35796 // I consulted gcc, and it gave
35797 // These instructions, which I gladly credit:
35798 leal (%edx,%ebx,8), %eax
35799 - movzwl (%eax),%edx
35800 - movl 4(%eax), %eax
35801 - xorw %ax, %ax
35802 - orl %eax, %edx
35803 + movl 4(%eax), %edx
35804 + movw (%eax), %dx
35805 // Now the address of the handler's in %edx
35806 // We call it now: its "iret" drops us home.
35807 - jmp *%edx
35808 + ljmp $__KERNEL_CS, $1f
35809 +1: jmp *%edx
35810
35811 // Every interrupt can come to us here
35812 // But we must truly tell each apart.
35813 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
35814 index 7155945..4bcc562 100644
35815 --- a/drivers/md/bitmap.c
35816 +++ b/drivers/md/bitmap.c
35817 @@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
35818 chunk_kb ? "KB" : "B");
35819 if (bitmap->storage.file) {
35820 seq_printf(seq, ", file: ");
35821 - seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
35822 + seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
35823 }
35824
35825 seq_printf(seq, "\n");
35826 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
35827 index 0666b5d..ed82cb4 100644
35828 --- a/drivers/md/dm-ioctl.c
35829 +++ b/drivers/md/dm-ioctl.c
35830 @@ -1628,7 +1628,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
35831 cmd == DM_LIST_VERSIONS_CMD)
35832 return 0;
35833
35834 - if ((cmd == DM_DEV_CREATE_CMD)) {
35835 + if (cmd == DM_DEV_CREATE_CMD) {
35836 if (!*param->name) {
35837 DMWARN("name not supplied when creating device");
35838 return -EINVAL;
35839 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
35840 index fa51918..c26253c 100644
35841 --- a/drivers/md/dm-raid1.c
35842 +++ b/drivers/md/dm-raid1.c
35843 @@ -40,7 +40,7 @@ enum dm_raid1_error {
35844
35845 struct mirror {
35846 struct mirror_set *ms;
35847 - atomic_t error_count;
35848 + atomic_unchecked_t error_count;
35849 unsigned long error_type;
35850 struct dm_dev *dev;
35851 sector_t offset;
35852 @@ -183,7 +183,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
35853 struct mirror *m;
35854
35855 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
35856 - if (!atomic_read(&m->error_count))
35857 + if (!atomic_read_unchecked(&m->error_count))
35858 return m;
35859
35860 return NULL;
35861 @@ -215,7 +215,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
35862 * simple way to tell if a device has encountered
35863 * errors.
35864 */
35865 - atomic_inc(&m->error_count);
35866 + atomic_inc_unchecked(&m->error_count);
35867
35868 if (test_and_set_bit(error_type, &m->error_type))
35869 return;
35870 @@ -406,7 +406,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
35871 struct mirror *m = get_default_mirror(ms);
35872
35873 do {
35874 - if (likely(!atomic_read(&m->error_count)))
35875 + if (likely(!atomic_read_unchecked(&m->error_count)))
35876 return m;
35877
35878 if (m-- == ms->mirror)
35879 @@ -420,7 +420,7 @@ static int default_ok(struct mirror *m)
35880 {
35881 struct mirror *default_mirror = get_default_mirror(m->ms);
35882
35883 - return !atomic_read(&default_mirror->error_count);
35884 + return !atomic_read_unchecked(&default_mirror->error_count);
35885 }
35886
35887 static int mirror_available(struct mirror_set *ms, struct bio *bio)
35888 @@ -557,7 +557,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
35889 */
35890 if (likely(region_in_sync(ms, region, 1)))
35891 m = choose_mirror(ms, bio->bi_sector);
35892 - else if (m && atomic_read(&m->error_count))
35893 + else if (m && atomic_read_unchecked(&m->error_count))
35894 m = NULL;
35895
35896 if (likely(m))
35897 @@ -924,7 +924,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
35898 }
35899
35900 ms->mirror[mirror].ms = ms;
35901 - atomic_set(&(ms->mirror[mirror].error_count), 0);
35902 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
35903 ms->mirror[mirror].error_type = 0;
35904 ms->mirror[mirror].offset = offset;
35905
35906 @@ -1337,7 +1337,7 @@ static void mirror_resume(struct dm_target *ti)
35907 */
35908 static char device_status_char(struct mirror *m)
35909 {
35910 - if (!atomic_read(&(m->error_count)))
35911 + if (!atomic_read_unchecked(&(m->error_count)))
35912 return 'A';
35913
35914 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
35915 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
35916 index c89cde8..9d184cf 100644
35917 --- a/drivers/md/dm-stripe.c
35918 +++ b/drivers/md/dm-stripe.c
35919 @@ -20,7 +20,7 @@ struct stripe {
35920 struct dm_dev *dev;
35921 sector_t physical_start;
35922
35923 - atomic_t error_count;
35924 + atomic_unchecked_t error_count;
35925 };
35926
35927 struct stripe_c {
35928 @@ -184,7 +184,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
35929 kfree(sc);
35930 return r;
35931 }
35932 - atomic_set(&(sc->stripe[i].error_count), 0);
35933 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
35934 }
35935
35936 ti->private = sc;
35937 @@ -325,7 +325,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
35938 DMEMIT("%d ", sc->stripes);
35939 for (i = 0; i < sc->stripes; i++) {
35940 DMEMIT("%s ", sc->stripe[i].dev->name);
35941 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
35942 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
35943 'D' : 'A';
35944 }
35945 buffer[i] = '\0';
35946 @@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
35947 */
35948 for (i = 0; i < sc->stripes; i++)
35949 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
35950 - atomic_inc(&(sc->stripe[i].error_count));
35951 - if (atomic_read(&(sc->stripe[i].error_count)) <
35952 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
35953 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
35954 DM_IO_ERROR_THRESHOLD)
35955 schedule_work(&sc->trigger_event);
35956 }
35957 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
35958 index daf25d0..d74f49f 100644
35959 --- a/drivers/md/dm-table.c
35960 +++ b/drivers/md/dm-table.c
35961 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
35962 if (!dev_size)
35963 return 0;
35964
35965 - if ((start >= dev_size) || (start + len > dev_size)) {
35966 + if ((start >= dev_size) || (len > dev_size - start)) {
35967 DMWARN("%s: %s too small for target: "
35968 "start=%llu, len=%llu, dev_size=%llu",
35969 dm_device_name(ti->table->md), bdevname(bdev, b),
35970 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
35971 index 4d6e853..a234157 100644
35972 --- a/drivers/md/dm-thin-metadata.c
35973 +++ b/drivers/md/dm-thin-metadata.c
35974 @@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
35975 {
35976 pmd->info.tm = pmd->tm;
35977 pmd->info.levels = 2;
35978 - pmd->info.value_type.context = pmd->data_sm;
35979 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
35980 pmd->info.value_type.size = sizeof(__le64);
35981 pmd->info.value_type.inc = data_block_inc;
35982 pmd->info.value_type.dec = data_block_dec;
35983 @@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
35984
35985 pmd->bl_info.tm = pmd->tm;
35986 pmd->bl_info.levels = 1;
35987 - pmd->bl_info.value_type.context = pmd->data_sm;
35988 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
35989 pmd->bl_info.value_type.size = sizeof(__le64);
35990 pmd->bl_info.value_type.inc = data_block_inc;
35991 pmd->bl_info.value_type.dec = data_block_dec;
35992 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
35993 index 314a0e2..1376406 100644
35994 --- a/drivers/md/dm.c
35995 +++ b/drivers/md/dm.c
35996 @@ -170,9 +170,9 @@ struct mapped_device {
35997 /*
35998 * Event handling.
35999 */
36000 - atomic_t event_nr;
36001 + atomic_unchecked_t event_nr;
36002 wait_queue_head_t eventq;
36003 - atomic_t uevent_seq;
36004 + atomic_unchecked_t uevent_seq;
36005 struct list_head uevent_list;
36006 spinlock_t uevent_lock; /* Protect access to uevent_list */
36007
36008 @@ -1872,8 +1872,8 @@ static struct mapped_device *alloc_dev(int minor)
36009 rwlock_init(&md->map_lock);
36010 atomic_set(&md->holders, 1);
36011 atomic_set(&md->open_count, 0);
36012 - atomic_set(&md->event_nr, 0);
36013 - atomic_set(&md->uevent_seq, 0);
36014 + atomic_set_unchecked(&md->event_nr, 0);
36015 + atomic_set_unchecked(&md->uevent_seq, 0);
36016 INIT_LIST_HEAD(&md->uevent_list);
36017 spin_lock_init(&md->uevent_lock);
36018
36019 @@ -2014,7 +2014,7 @@ static void event_callback(void *context)
36020
36021 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36022
36023 - atomic_inc(&md->event_nr);
36024 + atomic_inc_unchecked(&md->event_nr);
36025 wake_up(&md->eventq);
36026 }
36027
36028 @@ -2669,18 +2669,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36029
36030 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36031 {
36032 - return atomic_add_return(1, &md->uevent_seq);
36033 + return atomic_add_return_unchecked(1, &md->uevent_seq);
36034 }
36035
36036 uint32_t dm_get_event_nr(struct mapped_device *md)
36037 {
36038 - return atomic_read(&md->event_nr);
36039 + return atomic_read_unchecked(&md->event_nr);
36040 }
36041
36042 int dm_wait_event(struct mapped_device *md, int event_nr)
36043 {
36044 return wait_event_interruptible(md->eventq,
36045 - (event_nr != atomic_read(&md->event_nr)));
36046 + (event_nr != atomic_read_unchecked(&md->event_nr)));
36047 }
36048
36049 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36050 diff --git a/drivers/md/md.c b/drivers/md/md.c
36051 index 3db3d1b..9487468 100644
36052 --- a/drivers/md/md.c
36053 +++ b/drivers/md/md.c
36054 @@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
36055 * start build, activate spare
36056 */
36057 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36058 -static atomic_t md_event_count;
36059 +static atomic_unchecked_t md_event_count;
36060 void md_new_event(struct mddev *mddev)
36061 {
36062 - atomic_inc(&md_event_count);
36063 + atomic_inc_unchecked(&md_event_count);
36064 wake_up(&md_event_waiters);
36065 }
36066 EXPORT_SYMBOL_GPL(md_new_event);
36067 @@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36068 */
36069 static void md_new_event_inintr(struct mddev *mddev)
36070 {
36071 - atomic_inc(&md_event_count);
36072 + atomic_inc_unchecked(&md_event_count);
36073 wake_up(&md_event_waiters);
36074 }
36075
36076 @@ -1503,7 +1503,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
36077 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
36078 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
36079 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
36080 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36081 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36082
36083 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36084 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36085 @@ -1747,7 +1747,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
36086 else
36087 sb->resync_offset = cpu_to_le64(0);
36088
36089 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36090 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36091
36092 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36093 sb->size = cpu_to_le64(mddev->dev_sectors);
36094 @@ -2747,7 +2747,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36095 static ssize_t
36096 errors_show(struct md_rdev *rdev, char *page)
36097 {
36098 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36099 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36100 }
36101
36102 static ssize_t
36103 @@ -2756,7 +2756,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
36104 char *e;
36105 unsigned long n = simple_strtoul(buf, &e, 10);
36106 if (*buf && (*e == 0 || *e == '\n')) {
36107 - atomic_set(&rdev->corrected_errors, n);
36108 + atomic_set_unchecked(&rdev->corrected_errors, n);
36109 return len;
36110 }
36111 return -EINVAL;
36112 @@ -3203,8 +3203,8 @@ int md_rdev_init(struct md_rdev *rdev)
36113 rdev->sb_loaded = 0;
36114 rdev->bb_page = NULL;
36115 atomic_set(&rdev->nr_pending, 0);
36116 - atomic_set(&rdev->read_errors, 0);
36117 - atomic_set(&rdev->corrected_errors, 0);
36118 + atomic_set_unchecked(&rdev->read_errors, 0);
36119 + atomic_set_unchecked(&rdev->corrected_errors, 0);
36120
36121 INIT_LIST_HEAD(&rdev->same_set);
36122 init_waitqueue_head(&rdev->blocked_wait);
36123 @@ -6980,7 +6980,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36124
36125 spin_unlock(&pers_lock);
36126 seq_printf(seq, "\n");
36127 - seq->poll_event = atomic_read(&md_event_count);
36128 + seq->poll_event = atomic_read_unchecked(&md_event_count);
36129 return 0;
36130 }
36131 if (v == (void*)2) {
36132 @@ -7083,7 +7083,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36133 return error;
36134
36135 seq = file->private_data;
36136 - seq->poll_event = atomic_read(&md_event_count);
36137 + seq->poll_event = atomic_read_unchecked(&md_event_count);
36138 return error;
36139 }
36140
36141 @@ -7097,7 +7097,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36142 /* always allow read */
36143 mask = POLLIN | POLLRDNORM;
36144
36145 - if (seq->poll_event != atomic_read(&md_event_count))
36146 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
36147 mask |= POLLERR | POLLPRI;
36148 return mask;
36149 }
36150 @@ -7141,7 +7141,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
36151 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36152 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36153 (int)part_stat_read(&disk->part0, sectors[1]) -
36154 - atomic_read(&disk->sync_io);
36155 + atomic_read_unchecked(&disk->sync_io);
36156 /* sync IO will cause sync_io to increase before the disk_stats
36157 * as sync_io is counted when a request starts, and
36158 * disk_stats is counted when it completes.
36159 diff --git a/drivers/md/md.h b/drivers/md/md.h
36160 index eca59c3..7c42285 100644
36161 --- a/drivers/md/md.h
36162 +++ b/drivers/md/md.h
36163 @@ -94,13 +94,13 @@ struct md_rdev {
36164 * only maintained for arrays that
36165 * support hot removal
36166 */
36167 - atomic_t read_errors; /* number of consecutive read errors that
36168 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
36169 * we have tried to ignore.
36170 */
36171 struct timespec last_read_error; /* monotonic time since our
36172 * last read error
36173 */
36174 - atomic_t corrected_errors; /* number of corrected read errors,
36175 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36176 * for reporting to userspace and storing
36177 * in superblock.
36178 */
36179 @@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
36180
36181 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36182 {
36183 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36184 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36185 }
36186
36187 struct md_personality
36188 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
36189 index 1cbfc6b..56e1dbb 100644
36190 --- a/drivers/md/persistent-data/dm-space-map.h
36191 +++ b/drivers/md/persistent-data/dm-space-map.h
36192 @@ -60,6 +60,7 @@ struct dm_space_map {
36193 int (*root_size)(struct dm_space_map *sm, size_t *result);
36194 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
36195 };
36196 +typedef struct dm_space_map __no_const dm_space_map_no_const;
36197
36198 /*----------------------------------------------------------------*/
36199
36200 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36201 index d5bddfc..b079b4b 100644
36202 --- a/drivers/md/raid1.c
36203 +++ b/drivers/md/raid1.c
36204 @@ -1818,7 +1818,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
36205 if (r1_sync_page_io(rdev, sect, s,
36206 bio->bi_io_vec[idx].bv_page,
36207 READ) != 0)
36208 - atomic_add(s, &rdev->corrected_errors);
36209 + atomic_add_unchecked(s, &rdev->corrected_errors);
36210 }
36211 sectors -= s;
36212 sect += s;
36213 @@ -2040,7 +2040,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
36214 test_bit(In_sync, &rdev->flags)) {
36215 if (r1_sync_page_io(rdev, sect, s,
36216 conf->tmppage, READ)) {
36217 - atomic_add(s, &rdev->corrected_errors);
36218 + atomic_add_unchecked(s, &rdev->corrected_errors);
36219 printk(KERN_INFO
36220 "md/raid1:%s: read error corrected "
36221 "(%d sectors at %llu on %s)\n",
36222 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36223 index 64d4824..8b9ea57 100644
36224 --- a/drivers/md/raid10.c
36225 +++ b/drivers/md/raid10.c
36226 @@ -1877,7 +1877,7 @@ static void end_sync_read(struct bio *bio, int error)
36227 /* The write handler will notice the lack of
36228 * R10BIO_Uptodate and record any errors etc
36229 */
36230 - atomic_add(r10_bio->sectors,
36231 + atomic_add_unchecked(r10_bio->sectors,
36232 &conf->mirrors[d].rdev->corrected_errors);
36233
36234 /* for reconstruct, we always reschedule after a read.
36235 @@ -2226,7 +2226,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
36236 {
36237 struct timespec cur_time_mon;
36238 unsigned long hours_since_last;
36239 - unsigned int read_errors = atomic_read(&rdev->read_errors);
36240 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
36241
36242 ktime_get_ts(&cur_time_mon);
36243
36244 @@ -2248,9 +2248,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
36245 * overflowing the shift of read_errors by hours_since_last.
36246 */
36247 if (hours_since_last >= 8 * sizeof(read_errors))
36248 - atomic_set(&rdev->read_errors, 0);
36249 + atomic_set_unchecked(&rdev->read_errors, 0);
36250 else
36251 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
36252 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
36253 }
36254
36255 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
36256 @@ -2304,8 +2304,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
36257 return;
36258
36259 check_decay_read_errors(mddev, rdev);
36260 - atomic_inc(&rdev->read_errors);
36261 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
36262 + atomic_inc_unchecked(&rdev->read_errors);
36263 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
36264 char b[BDEVNAME_SIZE];
36265 bdevname(rdev->bdev, b);
36266
36267 @@ -2313,7 +2313,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
36268 "md/raid10:%s: %s: Raid device exceeded "
36269 "read_error threshold [cur %d:max %d]\n",
36270 mdname(mddev), b,
36271 - atomic_read(&rdev->read_errors), max_read_errors);
36272 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
36273 printk(KERN_NOTICE
36274 "md/raid10:%s: %s: Failing raid device\n",
36275 mdname(mddev), b);
36276 @@ -2468,7 +2468,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
36277 sect +
36278 choose_data_offset(r10_bio, rdev)),
36279 bdevname(rdev->bdev, b));
36280 - atomic_add(s, &rdev->corrected_errors);
36281 + atomic_add_unchecked(s, &rdev->corrected_errors);
36282 }
36283
36284 rdev_dec_pending(rdev, mddev);
36285 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36286 index 19d77a0..56051b92 100644
36287 --- a/drivers/md/raid5.c
36288 +++ b/drivers/md/raid5.c
36289 @@ -1797,21 +1797,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
36290 mdname(conf->mddev), STRIPE_SECTORS,
36291 (unsigned long long)s,
36292 bdevname(rdev->bdev, b));
36293 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
36294 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
36295 clear_bit(R5_ReadError, &sh->dev[i].flags);
36296 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36297 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
36298 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
36299
36300 - if (atomic_read(&rdev->read_errors))
36301 - atomic_set(&rdev->read_errors, 0);
36302 + if (atomic_read_unchecked(&rdev->read_errors))
36303 + atomic_set_unchecked(&rdev->read_errors, 0);
36304 } else {
36305 const char *bdn = bdevname(rdev->bdev, b);
36306 int retry = 0;
36307 int set_bad = 0;
36308
36309 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36310 - atomic_inc(&rdev->read_errors);
36311 + atomic_inc_unchecked(&rdev->read_errors);
36312 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
36313 printk_ratelimited(
36314 KERN_WARNING
36315 @@ -1839,7 +1839,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36316 mdname(conf->mddev),
36317 (unsigned long long)s,
36318 bdn);
36319 - } else if (atomic_read(&rdev->read_errors)
36320 + } else if (atomic_read_unchecked(&rdev->read_errors)
36321 > conf->max_nr_stripes)
36322 printk(KERN_WARNING
36323 "md/raid:%s: Too many read errors, failing device %s.\n",
36324 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
36325 index d33101a..6b13069 100644
36326 --- a/drivers/media/dvb-core/dvbdev.c
36327 +++ b/drivers/media/dvb-core/dvbdev.c
36328 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36329 const struct dvb_device *template, void *priv, int type)
36330 {
36331 struct dvb_device *dvbdev;
36332 - struct file_operations *dvbdevfops;
36333 + file_operations_no_const *dvbdevfops;
36334 struct device *clsdev;
36335 int minor;
36336 int id;
36337 diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
36338 index 404f63a..4796533 100644
36339 --- a/drivers/media/dvb-frontends/dib3000.h
36340 +++ b/drivers/media/dvb-frontends/dib3000.h
36341 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36342 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36343 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36344 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36345 -};
36346 +} __no_const;
36347
36348 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36349 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36350 diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
36351 index 8e9a668..78d6310 100644
36352 --- a/drivers/media/platform/omap/omap_vout.c
36353 +++ b/drivers/media/platform/omap/omap_vout.c
36354 @@ -63,7 +63,6 @@ enum omap_vout_channels {
36355 OMAP_VIDEO2,
36356 };
36357
36358 -static struct videobuf_queue_ops video_vbq_ops;
36359 /* Variables configurable through module params*/
36360 static u32 video1_numbuffers = 3;
36361 static u32 video2_numbuffers = 3;
36362 @@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
36363 {
36364 struct videobuf_queue *q;
36365 struct omap_vout_device *vout = NULL;
36366 + static struct videobuf_queue_ops video_vbq_ops = {
36367 + .buf_setup = omap_vout_buffer_setup,
36368 + .buf_prepare = omap_vout_buffer_prepare,
36369 + .buf_release = omap_vout_buffer_release,
36370 + .buf_queue = omap_vout_buffer_queue,
36371 + };
36372
36373 vout = video_drvdata(file);
36374 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
36375 @@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
36376 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
36377
36378 q = &vout->vbq;
36379 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
36380 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
36381 - video_vbq_ops.buf_release = omap_vout_buffer_release;
36382 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
36383 spin_lock_init(&vout->vbq_lock);
36384
36385 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
36386 diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
36387 index b671e20..34088b7 100644
36388 --- a/drivers/media/platform/s5p-tv/mixer.h
36389 +++ b/drivers/media/platform/s5p-tv/mixer.h
36390 @@ -155,7 +155,7 @@ struct mxr_layer {
36391 /** layer index (unique identifier) */
36392 int idx;
36393 /** callbacks for layer methods */
36394 - struct mxr_layer_ops ops;
36395 + struct mxr_layer_ops *ops;
36396 /** format array */
36397 const struct mxr_format **fmt_array;
36398 /** size of format array */
36399 diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
36400 index b93a21f..2535195 100644
36401 --- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
36402 +++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
36403 @@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
36404 {
36405 struct mxr_layer *layer;
36406 int ret;
36407 - struct mxr_layer_ops ops = {
36408 + static struct mxr_layer_ops ops = {
36409 .release = mxr_graph_layer_release,
36410 .buffer_set = mxr_graph_buffer_set,
36411 .stream_set = mxr_graph_stream_set,
36412 diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
36413 index 3b1670a..595c939 100644
36414 --- a/drivers/media/platform/s5p-tv/mixer_reg.c
36415 +++ b/drivers/media/platform/s5p-tv/mixer_reg.c
36416 @@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
36417 layer->update_buf = next;
36418 }
36419
36420 - layer->ops.buffer_set(layer, layer->update_buf);
36421 + layer->ops->buffer_set(layer, layer->update_buf);
36422
36423 if (done && done != layer->shadow_buf)
36424 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
36425 diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
36426 index 1f3b743..e839271 100644
36427 --- a/drivers/media/platform/s5p-tv/mixer_video.c
36428 +++ b/drivers/media/platform/s5p-tv/mixer_video.c
36429 @@ -208,7 +208,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
36430 layer->geo.src.height = layer->geo.src.full_height;
36431
36432 mxr_geometry_dump(mdev, &layer->geo);
36433 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
36434 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
36435 mxr_geometry_dump(mdev, &layer->geo);
36436 }
36437
36438 @@ -226,7 +226,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
36439 layer->geo.dst.full_width = mbus_fmt.width;
36440 layer->geo.dst.full_height = mbus_fmt.height;
36441 layer->geo.dst.field = mbus_fmt.field;
36442 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
36443 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
36444
36445 mxr_geometry_dump(mdev, &layer->geo);
36446 }
36447 @@ -332,7 +332,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
36448 /* set source size to highest accepted value */
36449 geo->src.full_width = max(geo->dst.full_width, pix->width);
36450 geo->src.full_height = max(geo->dst.full_height, pix->height);
36451 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
36452 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
36453 mxr_geometry_dump(mdev, &layer->geo);
36454 /* set cropping to total visible screen */
36455 geo->src.width = pix->width;
36456 @@ -340,12 +340,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
36457 geo->src.x_offset = 0;
36458 geo->src.y_offset = 0;
36459 /* assure consistency of geometry */
36460 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
36461 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
36462 mxr_geometry_dump(mdev, &layer->geo);
36463 /* set full size to lowest possible value */
36464 geo->src.full_width = 0;
36465 geo->src.full_height = 0;
36466 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
36467 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
36468 mxr_geometry_dump(mdev, &layer->geo);
36469
36470 /* returning results */
36471 @@ -472,7 +472,7 @@ static int mxr_s_selection(struct file *file, void *fh,
36472 target->width = s->r.width;
36473 target->height = s->r.height;
36474
36475 - layer->ops.fix_geometry(layer, stage, s->flags);
36476 + layer->ops->fix_geometry(layer, stage, s->flags);
36477
36478 /* retrieve update selection rectangle */
36479 res.left = target->x_offset;
36480 @@ -937,13 +937,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
36481 mxr_output_get(mdev);
36482
36483 mxr_layer_update_output(layer);
36484 - layer->ops.format_set(layer);
36485 + layer->ops->format_set(layer);
36486 /* enabling layer in hardware */
36487 spin_lock_irqsave(&layer->enq_slock, flags);
36488 layer->state = MXR_LAYER_STREAMING;
36489 spin_unlock_irqrestore(&layer->enq_slock, flags);
36490
36491 - layer->ops.stream_set(layer, MXR_ENABLE);
36492 + layer->ops->stream_set(layer, MXR_ENABLE);
36493 mxr_streamer_get(mdev);
36494
36495 return 0;
36496 @@ -1013,7 +1013,7 @@ static int stop_streaming(struct vb2_queue *vq)
36497 spin_unlock_irqrestore(&layer->enq_slock, flags);
36498
36499 /* disabling layer in hardware */
36500 - layer->ops.stream_set(layer, MXR_DISABLE);
36501 + layer->ops->stream_set(layer, MXR_DISABLE);
36502 /* remove one streamer */
36503 mxr_streamer_put(mdev);
36504 /* allow changes in output configuration */
36505 @@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
36506
36507 void mxr_layer_release(struct mxr_layer *layer)
36508 {
36509 - if (layer->ops.release)
36510 - layer->ops.release(layer);
36511 + if (layer->ops->release)
36512 + layer->ops->release(layer);
36513 }
36514
36515 void mxr_base_layer_release(struct mxr_layer *layer)
36516 @@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
36517
36518 layer->mdev = mdev;
36519 layer->idx = idx;
36520 - layer->ops = *ops;
36521 + layer->ops = ops;
36522
36523 spin_lock_init(&layer->enq_slock);
36524 INIT_LIST_HEAD(&layer->enq_list);
36525 diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
36526 index 3d13a63..da31bf1 100644
36527 --- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
36528 +++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
36529 @@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
36530 {
36531 struct mxr_layer *layer;
36532 int ret;
36533 - struct mxr_layer_ops ops = {
36534 + static struct mxr_layer_ops ops = {
36535 .release = mxr_vp_layer_release,
36536 .buffer_set = mxr_vp_buffer_set,
36537 .stream_set = mxr_vp_stream_set,
36538 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36539 index 643d80a..56bb96b 100644
36540 --- a/drivers/media/radio/radio-cadet.c
36541 +++ b/drivers/media/radio/radio-cadet.c
36542 @@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36543 unsigned char readbuf[RDS_BUFFER];
36544 int i = 0;
36545
36546 + if (count > RDS_BUFFER)
36547 + return -EFAULT;
36548 mutex_lock(&dev->lock);
36549 if (dev->rdsstat == 0)
36550 cadet_start_rds(dev);
36551 @@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36552 while (i < count && dev->rdsin != dev->rdsout)
36553 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36554
36555 - if (i && copy_to_user(data, readbuf, i))
36556 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
36557 i = -EFAULT;
36558 unlock:
36559 mutex_unlock(&dev->lock);
36560 diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
36561 index 3940bb0..fb3952a 100644
36562 --- a/drivers/media/usb/dvb-usb/cxusb.c
36563 +++ b/drivers/media/usb/dvb-usb/cxusb.c
36564 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36565
36566 struct dib0700_adapter_state {
36567 int (*set_param_save) (struct dvb_frontend *);
36568 -};
36569 +} __no_const;
36570
36571 static int dib7070_set_param_override(struct dvb_frontend *fe)
36572 {
36573 diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
36574 index 9382895..ac8093c 100644
36575 --- a/drivers/media/usb/dvb-usb/dw2102.c
36576 +++ b/drivers/media/usb/dvb-usb/dw2102.c
36577 @@ -95,7 +95,7 @@ struct su3000_state {
36578
36579 struct s6x0_state {
36580 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
36581 -};
36582 +} __no_const;
36583
36584 /* debug */
36585 static int dvb_usb_dw2102_debug;
36586 diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
36587 index 29b2172..a7c5b31 100644
36588 --- a/drivers/memstick/host/r592.c
36589 +++ b/drivers/memstick/host/r592.c
36590 @@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
36591 /* Executes one TPC (data is read/written from small or large fifo) */
36592 static void r592_execute_tpc(struct r592_device *dev)
36593 {
36594 - bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
36595 + bool is_write;
36596 int len, error;
36597 u32 status, reg;
36598
36599 @@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
36600 return;
36601 }
36602
36603 + is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
36604 len = dev->req->long_data ?
36605 dev->req->sg.length : dev->req->data_len;
36606
36607 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
36608 index fb69baa..cf7ad22 100644
36609 --- a/drivers/message/fusion/mptbase.c
36610 +++ b/drivers/message/fusion/mptbase.c
36611 @@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
36612 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
36613 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
36614
36615 +#ifdef CONFIG_GRKERNSEC_HIDESYM
36616 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
36617 +#else
36618 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36619 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
36620 +#endif
36621 +
36622 /*
36623 * Rounding UP to nearest 4-kB boundary here...
36624 */
36625 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
36626 index fa43c39..daeb158 100644
36627 --- a/drivers/message/fusion/mptsas.c
36628 +++ b/drivers/message/fusion/mptsas.c
36629 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
36630 return 0;
36631 }
36632
36633 +static inline void
36634 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36635 +{
36636 + if (phy_info->port_details) {
36637 + phy_info->port_details->rphy = rphy;
36638 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36639 + ioc->name, rphy));
36640 + }
36641 +
36642 + if (rphy) {
36643 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36644 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36645 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36646 + ioc->name, rphy, rphy->dev.release));
36647 + }
36648 +}
36649 +
36650 /* no mutex */
36651 static void
36652 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
36653 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
36654 return NULL;
36655 }
36656
36657 -static inline void
36658 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36659 -{
36660 - if (phy_info->port_details) {
36661 - phy_info->port_details->rphy = rphy;
36662 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36663 - ioc->name, rphy));
36664 - }
36665 -
36666 - if (rphy) {
36667 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36668 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36669 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36670 - ioc->name, rphy, rphy->dev.release));
36671 - }
36672 -}
36673 -
36674 static inline struct sas_port *
36675 mptsas_get_port(struct mptsas_phyinfo *phy_info)
36676 {
36677 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
36678 index 164afa7..b6b2e74 100644
36679 --- a/drivers/message/fusion/mptscsih.c
36680 +++ b/drivers/message/fusion/mptscsih.c
36681 @@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
36682
36683 h = shost_priv(SChost);
36684
36685 - if (h) {
36686 - if (h->info_kbuf == NULL)
36687 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
36688 - return h->info_kbuf;
36689 - h->info_kbuf[0] = '\0';
36690 + if (!h)
36691 + return NULL;
36692
36693 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
36694 - h->info_kbuf[size-1] = '\0';
36695 - }
36696 + if (h->info_kbuf == NULL)
36697 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
36698 + return h->info_kbuf;
36699 + h->info_kbuf[0] = '\0';
36700 +
36701 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
36702 + h->info_kbuf[size-1] = '\0';
36703
36704 return h->info_kbuf;
36705 }
36706 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
36707 index 8001aa6..b137580 100644
36708 --- a/drivers/message/i2o/i2o_proc.c
36709 +++ b/drivers/message/i2o/i2o_proc.c
36710 @@ -255,12 +255,6 @@ static char *scsi_devices[] = {
36711 "Array Controller Device"
36712 };
36713
36714 -static char *chtostr(char *tmp, u8 *chars, int n)
36715 -{
36716 - tmp[0] = 0;
36717 - return strncat(tmp, (char *)chars, n);
36718 -}
36719 -
36720 static int i2o_report_query_status(struct seq_file *seq, int block_status,
36721 char *group)
36722 {
36723 @@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
36724 } *result;
36725
36726 i2o_exec_execute_ddm_table ddm_table;
36727 - char tmp[28 + 1];
36728
36729 result = kmalloc(sizeof(*result), GFP_KERNEL);
36730 if (!result)
36731 @@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
36732
36733 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
36734 seq_printf(seq, "%-#8x", ddm_table.module_id);
36735 - seq_printf(seq, "%-29s",
36736 - chtostr(tmp, ddm_table.module_name_version, 28));
36737 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
36738 seq_printf(seq, "%9d ", ddm_table.data_size);
36739 seq_printf(seq, "%8d", ddm_table.code_size);
36740
36741 @@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
36742
36743 i2o_driver_result_table *result;
36744 i2o_driver_store_table *dst;
36745 - char tmp[28 + 1];
36746
36747 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
36748 if (result == NULL)
36749 @@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
36750
36751 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
36752 seq_printf(seq, "%-#8x", dst->module_id);
36753 - seq_printf(seq, "%-29s",
36754 - chtostr(tmp, dst->module_name_version, 28));
36755 - seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
36756 + seq_printf(seq, "%-.28s", dst->module_name_version);
36757 + seq_printf(seq, "%-.8s", dst->date);
36758 seq_printf(seq, "%8d ", dst->module_size);
36759 seq_printf(seq, "%8d ", dst->mpb_size);
36760 seq_printf(seq, "0x%04x", dst->module_flags);
36761 @@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
36762 // == (allow) 512d bytes (max)
36763 static u16 *work16 = (u16 *) work32;
36764 int token;
36765 - char tmp[16 + 1];
36766
36767 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
36768
36769 @@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
36770 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
36771 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
36772 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
36773 - seq_printf(seq, "Vendor info : %s\n",
36774 - chtostr(tmp, (u8 *) (work32 + 2), 16));
36775 - seq_printf(seq, "Product info : %s\n",
36776 - chtostr(tmp, (u8 *) (work32 + 6), 16));
36777 - seq_printf(seq, "Description : %s\n",
36778 - chtostr(tmp, (u8 *) (work32 + 10), 16));
36779 - seq_printf(seq, "Product rev. : %s\n",
36780 - chtostr(tmp, (u8 *) (work32 + 14), 8));
36781 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
36782 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
36783 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
36784 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
36785
36786 seq_printf(seq, "Serial number : ");
36787 print_serial_number(seq, (u8 *) (work32 + 16),
36788 @@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
36789 u8 pad[256]; // allow up to 256 byte (max) serial number
36790 } result;
36791
36792 - char tmp[24 + 1];
36793 -
36794 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
36795
36796 if (token < 0) {
36797 @@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
36798 }
36799
36800 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
36801 - seq_printf(seq, "Module name : %s\n",
36802 - chtostr(tmp, result.module_name, 24));
36803 - seq_printf(seq, "Module revision : %s\n",
36804 - chtostr(tmp, result.module_rev, 8));
36805 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
36806 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
36807
36808 seq_printf(seq, "Serial number : ");
36809 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
36810 @@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
36811 u8 instance_number[4];
36812 } result;
36813
36814 - char tmp[64 + 1];
36815 -
36816 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
36817
36818 if (token < 0) {
36819 @@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
36820 return 0;
36821 }
36822
36823 - seq_printf(seq, "Device name : %s\n",
36824 - chtostr(tmp, result.device_name, 64));
36825 - seq_printf(seq, "Service name : %s\n",
36826 - chtostr(tmp, result.service_name, 64));
36827 - seq_printf(seq, "Physical name : %s\n",
36828 - chtostr(tmp, result.physical_location, 64));
36829 - seq_printf(seq, "Instance number : %s\n",
36830 - chtostr(tmp, result.instance_number, 4));
36831 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
36832 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
36833 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
36834 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
36835
36836 return 0;
36837 }
36838 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
36839 index a8c08f3..155fe3d 100644
36840 --- a/drivers/message/i2o/iop.c
36841 +++ b/drivers/message/i2o/iop.c
36842 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
36843
36844 spin_lock_irqsave(&c->context_list_lock, flags);
36845
36846 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
36847 - atomic_inc(&c->context_list_counter);
36848 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
36849 + atomic_inc_unchecked(&c->context_list_counter);
36850
36851 - entry->context = atomic_read(&c->context_list_counter);
36852 + entry->context = atomic_read_unchecked(&c->context_list_counter);
36853
36854 list_add(&entry->list, &c->context_list);
36855
36856 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
36857
36858 #if BITS_PER_LONG == 64
36859 spin_lock_init(&c->context_list_lock);
36860 - atomic_set(&c->context_list_counter, 0);
36861 + atomic_set_unchecked(&c->context_list_counter, 0);
36862 INIT_LIST_HEAD(&c->context_list);
36863 #endif
36864
36865 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
36866 index 45ece11..8efa218 100644
36867 --- a/drivers/mfd/janz-cmodio.c
36868 +++ b/drivers/mfd/janz-cmodio.c
36869 @@ -13,6 +13,7 @@
36870
36871 #include <linux/kernel.h>
36872 #include <linux/module.h>
36873 +#include <linux/slab.h>
36874 #include <linux/init.h>
36875 #include <linux/pci.h>
36876 #include <linux/interrupt.h>
36877 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
36878 index 3aa9a96..59cf685 100644
36879 --- a/drivers/misc/kgdbts.c
36880 +++ b/drivers/misc/kgdbts.c
36881 @@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
36882 char before[BREAK_INSTR_SIZE];
36883 char after[BREAK_INSTR_SIZE];
36884
36885 - probe_kernel_read(before, (char *)kgdbts_break_test,
36886 + probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
36887 BREAK_INSTR_SIZE);
36888 init_simple_test();
36889 ts.tst = plant_and_detach_test;
36890 @@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
36891 /* Activate test with initial breakpoint */
36892 if (!is_early)
36893 kgdb_breakpoint();
36894 - probe_kernel_read(after, (char *)kgdbts_break_test,
36895 + probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
36896 BREAK_INSTR_SIZE);
36897 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
36898 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
36899 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
36900 index 4a87e5c..76bdf5c 100644
36901 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
36902 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
36903 @@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
36904 * the lid is closed. This leads to interrupts as soon as a little move
36905 * is done.
36906 */
36907 - atomic_inc(&lis3->count);
36908 + atomic_inc_unchecked(&lis3->count);
36909
36910 wake_up_interruptible(&lis3->misc_wait);
36911 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
36912 @@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
36913 if (lis3->pm_dev)
36914 pm_runtime_get_sync(lis3->pm_dev);
36915
36916 - atomic_set(&lis3->count, 0);
36917 + atomic_set_unchecked(&lis3->count, 0);
36918 return 0;
36919 }
36920
36921 @@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
36922 add_wait_queue(&lis3->misc_wait, &wait);
36923 while (true) {
36924 set_current_state(TASK_INTERRUPTIBLE);
36925 - data = atomic_xchg(&lis3->count, 0);
36926 + data = atomic_xchg_unchecked(&lis3->count, 0);
36927 if (data)
36928 break;
36929
36930 @@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
36931 struct lis3lv02d, miscdev);
36932
36933 poll_wait(file, &lis3->misc_wait, wait);
36934 - if (atomic_read(&lis3->count))
36935 + if (atomic_read_unchecked(&lis3->count))
36936 return POLLIN | POLLRDNORM;
36937 return 0;
36938 }
36939 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
36940 index c439c82..1f20f57 100644
36941 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
36942 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
36943 @@ -297,7 +297,7 @@ struct lis3lv02d {
36944 struct input_polled_dev *idev; /* input device */
36945 struct platform_device *pdev; /* platform device */
36946 struct regulator_bulk_data regulators[2];
36947 - atomic_t count; /* interrupt count after last read */
36948 + atomic_unchecked_t count; /* interrupt count after last read */
36949 union axis_conversion ac; /* hw -> logical axis */
36950 int mapped_btns[3];
36951
36952 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
36953 index 2f30bad..c4c13d0 100644
36954 --- a/drivers/misc/sgi-gru/gruhandles.c
36955 +++ b/drivers/misc/sgi-gru/gruhandles.c
36956 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
36957 unsigned long nsec;
36958
36959 nsec = CLKS2NSEC(clks);
36960 - atomic_long_inc(&mcs_op_statistics[op].count);
36961 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
36962 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
36963 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
36964 if (mcs_op_statistics[op].max < nsec)
36965 mcs_op_statistics[op].max = nsec;
36966 }
36967 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
36968 index 950dbe9..eeef0f8 100644
36969 --- a/drivers/misc/sgi-gru/gruprocfs.c
36970 +++ b/drivers/misc/sgi-gru/gruprocfs.c
36971 @@ -32,9 +32,9 @@
36972
36973 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
36974
36975 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
36976 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
36977 {
36978 - unsigned long val = atomic_long_read(v);
36979 + unsigned long val = atomic_long_read_unchecked(v);
36980
36981 seq_printf(s, "%16lu %s\n", val, id);
36982 }
36983 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
36984
36985 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
36986 for (op = 0; op < mcsop_last; op++) {
36987 - count = atomic_long_read(&mcs_op_statistics[op].count);
36988 - total = atomic_long_read(&mcs_op_statistics[op].total);
36989 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
36990 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
36991 max = mcs_op_statistics[op].max;
36992 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
36993 count ? total / count : 0, max);
36994 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
36995 index 5c3ce24..4915ccb 100644
36996 --- a/drivers/misc/sgi-gru/grutables.h
36997 +++ b/drivers/misc/sgi-gru/grutables.h
36998 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
36999 * GRU statistics.
37000 */
37001 struct gru_stats_s {
37002 - atomic_long_t vdata_alloc;
37003 - atomic_long_t vdata_free;
37004 - atomic_long_t gts_alloc;
37005 - atomic_long_t gts_free;
37006 - atomic_long_t gms_alloc;
37007 - atomic_long_t gms_free;
37008 - atomic_long_t gts_double_allocate;
37009 - atomic_long_t assign_context;
37010 - atomic_long_t assign_context_failed;
37011 - atomic_long_t free_context;
37012 - atomic_long_t load_user_context;
37013 - atomic_long_t load_kernel_context;
37014 - atomic_long_t lock_kernel_context;
37015 - atomic_long_t unlock_kernel_context;
37016 - atomic_long_t steal_user_context;
37017 - atomic_long_t steal_kernel_context;
37018 - atomic_long_t steal_context_failed;
37019 - atomic_long_t nopfn;
37020 - atomic_long_t asid_new;
37021 - atomic_long_t asid_next;
37022 - atomic_long_t asid_wrap;
37023 - atomic_long_t asid_reuse;
37024 - atomic_long_t intr;
37025 - atomic_long_t intr_cbr;
37026 - atomic_long_t intr_tfh;
37027 - atomic_long_t intr_spurious;
37028 - atomic_long_t intr_mm_lock_failed;
37029 - atomic_long_t call_os;
37030 - atomic_long_t call_os_wait_queue;
37031 - atomic_long_t user_flush_tlb;
37032 - atomic_long_t user_unload_context;
37033 - atomic_long_t user_exception;
37034 - atomic_long_t set_context_option;
37035 - atomic_long_t check_context_retarget_intr;
37036 - atomic_long_t check_context_unload;
37037 - atomic_long_t tlb_dropin;
37038 - atomic_long_t tlb_preload_page;
37039 - atomic_long_t tlb_dropin_fail_no_asid;
37040 - atomic_long_t tlb_dropin_fail_upm;
37041 - atomic_long_t tlb_dropin_fail_invalid;
37042 - atomic_long_t tlb_dropin_fail_range_active;
37043 - atomic_long_t tlb_dropin_fail_idle;
37044 - atomic_long_t tlb_dropin_fail_fmm;
37045 - atomic_long_t tlb_dropin_fail_no_exception;
37046 - atomic_long_t tfh_stale_on_fault;
37047 - atomic_long_t mmu_invalidate_range;
37048 - atomic_long_t mmu_invalidate_page;
37049 - atomic_long_t flush_tlb;
37050 - atomic_long_t flush_tlb_gru;
37051 - atomic_long_t flush_tlb_gru_tgh;
37052 - atomic_long_t flush_tlb_gru_zero_asid;
37053 + atomic_long_unchecked_t vdata_alloc;
37054 + atomic_long_unchecked_t vdata_free;
37055 + atomic_long_unchecked_t gts_alloc;
37056 + atomic_long_unchecked_t gts_free;
37057 + atomic_long_unchecked_t gms_alloc;
37058 + atomic_long_unchecked_t gms_free;
37059 + atomic_long_unchecked_t gts_double_allocate;
37060 + atomic_long_unchecked_t assign_context;
37061 + atomic_long_unchecked_t assign_context_failed;
37062 + atomic_long_unchecked_t free_context;
37063 + atomic_long_unchecked_t load_user_context;
37064 + atomic_long_unchecked_t load_kernel_context;
37065 + atomic_long_unchecked_t lock_kernel_context;
37066 + atomic_long_unchecked_t unlock_kernel_context;
37067 + atomic_long_unchecked_t steal_user_context;
37068 + atomic_long_unchecked_t steal_kernel_context;
37069 + atomic_long_unchecked_t steal_context_failed;
37070 + atomic_long_unchecked_t nopfn;
37071 + atomic_long_unchecked_t asid_new;
37072 + atomic_long_unchecked_t asid_next;
37073 + atomic_long_unchecked_t asid_wrap;
37074 + atomic_long_unchecked_t asid_reuse;
37075 + atomic_long_unchecked_t intr;
37076 + atomic_long_unchecked_t intr_cbr;
37077 + atomic_long_unchecked_t intr_tfh;
37078 + atomic_long_unchecked_t intr_spurious;
37079 + atomic_long_unchecked_t intr_mm_lock_failed;
37080 + atomic_long_unchecked_t call_os;
37081 + atomic_long_unchecked_t call_os_wait_queue;
37082 + atomic_long_unchecked_t user_flush_tlb;
37083 + atomic_long_unchecked_t user_unload_context;
37084 + atomic_long_unchecked_t user_exception;
37085 + atomic_long_unchecked_t set_context_option;
37086 + atomic_long_unchecked_t check_context_retarget_intr;
37087 + atomic_long_unchecked_t check_context_unload;
37088 + atomic_long_unchecked_t tlb_dropin;
37089 + atomic_long_unchecked_t tlb_preload_page;
37090 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37091 + atomic_long_unchecked_t tlb_dropin_fail_upm;
37092 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
37093 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
37094 + atomic_long_unchecked_t tlb_dropin_fail_idle;
37095 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
37096 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37097 + atomic_long_unchecked_t tfh_stale_on_fault;
37098 + atomic_long_unchecked_t mmu_invalidate_range;
37099 + atomic_long_unchecked_t mmu_invalidate_page;
37100 + atomic_long_unchecked_t flush_tlb;
37101 + atomic_long_unchecked_t flush_tlb_gru;
37102 + atomic_long_unchecked_t flush_tlb_gru_tgh;
37103 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37104
37105 - atomic_long_t copy_gpa;
37106 - atomic_long_t read_gpa;
37107 + atomic_long_unchecked_t copy_gpa;
37108 + atomic_long_unchecked_t read_gpa;
37109
37110 - atomic_long_t mesq_receive;
37111 - atomic_long_t mesq_receive_none;
37112 - atomic_long_t mesq_send;
37113 - atomic_long_t mesq_send_failed;
37114 - atomic_long_t mesq_noop;
37115 - atomic_long_t mesq_send_unexpected_error;
37116 - atomic_long_t mesq_send_lb_overflow;
37117 - atomic_long_t mesq_send_qlimit_reached;
37118 - atomic_long_t mesq_send_amo_nacked;
37119 - atomic_long_t mesq_send_put_nacked;
37120 - atomic_long_t mesq_page_overflow;
37121 - atomic_long_t mesq_qf_locked;
37122 - atomic_long_t mesq_qf_noop_not_full;
37123 - atomic_long_t mesq_qf_switch_head_failed;
37124 - atomic_long_t mesq_qf_unexpected_error;
37125 - atomic_long_t mesq_noop_unexpected_error;
37126 - atomic_long_t mesq_noop_lb_overflow;
37127 - atomic_long_t mesq_noop_qlimit_reached;
37128 - atomic_long_t mesq_noop_amo_nacked;
37129 - atomic_long_t mesq_noop_put_nacked;
37130 - atomic_long_t mesq_noop_page_overflow;
37131 + atomic_long_unchecked_t mesq_receive;
37132 + atomic_long_unchecked_t mesq_receive_none;
37133 + atomic_long_unchecked_t mesq_send;
37134 + atomic_long_unchecked_t mesq_send_failed;
37135 + atomic_long_unchecked_t mesq_noop;
37136 + atomic_long_unchecked_t mesq_send_unexpected_error;
37137 + atomic_long_unchecked_t mesq_send_lb_overflow;
37138 + atomic_long_unchecked_t mesq_send_qlimit_reached;
37139 + atomic_long_unchecked_t mesq_send_amo_nacked;
37140 + atomic_long_unchecked_t mesq_send_put_nacked;
37141 + atomic_long_unchecked_t mesq_page_overflow;
37142 + atomic_long_unchecked_t mesq_qf_locked;
37143 + atomic_long_unchecked_t mesq_qf_noop_not_full;
37144 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
37145 + atomic_long_unchecked_t mesq_qf_unexpected_error;
37146 + atomic_long_unchecked_t mesq_noop_unexpected_error;
37147 + atomic_long_unchecked_t mesq_noop_lb_overflow;
37148 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
37149 + atomic_long_unchecked_t mesq_noop_amo_nacked;
37150 + atomic_long_unchecked_t mesq_noop_put_nacked;
37151 + atomic_long_unchecked_t mesq_noop_page_overflow;
37152
37153 };
37154
37155 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37156 tghop_invalidate, mcsop_last};
37157
37158 struct mcs_op_statistic {
37159 - atomic_long_t count;
37160 - atomic_long_t total;
37161 + atomic_long_unchecked_t count;
37162 + atomic_long_unchecked_t total;
37163 unsigned long max;
37164 };
37165
37166 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37167
37168 #define STAT(id) do { \
37169 if (gru_options & OPT_STATS) \
37170 - atomic_long_inc(&gru_stats.id); \
37171 + atomic_long_inc_unchecked(&gru_stats.id); \
37172 } while (0)
37173
37174 #ifdef CONFIG_SGI_GRU_DEBUG
37175 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37176 index c862cd4..0d176fe 100644
37177 --- a/drivers/misc/sgi-xp/xp.h
37178 +++ b/drivers/misc/sgi-xp/xp.h
37179 @@ -288,7 +288,7 @@ struct xpc_interface {
37180 xpc_notify_func, void *);
37181 void (*received) (short, int, void *);
37182 enum xp_retval (*partid_to_nasids) (short, void *);
37183 -};
37184 +} __no_const;
37185
37186 extern struct xpc_interface xpc_interface;
37187
37188 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37189 index b94d5f7..7f494c5 100644
37190 --- a/drivers/misc/sgi-xp/xpc.h
37191 +++ b/drivers/misc/sgi-xp/xpc.h
37192 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
37193 void (*received_payload) (struct xpc_channel *, void *);
37194 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37195 };
37196 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37197
37198 /* struct xpc_partition act_state values (for XPC HB) */
37199
37200 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37201 /* found in xpc_main.c */
37202 extern struct device *xpc_part;
37203 extern struct device *xpc_chan;
37204 -extern struct xpc_arch_operations xpc_arch_ops;
37205 +extern xpc_arch_operations_no_const xpc_arch_ops;
37206 extern int xpc_disengage_timelimit;
37207 extern int xpc_disengage_timedout;
37208 extern int xpc_activate_IRQ_rcvd;
37209 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37210 index d971817..33bdca5 100644
37211 --- a/drivers/misc/sgi-xp/xpc_main.c
37212 +++ b/drivers/misc/sgi-xp/xpc_main.c
37213 @@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
37214 .notifier_call = xpc_system_die,
37215 };
37216
37217 -struct xpc_arch_operations xpc_arch_ops;
37218 +xpc_arch_operations_no_const xpc_arch_ops;
37219
37220 /*
37221 * Timer function to enforce the timelimit on the partition disengage.
37222 @@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
37223
37224 if (((die_args->trapnr == X86_TRAP_MF) ||
37225 (die_args->trapnr == X86_TRAP_XF)) &&
37226 - !user_mode_vm(die_args->regs))
37227 + !user_mode(die_args->regs))
37228 xpc_die_deactivate();
37229
37230 break;
37231 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
37232 index 6d8f701..35b6369 100644
37233 --- a/drivers/mmc/core/mmc_ops.c
37234 +++ b/drivers/mmc/core/mmc_ops.c
37235 @@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
37236 void *data_buf;
37237 int is_on_stack;
37238
37239 - is_on_stack = object_is_on_stack(buf);
37240 + is_on_stack = object_starts_on_stack(buf);
37241 if (is_on_stack) {
37242 /*
37243 * dma onto stack is unsafe/nonportable, but callers to this
37244 diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
37245 index 53b8fd9..615b462 100644
37246 --- a/drivers/mmc/host/dw_mmc.h
37247 +++ b/drivers/mmc/host/dw_mmc.h
37248 @@ -205,5 +205,5 @@ struct dw_mci_drv_data {
37249 int (*parse_dt)(struct dw_mci *host);
37250 int (*setup_bus)(struct dw_mci *host,
37251 struct device_node *slot_np, u8 bus_width);
37252 -};
37253 +} __do_const;
37254 #endif /* _DW_MMC_H_ */
37255 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
37256 index 82a8de1..3c56ccb 100644
37257 --- a/drivers/mmc/host/sdhci-s3c.c
37258 +++ b/drivers/mmc/host/sdhci-s3c.c
37259 @@ -721,9 +721,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
37260 * we can use overriding functions instead of default.
37261 */
37262 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
37263 - sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
37264 - sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
37265 - sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
37266 + pax_open_kernel();
37267 + *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
37268 + *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
37269 + *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
37270 + pax_close_kernel();
37271 }
37272
37273 /* It supports additional host capabilities if needed */
37274 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37275 index a4eb8b5..8c0628f 100644
37276 --- a/drivers/mtd/devices/doc2000.c
37277 +++ b/drivers/mtd/devices/doc2000.c
37278 @@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37279
37280 /* The ECC will not be calculated correctly if less than 512 is written */
37281 /* DBB-
37282 - if (len != 0x200 && eccbuf)
37283 + if (len != 0x200)
37284 printk(KERN_WARNING
37285 "ECC needs a full sector write (adr: %lx size %lx)\n",
37286 (long) to, (long) len);
37287 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
37288 index 0c8bb6b..6f35deb 100644
37289 --- a/drivers/mtd/nand/denali.c
37290 +++ b/drivers/mtd/nand/denali.c
37291 @@ -24,6 +24,7 @@
37292 #include <linux/slab.h>
37293 #include <linux/mtd/mtd.h>
37294 #include <linux/module.h>
37295 +#include <linux/slab.h>
37296
37297 #include "denali.h"
37298
37299 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37300 index 51b9d6a..52af9a7 100644
37301 --- a/drivers/mtd/nftlmount.c
37302 +++ b/drivers/mtd/nftlmount.c
37303 @@ -24,6 +24,7 @@
37304 #include <asm/errno.h>
37305 #include <linux/delay.h>
37306 #include <linux/slab.h>
37307 +#include <linux/sched.h>
37308 #include <linux/mtd/mtd.h>
37309 #include <linux/mtd/nand.h>
37310 #include <linux/mtd/nftl.h>
37311 diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
37312 index 70dba5d..11a0919 100644
37313 --- a/drivers/net/ethernet/8390/ax88796.c
37314 +++ b/drivers/net/ethernet/8390/ax88796.c
37315 @@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
37316 if (ax->plat->reg_offsets)
37317 ei_local->reg_offset = ax->plat->reg_offsets;
37318 else {
37319 + resource_size_t _mem_size = mem_size;
37320 + do_div(_mem_size, 0x18);
37321 ei_local->reg_offset = ax->reg_offsets;
37322 for (ret = 0; ret < 0x18; ret++)
37323 - ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
37324 + ax->reg_offsets[ret] = _mem_size * ret;
37325 }
37326
37327 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
37328 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
37329 index 0991534..8098e92 100644
37330 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
37331 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
37332 @@ -1094,7 +1094,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
37333 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
37334 {
37335 /* RX_MODE controlling object */
37336 - bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
37337 + bnx2x_init_rx_mode_obj(bp);
37338
37339 /* multicast configuration controlling object */
37340 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
37341 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
37342 index 09b625e..15b16fe 100644
37343 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
37344 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
37345 @@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
37346 return rc;
37347 }
37348
37349 -void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
37350 - struct bnx2x_rx_mode_obj *o)
37351 +void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
37352 {
37353 if (CHIP_IS_E1x(bp)) {
37354 - o->wait_comp = bnx2x_empty_rx_mode_wait;
37355 - o->config_rx_mode = bnx2x_set_rx_mode_e1x;
37356 + bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
37357 + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
37358 } else {
37359 - o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
37360 - o->config_rx_mode = bnx2x_set_rx_mode_e2;
37361 + bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
37362 + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
37363 }
37364 }
37365
37366 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
37367 index adbd91b..58ec94a 100644
37368 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
37369 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
37370 @@ -1293,8 +1293,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
37371
37372 /********************* RX MODE ****************/
37373
37374 -void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
37375 - struct bnx2x_rx_mode_obj *o);
37376 +void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
37377
37378 /**
37379 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
37380 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
37381 index d330e81..ce1fb9a 100644
37382 --- a/drivers/net/ethernet/broadcom/tg3.h
37383 +++ b/drivers/net/ethernet/broadcom/tg3.h
37384 @@ -146,6 +146,7 @@
37385 #define CHIPREV_ID_5750_A0 0x4000
37386 #define CHIPREV_ID_5750_A1 0x4001
37387 #define CHIPREV_ID_5750_A3 0x4003
37388 +#define CHIPREV_ID_5750_C1 0x4201
37389 #define CHIPREV_ID_5750_C2 0x4202
37390 #define CHIPREV_ID_5752_A0_HW 0x5000
37391 #define CHIPREV_ID_5752_A0 0x6000
37392 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
37393 index 8cffcdf..aadf043 100644
37394 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
37395 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
37396 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
37397 */
37398 struct l2t_skb_cb {
37399 arp_failure_handler_func arp_failure_handler;
37400 -};
37401 +} __no_const;
37402
37403 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
37404
37405 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
37406 index 4c83003..2a2a5b9 100644
37407 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
37408 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
37409 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
37410 for (i=0; i<ETH_ALEN; i++) {
37411 tmp.addr[i] = dev->dev_addr[i];
37412 }
37413 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
37414 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
37415 break;
37416
37417 case DE4X5_SET_HWADDR: /* Set the hardware address */
37418 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
37419 spin_lock_irqsave(&lp->lock, flags);
37420 memcpy(&statbuf, &lp->pktStats, ioc->len);
37421 spin_unlock_irqrestore(&lp->lock, flags);
37422 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
37423 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
37424 return -EFAULT;
37425 break;
37426 }
37427 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
37428 index 4d6f3c5..6169e60 100644
37429 --- a/drivers/net/ethernet/emulex/benet/be_main.c
37430 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
37431 @@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
37432
37433 if (wrapped)
37434 newacc += 65536;
37435 - ACCESS_ONCE(*acc) = newacc;
37436 + ACCESS_ONCE_RW(*acc) = newacc;
37437 }
37438
37439 void be_parse_stats(struct be_adapter *adapter)
37440 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
37441 index 74d749e..eefb1bd 100644
37442 --- a/drivers/net/ethernet/faraday/ftgmac100.c
37443 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
37444 @@ -31,6 +31,8 @@
37445 #include <linux/netdevice.h>
37446 #include <linux/phy.h>
37447 #include <linux/platform_device.h>
37448 +#include <linux/interrupt.h>
37449 +#include <linux/irqreturn.h>
37450 #include <net/ip.h>
37451
37452 #include "ftgmac100.h"
37453 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
37454 index b901a01..1ff32ee 100644
37455 --- a/drivers/net/ethernet/faraday/ftmac100.c
37456 +++ b/drivers/net/ethernet/faraday/ftmac100.c
37457 @@ -31,6 +31,8 @@
37458 #include <linux/module.h>
37459 #include <linux/netdevice.h>
37460 #include <linux/platform_device.h>
37461 +#include <linux/interrupt.h>
37462 +#include <linux/irqreturn.h>
37463
37464 #include "ftmac100.h"
37465
37466 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
37467 index bb9256a..56d8752 100644
37468 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
37469 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
37470 @@ -806,7 +806,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
37471 }
37472
37473 /* update the base incval used to calculate frequency adjustment */
37474 - ACCESS_ONCE(adapter->base_incval) = incval;
37475 + ACCESS_ONCE_RW(adapter->base_incval) = incval;
37476 smp_mb();
37477
37478 /* need lock to prevent incorrect read while modifying cyclecounter */
37479 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
37480 index fbe5363..266b4e3 100644
37481 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
37482 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
37483 @@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
37484 struct __vxge_hw_fifo *fifo;
37485 struct vxge_hw_fifo_config *config;
37486 u32 txdl_size, txdl_per_memblock;
37487 - struct vxge_hw_mempool_cbs fifo_mp_callback;
37488 + static struct vxge_hw_mempool_cbs fifo_mp_callback = {
37489 + .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
37490 + };
37491 +
37492 struct __vxge_hw_virtualpath *vpath;
37493
37494 if ((vp == NULL) || (attr == NULL)) {
37495 @@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
37496 goto exit;
37497 }
37498
37499 - fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
37500 -
37501 fifo->mempool =
37502 __vxge_hw_mempool_create(vpath->hldev,
37503 fifo->config->memblock_size,
37504 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
37505 index 998974f..ecd26db 100644
37506 --- a/drivers/net/ethernet/realtek/r8169.c
37507 +++ b/drivers/net/ethernet/realtek/r8169.c
37508 @@ -741,22 +741,22 @@ struct rtl8169_private {
37509 struct mdio_ops {
37510 void (*write)(struct rtl8169_private *, int, int);
37511 int (*read)(struct rtl8169_private *, int);
37512 - } mdio_ops;
37513 + } __no_const mdio_ops;
37514
37515 struct pll_power_ops {
37516 void (*down)(struct rtl8169_private *);
37517 void (*up)(struct rtl8169_private *);
37518 - } pll_power_ops;
37519 + } __no_const pll_power_ops;
37520
37521 struct jumbo_ops {
37522 void (*enable)(struct rtl8169_private *);
37523 void (*disable)(struct rtl8169_private *);
37524 - } jumbo_ops;
37525 + } __no_const jumbo_ops;
37526
37527 struct csi_ops {
37528 void (*write)(struct rtl8169_private *, int, int);
37529 u32 (*read)(struct rtl8169_private *, int);
37530 - } csi_ops;
37531 + } __no_const csi_ops;
37532
37533 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
37534 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
37535 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
37536 index 0767043f..08c2553 100644
37537 --- a/drivers/net/ethernet/sfc/ptp.c
37538 +++ b/drivers/net/ethernet/sfc/ptp.c
37539 @@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
37540 (u32)((u64)ptp->start.dma_addr >> 32));
37541
37542 /* Clear flag that signals MC ready */
37543 - ACCESS_ONCE(*start) = 0;
37544 + ACCESS_ONCE_RW(*start) = 0;
37545 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
37546 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
37547
37548 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
37549 index 0c74a70..3bc6f68 100644
37550 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
37551 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
37552 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
37553
37554 writel(value, ioaddr + MMC_CNTRL);
37555
37556 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
37557 - MMC_CNTRL, value);
37558 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
37559 +// MMC_CNTRL, value);
37560 }
37561
37562 /* To mask all all interrupts.*/
37563 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
37564 index e6fe0d8..2b7d752 100644
37565 --- a/drivers/net/hyperv/hyperv_net.h
37566 +++ b/drivers/net/hyperv/hyperv_net.h
37567 @@ -101,7 +101,7 @@ struct rndis_device {
37568
37569 enum rndis_device_state state;
37570 bool link_state;
37571 - atomic_t new_req_id;
37572 + atomic_unchecked_t new_req_id;
37573
37574 spinlock_t request_lock;
37575 struct list_head req_list;
37576 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
37577 index 2b657d4..9903bc0 100644
37578 --- a/drivers/net/hyperv/rndis_filter.c
37579 +++ b/drivers/net/hyperv/rndis_filter.c
37580 @@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
37581 * template
37582 */
37583 set = &rndis_msg->msg.set_req;
37584 - set->req_id = atomic_inc_return(&dev->new_req_id);
37585 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
37586
37587 /* Add to the request list */
37588 spin_lock_irqsave(&dev->request_lock, flags);
37589 @@ -758,7 +758,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
37590
37591 /* Setup the rndis set */
37592 halt = &request->request_msg.msg.halt_req;
37593 - halt->req_id = atomic_inc_return(&dev->new_req_id);
37594 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
37595
37596 /* Ignore return since this msg is optional. */
37597 rndis_filter_send_request(dev, request);
37598 diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
37599 index 1e9cb0b..7839125 100644
37600 --- a/drivers/net/ieee802154/fakehard.c
37601 +++ b/drivers/net/ieee802154/fakehard.c
37602 @@ -386,7 +386,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
37603 phy->transmit_power = 0xbf;
37604
37605 dev->netdev_ops = &fake_ops;
37606 - dev->ml_priv = &fake_mlme;
37607 + dev->ml_priv = (void *)&fake_mlme;
37608
37609 priv = netdev_priv(dev);
37610 priv->phy = phy;
37611 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
37612 index d3fb97d..e229d3e 100644
37613 --- a/drivers/net/macvlan.c
37614 +++ b/drivers/net/macvlan.c
37615 @@ -913,7 +913,7 @@ static int macvlan_device_event(struct notifier_block *unused,
37616 return NOTIFY_DONE;
37617 }
37618
37619 -static struct notifier_block macvlan_notifier_block __read_mostly = {
37620 +static struct notifier_block macvlan_notifier_block = {
37621 .notifier_call = macvlan_device_event,
37622 };
37623
37624 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
37625 index 0f0f9ce..0ca5819 100644
37626 --- a/drivers/net/macvtap.c
37627 +++ b/drivers/net/macvtap.c
37628 @@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
37629 return NOTIFY_DONE;
37630 }
37631
37632 -static struct notifier_block macvtap_notifier_block __read_mostly = {
37633 +static struct notifier_block macvtap_notifier_block = {
37634 .notifier_call = macvtap_device_event,
37635 };
37636
37637 diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
37638 index daec9b0..6428fcb 100644
37639 --- a/drivers/net/phy/mdio-bitbang.c
37640 +++ b/drivers/net/phy/mdio-bitbang.c
37641 @@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
37642 struct mdiobb_ctrl *ctrl = bus->priv;
37643
37644 module_put(ctrl->ops->owner);
37645 + mdiobus_unregister(bus);
37646 mdiobus_free(bus);
37647 }
37648 EXPORT_SYMBOL(free_mdio_bitbang);
37649 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
37650 index 508570e..f706dc7 100644
37651 --- a/drivers/net/ppp/ppp_generic.c
37652 +++ b/drivers/net/ppp/ppp_generic.c
37653 @@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37654 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
37655 struct ppp_stats stats;
37656 struct ppp_comp_stats cstats;
37657 - char *vers;
37658
37659 switch (cmd) {
37660 case SIOCGPPPSTATS:
37661 @@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37662 break;
37663
37664 case SIOCGPPPVER:
37665 - vers = PPP_VERSION;
37666 - if (copy_to_user(addr, vers, strlen(vers) + 1))
37667 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
37668 break;
37669 err = 0;
37670 break;
37671 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
37672 index ad86660..9fd0884 100644
37673 --- a/drivers/net/team/team.c
37674 +++ b/drivers/net/team/team.c
37675 @@ -2601,7 +2601,7 @@ static int team_device_event(struct notifier_block *unused,
37676 return NOTIFY_DONE;
37677 }
37678
37679 -static struct notifier_block team_notifier_block __read_mostly = {
37680 +static struct notifier_block team_notifier_block = {
37681 .notifier_call = team_device_event,
37682 };
37683
37684 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
37685 index 2917a86..edd463f 100644
37686 --- a/drivers/net/tun.c
37687 +++ b/drivers/net/tun.c
37688 @@ -1836,7 +1836,7 @@ unlock:
37689 }
37690
37691 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
37692 - unsigned long arg, int ifreq_len)
37693 + unsigned long arg, size_t ifreq_len)
37694 {
37695 struct tun_file *tfile = file->private_data;
37696 struct tun_struct *tun;
37697 @@ -1848,6 +1848,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
37698 int vnet_hdr_sz;
37699 int ret;
37700
37701 + if (ifreq_len > sizeof ifr)
37702 + return -EFAULT;
37703 +
37704 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
37705 if (copy_from_user(&ifr, argp, ifreq_len))
37706 return -EFAULT;
37707 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
37708 index cd8ccb2..cff5144 100644
37709 --- a/drivers/net/usb/hso.c
37710 +++ b/drivers/net/usb/hso.c
37711 @@ -71,7 +71,7 @@
37712 #include <asm/byteorder.h>
37713 #include <linux/serial_core.h>
37714 #include <linux/serial.h>
37715 -
37716 +#include <asm/local.h>
37717
37718 #define MOD_AUTHOR "Option Wireless"
37719 #define MOD_DESCRIPTION "USB High Speed Option driver"
37720 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
37721 struct urb *urb;
37722
37723 urb = serial->rx_urb[0];
37724 - if (serial->port.count > 0) {
37725 + if (atomic_read(&serial->port.count) > 0) {
37726 count = put_rxbuf_data(urb, serial);
37727 if (count == -1)
37728 return;
37729 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
37730 DUMP1(urb->transfer_buffer, urb->actual_length);
37731
37732 /* Anyone listening? */
37733 - if (serial->port.count == 0)
37734 + if (atomic_read(&serial->port.count) == 0)
37735 return;
37736
37737 if (status == 0) {
37738 @@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
37739 tty_port_tty_set(&serial->port, tty);
37740
37741 /* check for port already opened, if not set the termios */
37742 - serial->port.count++;
37743 - if (serial->port.count == 1) {
37744 + if (atomic_inc_return(&serial->port.count) == 1) {
37745 serial->rx_state = RX_IDLE;
37746 /* Force default termio settings */
37747 _hso_serial_set_termios(tty, NULL);
37748 @@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
37749 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
37750 if (result) {
37751 hso_stop_serial_device(serial->parent);
37752 - serial->port.count--;
37753 + atomic_dec(&serial->port.count);
37754 kref_put(&serial->parent->ref, hso_serial_ref_free);
37755 }
37756 } else {
37757 @@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
37758
37759 /* reset the rts and dtr */
37760 /* do the actual close */
37761 - serial->port.count--;
37762 + atomic_dec(&serial->port.count);
37763
37764 - if (serial->port.count <= 0) {
37765 - serial->port.count = 0;
37766 + if (atomic_read(&serial->port.count) <= 0) {
37767 + atomic_set(&serial->port.count, 0);
37768 tty_port_tty_set(&serial->port, NULL);
37769 if (!usb_gone)
37770 hso_stop_serial_device(serial->parent);
37771 @@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
37772
37773 /* the actual setup */
37774 spin_lock_irqsave(&serial->serial_lock, flags);
37775 - if (serial->port.count)
37776 + if (atomic_read(&serial->port.count))
37777 _hso_serial_set_termios(tty, old);
37778 else
37779 tty->termios = *old;
37780 @@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
37781 D1("Pending read interrupt on port %d\n", i);
37782 spin_lock(&serial->serial_lock);
37783 if (serial->rx_state == RX_IDLE &&
37784 - serial->port.count > 0) {
37785 + atomic_read(&serial->port.count) > 0) {
37786 /* Setup and send a ctrl req read on
37787 * port i */
37788 if (!serial->rx_urb_filled[0]) {
37789 @@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
37790 /* Start all serial ports */
37791 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
37792 if (serial_table[i] && (serial_table[i]->interface == iface)) {
37793 - if (dev2ser(serial_table[i])->port.count) {
37794 + if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
37795 result =
37796 hso_start_serial_device(serial_table[i], GFP_NOIO);
37797 hso_kick_transmit(dev2ser(serial_table[i]));
37798 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
37799 index 8d78253..bebbb68 100644
37800 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
37801 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
37802 @@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
37803 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
37804 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
37805
37806 - ACCESS_ONCE(ads->ds_link) = i->link;
37807 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
37808 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
37809 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
37810
37811 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
37812 ctl6 = SM(i->keytype, AR_EncrType);
37813 @@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
37814
37815 if ((i->is_first || i->is_last) &&
37816 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
37817 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
37818 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
37819 | set11nTries(i->rates, 1)
37820 | set11nTries(i->rates, 2)
37821 | set11nTries(i->rates, 3)
37822 | (i->dur_update ? AR_DurUpdateEna : 0)
37823 | SM(0, AR_BurstDur);
37824
37825 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
37826 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
37827 | set11nRate(i->rates, 1)
37828 | set11nRate(i->rates, 2)
37829 | set11nRate(i->rates, 3);
37830 } else {
37831 - ACCESS_ONCE(ads->ds_ctl2) = 0;
37832 - ACCESS_ONCE(ads->ds_ctl3) = 0;
37833 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
37834 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
37835 }
37836
37837 if (!i->is_first) {
37838 - ACCESS_ONCE(ads->ds_ctl0) = 0;
37839 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
37840 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
37841 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
37842 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
37843 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
37844 return;
37845 }
37846
37847 @@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
37848 break;
37849 }
37850
37851 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
37852 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
37853 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
37854 | SM(i->txpower, AR_XmitPower)
37855 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
37856 @@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
37857 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
37858 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
37859
37860 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
37861 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
37862 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
37863 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
37864
37865 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
37866 return;
37867
37868 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
37869 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
37870 | set11nPktDurRTSCTS(i->rates, 1);
37871
37872 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
37873 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
37874 | set11nPktDurRTSCTS(i->rates, 3);
37875
37876 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
37877 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
37878 | set11nRateFlags(i->rates, 1)
37879 | set11nRateFlags(i->rates, 2)
37880 | set11nRateFlags(i->rates, 3)
37881 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
37882 index 301bf72..3f5654f 100644
37883 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
37884 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
37885 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
37886 (i->qcu << AR_TxQcuNum_S) | desc_len;
37887
37888 checksum += val;
37889 - ACCESS_ONCE(ads->info) = val;
37890 + ACCESS_ONCE_RW(ads->info) = val;
37891
37892 checksum += i->link;
37893 - ACCESS_ONCE(ads->link) = i->link;
37894 + ACCESS_ONCE_RW(ads->link) = i->link;
37895
37896 checksum += i->buf_addr[0];
37897 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
37898 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
37899 checksum += i->buf_addr[1];
37900 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
37901 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
37902 checksum += i->buf_addr[2];
37903 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
37904 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
37905 checksum += i->buf_addr[3];
37906 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
37907 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
37908
37909 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
37910 - ACCESS_ONCE(ads->ctl3) = val;
37911 + ACCESS_ONCE_RW(ads->ctl3) = val;
37912 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
37913 - ACCESS_ONCE(ads->ctl5) = val;
37914 + ACCESS_ONCE_RW(ads->ctl5) = val;
37915 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
37916 - ACCESS_ONCE(ads->ctl7) = val;
37917 + ACCESS_ONCE_RW(ads->ctl7) = val;
37918 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
37919 - ACCESS_ONCE(ads->ctl9) = val;
37920 + ACCESS_ONCE_RW(ads->ctl9) = val;
37921
37922 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
37923 - ACCESS_ONCE(ads->ctl10) = checksum;
37924 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
37925
37926 if (i->is_first || i->is_last) {
37927 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
37928 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
37929 | set11nTries(i->rates, 1)
37930 | set11nTries(i->rates, 2)
37931 | set11nTries(i->rates, 3)
37932 | (i->dur_update ? AR_DurUpdateEna : 0)
37933 | SM(0, AR_BurstDur);
37934
37935 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
37936 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
37937 | set11nRate(i->rates, 1)
37938 | set11nRate(i->rates, 2)
37939 | set11nRate(i->rates, 3);
37940 } else {
37941 - ACCESS_ONCE(ads->ctl13) = 0;
37942 - ACCESS_ONCE(ads->ctl14) = 0;
37943 + ACCESS_ONCE_RW(ads->ctl13) = 0;
37944 + ACCESS_ONCE_RW(ads->ctl14) = 0;
37945 }
37946
37947 ads->ctl20 = 0;
37948 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
37949
37950 ctl17 = SM(i->keytype, AR_EncrType);
37951 if (!i->is_first) {
37952 - ACCESS_ONCE(ads->ctl11) = 0;
37953 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
37954 - ACCESS_ONCE(ads->ctl15) = 0;
37955 - ACCESS_ONCE(ads->ctl16) = 0;
37956 - ACCESS_ONCE(ads->ctl17) = ctl17;
37957 - ACCESS_ONCE(ads->ctl18) = 0;
37958 - ACCESS_ONCE(ads->ctl19) = 0;
37959 + ACCESS_ONCE_RW(ads->ctl11) = 0;
37960 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
37961 + ACCESS_ONCE_RW(ads->ctl15) = 0;
37962 + ACCESS_ONCE_RW(ads->ctl16) = 0;
37963 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
37964 + ACCESS_ONCE_RW(ads->ctl18) = 0;
37965 + ACCESS_ONCE_RW(ads->ctl19) = 0;
37966 return;
37967 }
37968
37969 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
37970 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
37971 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
37972 | SM(i->txpower, AR_XmitPower)
37973 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
37974 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
37975 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
37976 ctl12 |= SM(val, AR_PAPRDChainMask);
37977
37978 - ACCESS_ONCE(ads->ctl12) = ctl12;
37979 - ACCESS_ONCE(ads->ctl17) = ctl17;
37980 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
37981 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
37982
37983 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
37984 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
37985 | set11nPktDurRTSCTS(i->rates, 1);
37986
37987 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
37988 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
37989 | set11nPktDurRTSCTS(i->rates, 3);
37990
37991 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
37992 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
37993 | set11nRateFlags(i->rates, 1)
37994 | set11nRateFlags(i->rates, 2)
37995 | set11nRateFlags(i->rates, 3)
37996 | SM(i->rtscts_rate, AR_RTSCTSRate);
37997
37998 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
37999 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
38000 }
38001
38002 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
38003 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
38004 index 9d26fc5..60d9f14 100644
38005 --- a/drivers/net/wireless/ath/ath9k/hw.h
38006 +++ b/drivers/net/wireless/ath/ath9k/hw.h
38007 @@ -658,7 +658,7 @@ struct ath_hw_private_ops {
38008
38009 /* ANI */
38010 void (*ani_cache_ini_regs)(struct ath_hw *ah);
38011 -};
38012 +} __no_const;
38013
38014 /**
38015 * struct ath_hw_ops - callbacks used by hardware code and driver code
38016 @@ -688,7 +688,7 @@ struct ath_hw_ops {
38017 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
38018 struct ath_hw_antcomb_conf *antconf);
38019 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
38020 -};
38021 +} __no_const;
38022
38023 struct ath_nf_limits {
38024 s16 max;
38025 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
38026 index 3726cd6..b655808 100644
38027 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
38028 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
38029 @@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
38030 */
38031 if (il3945_mod_params.disable_hw_scan) {
38032 D_INFO("Disabling hw_scan\n");
38033 - il3945_mac_ops.hw_scan = NULL;
38034 + pax_open_kernel();
38035 + *(void **)&il3945_mac_ops.hw_scan = NULL;
38036 + pax_close_kernel();
38037 }
38038
38039 D_INFO("*** LOAD DRIVER ***\n");
38040 diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
38041 index 5b9533e..7733880 100644
38042 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
38043 +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
38044 @@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
38045 {
38046 struct iwl_priv *priv = file->private_data;
38047 char buf[64];
38048 - int buf_size;
38049 + size_t buf_size;
38050 u32 offset, len;
38051
38052 memset(buf, 0, sizeof(buf));
38053 @@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
38054 struct iwl_priv *priv = file->private_data;
38055
38056 char buf[8];
38057 - int buf_size;
38058 + size_t buf_size;
38059 u32 reset_flag;
38060
38061 memset(buf, 0, sizeof(buf));
38062 @@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
38063 {
38064 struct iwl_priv *priv = file->private_data;
38065 char buf[8];
38066 - int buf_size;
38067 + size_t buf_size;
38068 int ht40;
38069
38070 memset(buf, 0, sizeof(buf));
38071 @@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
38072 {
38073 struct iwl_priv *priv = file->private_data;
38074 char buf[8];
38075 - int buf_size;
38076 + size_t buf_size;
38077 int value;
38078
38079 memset(buf, 0, sizeof(buf));
38080 @@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
38081 {
38082 struct iwl_priv *priv = file->private_data;
38083 char buf[8];
38084 - int buf_size;
38085 + size_t buf_size;
38086 int clear;
38087
38088 memset(buf, 0, sizeof(buf));
38089 @@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
38090 {
38091 struct iwl_priv *priv = file->private_data;
38092 char buf[8];
38093 - int buf_size;
38094 + size_t buf_size;
38095 int trace;
38096
38097 memset(buf, 0, sizeof(buf));
38098 @@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
38099 {
38100 struct iwl_priv *priv = file->private_data;
38101 char buf[8];
38102 - int buf_size;
38103 + size_t buf_size;
38104 int missed;
38105
38106 memset(buf, 0, sizeof(buf));
38107 @@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
38108
38109 struct iwl_priv *priv = file->private_data;
38110 char buf[8];
38111 - int buf_size;
38112 + size_t buf_size;
38113 int plcp;
38114
38115 memset(buf, 0, sizeof(buf));
38116 @@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
38117
38118 struct iwl_priv *priv = file->private_data;
38119 char buf[8];
38120 - int buf_size;
38121 + size_t buf_size;
38122 int flush;
38123
38124 memset(buf, 0, sizeof(buf));
38125 @@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
38126
38127 struct iwl_priv *priv = file->private_data;
38128 char buf[8];
38129 - int buf_size;
38130 + size_t buf_size;
38131 int rts;
38132
38133 if (!priv->cfg->ht_params)
38134 @@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
38135 {
38136 struct iwl_priv *priv = file->private_data;
38137 char buf[8];
38138 - int buf_size;
38139 + size_t buf_size;
38140
38141 memset(buf, 0, sizeof(buf));
38142 buf_size = min(count, sizeof(buf) - 1);
38143 @@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
38144 struct iwl_priv *priv = file->private_data;
38145 u32 event_log_flag;
38146 char buf[8];
38147 - int buf_size;
38148 + size_t buf_size;
38149
38150 /* check that the interface is up */
38151 if (!iwl_is_ready(priv))
38152 @@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
38153 struct iwl_priv *priv = file->private_data;
38154 char buf[8];
38155 u32 calib_disabled;
38156 - int buf_size;
38157 + size_t buf_size;
38158
38159 memset(buf, 0, sizeof(buf));
38160 buf_size = min(count, sizeof(buf) - 1);
38161 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
38162 index 35708b9..31f7754 100644
38163 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
38164 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
38165 @@ -1100,7 +1100,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
38166 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
38167
38168 char buf[8];
38169 - int buf_size;
38170 + size_t buf_size;
38171 u32 reset_flag;
38172
38173 memset(buf, 0, sizeof(buf));
38174 @@ -1121,7 +1121,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
38175 {
38176 struct iwl_trans *trans = file->private_data;
38177 char buf[8];
38178 - int buf_size;
38179 + size_t buf_size;
38180 int csr;
38181
38182 memset(buf, 0, sizeof(buf));
38183 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
38184 index ff90855..e46d223 100644
38185 --- a/drivers/net/wireless/mac80211_hwsim.c
38186 +++ b/drivers/net/wireless/mac80211_hwsim.c
38187 @@ -2062,25 +2062,19 @@ static int __init init_mac80211_hwsim(void)
38188
38189 if (channels > 1) {
38190 hwsim_if_comb.num_different_channels = channels;
38191 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
38192 - mac80211_hwsim_ops.cancel_hw_scan =
38193 - mac80211_hwsim_cancel_hw_scan;
38194 - mac80211_hwsim_ops.sw_scan_start = NULL;
38195 - mac80211_hwsim_ops.sw_scan_complete = NULL;
38196 - mac80211_hwsim_ops.remain_on_channel =
38197 - mac80211_hwsim_roc;
38198 - mac80211_hwsim_ops.cancel_remain_on_channel =
38199 - mac80211_hwsim_croc;
38200 - mac80211_hwsim_ops.add_chanctx =
38201 - mac80211_hwsim_add_chanctx;
38202 - mac80211_hwsim_ops.remove_chanctx =
38203 - mac80211_hwsim_remove_chanctx;
38204 - mac80211_hwsim_ops.change_chanctx =
38205 - mac80211_hwsim_change_chanctx;
38206 - mac80211_hwsim_ops.assign_vif_chanctx =
38207 - mac80211_hwsim_assign_vif_chanctx;
38208 - mac80211_hwsim_ops.unassign_vif_chanctx =
38209 - mac80211_hwsim_unassign_vif_chanctx;
38210 + pax_open_kernel();
38211 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
38212 + *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
38213 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
38214 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
38215 + *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
38216 + *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
38217 + *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
38218 + *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
38219 + *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
38220 + *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
38221 + *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
38222 + pax_close_kernel();
38223 }
38224
38225 spin_lock_init(&hwsim_radio_lock);
38226 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
38227 index abe1d03..fb02c22 100644
38228 --- a/drivers/net/wireless/rndis_wlan.c
38229 +++ b/drivers/net/wireless/rndis_wlan.c
38230 @@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
38231
38232 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
38233
38234 - if (rts_threshold < 0 || rts_threshold > 2347)
38235 + if (rts_threshold > 2347)
38236 rts_threshold = 2347;
38237
38238 tmp = cpu_to_le32(rts_threshold);
38239 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
38240 index 0751b35..246ba3e 100644
38241 --- a/drivers/net/wireless/rt2x00/rt2x00.h
38242 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
38243 @@ -398,7 +398,7 @@ struct rt2x00_intf {
38244 * for hardware which doesn't support hardware
38245 * sequence counting.
38246 */
38247 - atomic_t seqno;
38248 + atomic_unchecked_t seqno;
38249 };
38250
38251 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
38252 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
38253 index e488b94..14b6a0c 100644
38254 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
38255 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
38256 @@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
38257 * sequence counter given by mac80211.
38258 */
38259 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
38260 - seqno = atomic_add_return(0x10, &intf->seqno);
38261 + seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
38262 else
38263 - seqno = atomic_read(&intf->seqno);
38264 + seqno = atomic_read_unchecked(&intf->seqno);
38265
38266 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
38267 hdr->seq_ctrl |= cpu_to_le16(seqno);
38268 diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
38269 index e57ee48..541cf6c 100644
38270 --- a/drivers/net/wireless/ti/wl1251/sdio.c
38271 +++ b/drivers/net/wireless/ti/wl1251/sdio.c
38272 @@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
38273
38274 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
38275
38276 - wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
38277 - wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
38278 + pax_open_kernel();
38279 + *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
38280 + *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
38281 + pax_close_kernel();
38282
38283 wl1251_info("using dedicated interrupt line");
38284 } else {
38285 - wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
38286 - wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
38287 + pax_open_kernel();
38288 + *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
38289 + *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
38290 + pax_close_kernel();
38291
38292 wl1251_info("using SDIO interrupt");
38293 }
38294 diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
38295 index e5f5f8f..fdf15b7 100644
38296 --- a/drivers/net/wireless/ti/wl12xx/main.c
38297 +++ b/drivers/net/wireless/ti/wl12xx/main.c
38298 @@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
38299 sizeof(wl->conf.mem));
38300
38301 /* read data preparation is only needed by wl127x */
38302 - wl->ops->prepare_read = wl127x_prepare_read;
38303 + pax_open_kernel();
38304 + *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
38305 + pax_close_kernel();
38306
38307 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
38308 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
38309 @@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
38310 sizeof(wl->conf.mem));
38311
38312 /* read data preparation is only needed by wl127x */
38313 - wl->ops->prepare_read = wl127x_prepare_read;
38314 + pax_open_kernel();
38315 + *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
38316 + pax_close_kernel();
38317
38318 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
38319 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
38320 diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
38321 index 8d8c1f8..e754844 100644
38322 --- a/drivers/net/wireless/ti/wl18xx/main.c
38323 +++ b/drivers/net/wireless/ti/wl18xx/main.c
38324 @@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
38325 }
38326
38327 if (!checksum_param) {
38328 - wl18xx_ops.set_rx_csum = NULL;
38329 - wl18xx_ops.init_vif = NULL;
38330 + pax_open_kernel();
38331 + *(void **)&wl18xx_ops.set_rx_csum = NULL;
38332 + *(void **)&wl18xx_ops.init_vif = NULL;
38333 + pax_close_kernel();
38334 }
38335
38336 /* Enable 11a Band only if we have 5G antennas */
38337 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
38338 index d93b2b6..ae50401 100644
38339 --- a/drivers/oprofile/buffer_sync.c
38340 +++ b/drivers/oprofile/buffer_sync.c
38341 @@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
38342 if (cookie == NO_COOKIE)
38343 offset = pc;
38344 if (cookie == INVALID_COOKIE) {
38345 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
38346 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
38347 offset = pc;
38348 }
38349 if (cookie != last_cookie) {
38350 @@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
38351 /* add userspace sample */
38352
38353 if (!mm) {
38354 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
38355 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
38356 return 0;
38357 }
38358
38359 cookie = lookup_dcookie(mm, s->eip, &offset);
38360
38361 if (cookie == INVALID_COOKIE) {
38362 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
38363 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
38364 return 0;
38365 }
38366
38367 @@ -552,7 +552,7 @@ void sync_buffer(int cpu)
38368 /* ignore backtraces if failed to add a sample */
38369 if (state == sb_bt_start) {
38370 state = sb_bt_ignore;
38371 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
38372 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
38373 }
38374 }
38375 release_mm(mm);
38376 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
38377 index c0cc4e7..44d4e54 100644
38378 --- a/drivers/oprofile/event_buffer.c
38379 +++ b/drivers/oprofile/event_buffer.c
38380 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
38381 }
38382
38383 if (buffer_pos == buffer_size) {
38384 - atomic_inc(&oprofile_stats.event_lost_overflow);
38385 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
38386 return;
38387 }
38388
38389 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
38390 index ed2c3ec..deda85a 100644
38391 --- a/drivers/oprofile/oprof.c
38392 +++ b/drivers/oprofile/oprof.c
38393 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
38394 if (oprofile_ops.switch_events())
38395 return;
38396
38397 - atomic_inc(&oprofile_stats.multiplex_counter);
38398 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
38399 start_switch_worker();
38400 }
38401
38402 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
38403 index 917d28e..d62d981 100644
38404 --- a/drivers/oprofile/oprofile_stats.c
38405 +++ b/drivers/oprofile/oprofile_stats.c
38406 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
38407 cpu_buf->sample_invalid_eip = 0;
38408 }
38409
38410 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
38411 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
38412 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
38413 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
38414 - atomic_set(&oprofile_stats.multiplex_counter, 0);
38415 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
38416 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
38417 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
38418 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
38419 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
38420 }
38421
38422
38423 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
38424 index 38b6fc0..b5cbfce 100644
38425 --- a/drivers/oprofile/oprofile_stats.h
38426 +++ b/drivers/oprofile/oprofile_stats.h
38427 @@ -13,11 +13,11 @@
38428 #include <linux/atomic.h>
38429
38430 struct oprofile_stat_struct {
38431 - atomic_t sample_lost_no_mm;
38432 - atomic_t sample_lost_no_mapping;
38433 - atomic_t bt_lost_no_mapping;
38434 - atomic_t event_lost_overflow;
38435 - atomic_t multiplex_counter;
38436 + atomic_unchecked_t sample_lost_no_mm;
38437 + atomic_unchecked_t sample_lost_no_mapping;
38438 + atomic_unchecked_t bt_lost_no_mapping;
38439 + atomic_unchecked_t event_lost_overflow;
38440 + atomic_unchecked_t multiplex_counter;
38441 };
38442
38443 extern struct oprofile_stat_struct oprofile_stats;
38444 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
38445 index 849357c..b83c1e0 100644
38446 --- a/drivers/oprofile/oprofilefs.c
38447 +++ b/drivers/oprofile/oprofilefs.c
38448 @@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
38449
38450
38451 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
38452 - char const *name, atomic_t *val)
38453 + char const *name, atomic_unchecked_t *val)
38454 {
38455 return __oprofilefs_create_file(sb, root, name,
38456 &atomic_ro_fops, 0444, val);
38457 diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
38458 index 93404f7..4a313d8 100644
38459 --- a/drivers/oprofile/timer_int.c
38460 +++ b/drivers/oprofile/timer_int.c
38461 @@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
38462 return NOTIFY_OK;
38463 }
38464
38465 -static struct notifier_block __refdata oprofile_cpu_notifier = {
38466 +static struct notifier_block oprofile_cpu_notifier = {
38467 .notifier_call = oprofile_cpu_notify,
38468 };
38469
38470 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
38471 index 3f56bc0..707d642 100644
38472 --- a/drivers/parport/procfs.c
38473 +++ b/drivers/parport/procfs.c
38474 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
38475
38476 *ppos += len;
38477
38478 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
38479 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
38480 }
38481
38482 #ifdef CONFIG_PARPORT_1284
38483 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
38484
38485 *ppos += len;
38486
38487 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
38488 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
38489 }
38490 #endif /* IEEE1284.3 support. */
38491
38492 diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
38493 index a6a71c4..c91097b 100644
38494 --- a/drivers/pci/hotplug/cpcihp_generic.c
38495 +++ b/drivers/pci/hotplug/cpcihp_generic.c
38496 @@ -73,7 +73,6 @@ static u16 port;
38497 static unsigned int enum_bit;
38498 static u8 enum_mask;
38499
38500 -static struct cpci_hp_controller_ops generic_hpc_ops;
38501 static struct cpci_hp_controller generic_hpc;
38502
38503 static int __init validate_parameters(void)
38504 @@ -139,6 +138,10 @@ static int query_enum(void)
38505 return ((value & enum_mask) == enum_mask);
38506 }
38507
38508 +static struct cpci_hp_controller_ops generic_hpc_ops = {
38509 + .query_enum = query_enum,
38510 +};
38511 +
38512 static int __init cpcihp_generic_init(void)
38513 {
38514 int status;
38515 @@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
38516 pci_dev_put(dev);
38517
38518 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
38519 - generic_hpc_ops.query_enum = query_enum;
38520 generic_hpc.ops = &generic_hpc_ops;
38521
38522 status = cpci_hp_register_controller(&generic_hpc);
38523 diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
38524 index 449b4bb..257e2e8 100644
38525 --- a/drivers/pci/hotplug/cpcihp_zt5550.c
38526 +++ b/drivers/pci/hotplug/cpcihp_zt5550.c
38527 @@ -59,7 +59,6 @@
38528 /* local variables */
38529 static bool debug;
38530 static bool poll;
38531 -static struct cpci_hp_controller_ops zt5550_hpc_ops;
38532 static struct cpci_hp_controller zt5550_hpc;
38533
38534 /* Primary cPCI bus bridge device */
38535 @@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
38536 return 0;
38537 }
38538
38539 +static struct cpci_hp_controller_ops zt5550_hpc_ops = {
38540 + .query_enum = zt5550_hc_query_enum,
38541 +};
38542 +
38543 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
38544 {
38545 int status;
38546 @@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
38547 dbg("returned from zt5550_hc_config");
38548
38549 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
38550 - zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
38551 zt5550_hpc.ops = &zt5550_hpc_ops;
38552 if(!poll) {
38553 zt5550_hpc.irq = hc_dev->irq;
38554 zt5550_hpc.irq_flags = IRQF_SHARED;
38555 zt5550_hpc.dev_id = hc_dev;
38556
38557 - zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
38558 - zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
38559 - zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
38560 + pax_open_kernel();
38561 + *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
38562 + *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
38563 + *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
38564 + pax_open_kernel();
38565 } else {
38566 info("using ENUM# polling mode");
38567 }
38568 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
38569 index 76ba8a1..20ca857 100644
38570 --- a/drivers/pci/hotplug/cpqphp_nvram.c
38571 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
38572 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
38573
38574 void compaq_nvram_init (void __iomem *rom_start)
38575 {
38576 +
38577 +#ifndef CONFIG_PAX_KERNEXEC
38578 if (rom_start) {
38579 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
38580 }
38581 +#endif
38582 +
38583 dbg("int15 entry = %p\n", compaq_int15_entry_point);
38584
38585 /* initialize our int15 lock */
38586 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
38587 index 8474b6a..ee81993 100644
38588 --- a/drivers/pci/pcie/aspm.c
38589 +++ b/drivers/pci/pcie/aspm.c
38590 @@ -27,9 +27,9 @@
38591 #define MODULE_PARAM_PREFIX "pcie_aspm."
38592
38593 /* Note: those are not register definitions */
38594 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
38595 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
38596 -#define ASPM_STATE_L1 (4) /* L1 state */
38597 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
38598 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
38599 +#define ASPM_STATE_L1 (4U) /* L1 state */
38600 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
38601 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
38602
38603 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
38604 index 6186f03..1a78714 100644
38605 --- a/drivers/pci/probe.c
38606 +++ b/drivers/pci/probe.c
38607 @@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
38608 struct pci_bus_region region;
38609 bool bar_too_big = false, bar_disabled = false;
38610
38611 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
38612 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
38613
38614 /* No printks while decoding is disabled! */
38615 if (!dev->mmio_always_on) {
38616 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
38617 index 9b8505c..f00870a 100644
38618 --- a/drivers/pci/proc.c
38619 +++ b/drivers/pci/proc.c
38620 @@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
38621 static int __init pci_proc_init(void)
38622 {
38623 struct pci_dev *dev = NULL;
38624 +
38625 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
38626 +#ifdef CONFIG_GRKERNSEC_PROC_USER
38627 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
38628 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
38629 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
38630 +#endif
38631 +#else
38632 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
38633 +#endif
38634 proc_create("devices", 0, proc_bus_pci_dir,
38635 &proc_bus_pci_dev_operations);
38636 proc_initialized = 1;
38637 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
38638 index f946ca7..f25c833 100644
38639 --- a/drivers/platform/x86/thinkpad_acpi.c
38640 +++ b/drivers/platform/x86/thinkpad_acpi.c
38641 @@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
38642 return 0;
38643 }
38644
38645 -void static hotkey_mask_warn_incomplete_mask(void)
38646 +static void hotkey_mask_warn_incomplete_mask(void)
38647 {
38648 /* log only what the user can fix... */
38649 const u32 wantedmask = hotkey_driver_mask &
38650 @@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
38651 }
38652 }
38653
38654 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
38655 - struct tp_nvram_state *newn,
38656 - const u32 event_mask)
38657 -{
38658 -
38659 #define TPACPI_COMPARE_KEY(__scancode, __member) \
38660 do { \
38661 if ((event_mask & (1 << __scancode)) && \
38662 @@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
38663 tpacpi_hotkey_send_key(__scancode); \
38664 } while (0)
38665
38666 - void issue_volchange(const unsigned int oldvol,
38667 - const unsigned int newvol)
38668 - {
38669 - unsigned int i = oldvol;
38670 +static void issue_volchange(const unsigned int oldvol,
38671 + const unsigned int newvol,
38672 + const u32 event_mask)
38673 +{
38674 + unsigned int i = oldvol;
38675
38676 - while (i > newvol) {
38677 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
38678 - i--;
38679 - }
38680 - while (i < newvol) {
38681 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
38682 - i++;
38683 - }
38684 + while (i > newvol) {
38685 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
38686 + i--;
38687 }
38688 + while (i < newvol) {
38689 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
38690 + i++;
38691 + }
38692 +}
38693
38694 - void issue_brightnesschange(const unsigned int oldbrt,
38695 - const unsigned int newbrt)
38696 - {
38697 - unsigned int i = oldbrt;
38698 +static void issue_brightnesschange(const unsigned int oldbrt,
38699 + const unsigned int newbrt,
38700 + const u32 event_mask)
38701 +{
38702 + unsigned int i = oldbrt;
38703
38704 - while (i > newbrt) {
38705 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
38706 - i--;
38707 - }
38708 - while (i < newbrt) {
38709 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
38710 - i++;
38711 - }
38712 + while (i > newbrt) {
38713 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
38714 + i--;
38715 + }
38716 + while (i < newbrt) {
38717 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
38718 + i++;
38719 }
38720 +}
38721
38722 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
38723 + struct tp_nvram_state *newn,
38724 + const u32 event_mask)
38725 +{
38726 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
38727 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
38728 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
38729 @@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
38730 oldn->volume_level != newn->volume_level) {
38731 /* recently muted, or repeated mute keypress, or
38732 * multiple presses ending in mute */
38733 - issue_volchange(oldn->volume_level, newn->volume_level);
38734 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
38735 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
38736 }
38737 } else {
38738 @@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
38739 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
38740 }
38741 if (oldn->volume_level != newn->volume_level) {
38742 - issue_volchange(oldn->volume_level, newn->volume_level);
38743 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
38744 } else if (oldn->volume_toggle != newn->volume_toggle) {
38745 /* repeated vol up/down keypress at end of scale ? */
38746 if (newn->volume_level == 0)
38747 @@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
38748 /* handle brightness */
38749 if (oldn->brightness_level != newn->brightness_level) {
38750 issue_brightnesschange(oldn->brightness_level,
38751 - newn->brightness_level);
38752 + newn->brightness_level,
38753 + event_mask);
38754 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
38755 /* repeated key presses that didn't change state */
38756 if (newn->brightness_level == 0)
38757 @@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
38758 && !tp_features.bright_unkfw)
38759 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
38760 }
38761 +}
38762
38763 #undef TPACPI_COMPARE_KEY
38764 #undef TPACPI_MAY_SEND_KEY
38765 -}
38766
38767 /*
38768 * Polling driver
38769 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
38770 index 769d265..a3a05ca 100644
38771 --- a/drivers/pnp/pnpbios/bioscalls.c
38772 +++ b/drivers/pnp/pnpbios/bioscalls.c
38773 @@ -58,7 +58,7 @@ do { \
38774 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
38775 } while(0)
38776
38777 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
38778 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
38779 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
38780
38781 /*
38782 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
38783
38784 cpu = get_cpu();
38785 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
38786 +
38787 + pax_open_kernel();
38788 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
38789 + pax_close_kernel();
38790
38791 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
38792 spin_lock_irqsave(&pnp_bios_lock, flags);
38793 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
38794 :"memory");
38795 spin_unlock_irqrestore(&pnp_bios_lock, flags);
38796
38797 + pax_open_kernel();
38798 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
38799 + pax_close_kernel();
38800 +
38801 put_cpu();
38802
38803 /* If we get here and this is set then the PnP BIOS faulted on us. */
38804 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
38805 return status;
38806 }
38807
38808 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
38809 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
38810 {
38811 int i;
38812
38813 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
38814 pnp_bios_callpoint.offset = header->fields.pm16offset;
38815 pnp_bios_callpoint.segment = PNP_CS16;
38816
38817 + pax_open_kernel();
38818 +
38819 for_each_possible_cpu(i) {
38820 struct desc_struct *gdt = get_cpu_gdt_table(i);
38821 if (!gdt)
38822 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
38823 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
38824 (unsigned long)__va(header->fields.pm16dseg));
38825 }
38826 +
38827 + pax_close_kernel();
38828 }
38829 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
38830 index 3e6db1c..1fbbdae 100644
38831 --- a/drivers/pnp/resource.c
38832 +++ b/drivers/pnp/resource.c
38833 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
38834 return 1;
38835
38836 /* check if the resource is valid */
38837 - if (*irq < 0 || *irq > 15)
38838 + if (*irq > 15)
38839 return 0;
38840
38841 /* check if the resource is reserved */
38842 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
38843 return 1;
38844
38845 /* check if the resource is valid */
38846 - if (*dma < 0 || *dma == 4 || *dma > 7)
38847 + if (*dma == 4 || *dma > 7)
38848 return 0;
38849
38850 /* check if the resource is reserved */
38851 diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
38852 index 7df7c5f..bd48c47 100644
38853 --- a/drivers/power/pda_power.c
38854 +++ b/drivers/power/pda_power.c
38855 @@ -37,7 +37,11 @@ static int polling;
38856
38857 #ifdef CONFIG_USB_OTG_UTILS
38858 static struct usb_phy *transceiver;
38859 -static struct notifier_block otg_nb;
38860 +static int otg_handle_notification(struct notifier_block *nb,
38861 + unsigned long event, void *unused);
38862 +static struct notifier_block otg_nb = {
38863 + .notifier_call = otg_handle_notification
38864 +};
38865 #endif
38866
38867 static struct regulator *ac_draw;
38868 @@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
38869
38870 #ifdef CONFIG_USB_OTG_UTILS
38871 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
38872 - otg_nb.notifier_call = otg_handle_notification;
38873 ret = usb_register_notifier(transceiver, &otg_nb);
38874 if (ret) {
38875 dev_err(dev, "failure to register otg notifier\n");
38876 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
38877 index 4d7c635..9860196 100644
38878 --- a/drivers/regulator/max8660.c
38879 +++ b/drivers/regulator/max8660.c
38880 @@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
38881 max8660->shadow_regs[MAX8660_OVER1] = 5;
38882 } else {
38883 /* Otherwise devices can be toggled via software */
38884 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
38885 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
38886 + pax_open_kernel();
38887 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
38888 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
38889 + pax_close_kernel();
38890 }
38891
38892 /*
38893 diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
38894 index 9a8ea91..c483dd9 100644
38895 --- a/drivers/regulator/max8973-regulator.c
38896 +++ b/drivers/regulator/max8973-regulator.c
38897 @@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
38898 if (!pdata->enable_ext_control) {
38899 max->desc.enable_reg = MAX8973_VOUT;
38900 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
38901 - max8973_dcdc_ops.enable = regulator_enable_regmap;
38902 - max8973_dcdc_ops.disable = regulator_disable_regmap;
38903 - max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
38904 + pax_open_kernel();
38905 + *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
38906 + *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
38907 + *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
38908 + pax_close_kernel();
38909 }
38910
38911 max->enable_external_control = pdata->enable_ext_control;
38912 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
38913 index 0d84b1f..c2da6ac 100644
38914 --- a/drivers/regulator/mc13892-regulator.c
38915 +++ b/drivers/regulator/mc13892-regulator.c
38916 @@ -540,10 +540,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
38917 }
38918 mc13xxx_unlock(mc13892);
38919
38920 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
38921 + pax_open_kernel();
38922 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
38923 = mc13892_vcam_set_mode;
38924 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
38925 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
38926 = mc13892_vcam_get_mode;
38927 + pax_close_kernel();
38928
38929 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
38930 ARRAY_SIZE(mc13892_regulators));
38931 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
38932 index 9a86b4b..3a383dc 100644
38933 --- a/drivers/rtc/rtc-dev.c
38934 +++ b/drivers/rtc/rtc-dev.c
38935 @@ -14,6 +14,7 @@
38936 #include <linux/module.h>
38937 #include <linux/rtc.h>
38938 #include <linux/sched.h>
38939 +#include <linux/grsecurity.h>
38940 #include "rtc-core.h"
38941
38942 static dev_t rtc_devt;
38943 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
38944 if (copy_from_user(&tm, uarg, sizeof(tm)))
38945 return -EFAULT;
38946
38947 + gr_log_timechange();
38948 +
38949 return rtc_set_time(rtc, &tm);
38950
38951 case RTC_PIE_ON:
38952 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
38953 index 4ad7e36..d004679 100644
38954 --- a/drivers/scsi/bfa/bfa.h
38955 +++ b/drivers/scsi/bfa/bfa.h
38956 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
38957 u32 *end);
38958 int cpe_vec_q0;
38959 int rme_vec_q0;
38960 -};
38961 +} __no_const;
38962 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
38963
38964 struct bfa_faa_cbfn_s {
38965 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
38966 index e693af6..2e525b6 100644
38967 --- a/drivers/scsi/bfa/bfa_fcpim.h
38968 +++ b/drivers/scsi/bfa/bfa_fcpim.h
38969 @@ -36,7 +36,7 @@ struct bfa_iotag_s {
38970
38971 struct bfa_itn_s {
38972 bfa_isr_func_t isr;
38973 -};
38974 +} __no_const;
38975
38976 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
38977 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
38978 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
38979 index 23a90e7..9cf04ee 100644
38980 --- a/drivers/scsi/bfa/bfa_ioc.h
38981 +++ b/drivers/scsi/bfa/bfa_ioc.h
38982 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
38983 bfa_ioc_disable_cbfn_t disable_cbfn;
38984 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
38985 bfa_ioc_reset_cbfn_t reset_cbfn;
38986 -};
38987 +} __no_const;
38988
38989 /*
38990 * IOC event notification mechanism.
38991 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
38992 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
38993 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
38994 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
38995 -};
38996 +} __no_const;
38997
38998 /*
38999 * Queue element to wait for room in request queue. FIFO order is
39000 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
39001 index 593085a..47aa999 100644
39002 --- a/drivers/scsi/hosts.c
39003 +++ b/drivers/scsi/hosts.c
39004 @@ -42,7 +42,7 @@
39005 #include "scsi_logging.h"
39006
39007
39008 -static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
39009 +static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
39010
39011
39012 static void scsi_host_cls_release(struct device *dev)
39013 @@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
39014 * subtract one because we increment first then return, but we need to
39015 * know what the next host number was before increment
39016 */
39017 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
39018 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
39019 shost->dma_channel = 0xff;
39020
39021 /* These three are default values which can be overridden */
39022 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
39023 index 4f33806..afd6f60 100644
39024 --- a/drivers/scsi/hpsa.c
39025 +++ b/drivers/scsi/hpsa.c
39026 @@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
39027 unsigned long flags;
39028
39029 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
39030 - return h->access.command_completed(h, q);
39031 + return h->access->command_completed(h, q);
39032
39033 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
39034 a = rq->head[rq->current_entry];
39035 @@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
39036 while (!list_empty(&h->reqQ)) {
39037 c = list_entry(h->reqQ.next, struct CommandList, list);
39038 /* can't do anything if fifo is full */
39039 - if ((h->access.fifo_full(h))) {
39040 + if ((h->access->fifo_full(h))) {
39041 dev_warn(&h->pdev->dev, "fifo full\n");
39042 break;
39043 }
39044 @@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
39045
39046 /* Tell the controller execute command */
39047 spin_unlock_irqrestore(&h->lock, flags);
39048 - h->access.submit_command(h, c);
39049 + h->access->submit_command(h, c);
39050 spin_lock_irqsave(&h->lock, flags);
39051 }
39052 spin_unlock_irqrestore(&h->lock, flags);
39053 @@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
39054
39055 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
39056 {
39057 - return h->access.command_completed(h, q);
39058 + return h->access->command_completed(h, q);
39059 }
39060
39061 static inline bool interrupt_pending(struct ctlr_info *h)
39062 {
39063 - return h->access.intr_pending(h);
39064 + return h->access->intr_pending(h);
39065 }
39066
39067 static inline long interrupt_not_for_us(struct ctlr_info *h)
39068 {
39069 - return (h->access.intr_pending(h) == 0) ||
39070 + return (h->access->intr_pending(h) == 0) ||
39071 (h->interrupts_enabled == 0);
39072 }
39073
39074 @@ -4316,7 +4316,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
39075 if (prod_index < 0)
39076 return -ENODEV;
39077 h->product_name = products[prod_index].product_name;
39078 - h->access = *(products[prod_index].access);
39079 + h->access = products[prod_index].access;
39080
39081 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
39082 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
39083 @@ -4598,7 +4598,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
39084
39085 assert_spin_locked(&lockup_detector_lock);
39086 remove_ctlr_from_lockup_detector_list(h);
39087 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
39088 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
39089 spin_lock_irqsave(&h->lock, flags);
39090 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
39091 spin_unlock_irqrestore(&h->lock, flags);
39092 @@ -4775,7 +4775,7 @@ reinit_after_soft_reset:
39093 }
39094
39095 /* make sure the board interrupts are off */
39096 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
39097 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
39098
39099 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
39100 goto clean2;
39101 @@ -4809,7 +4809,7 @@ reinit_after_soft_reset:
39102 * fake ones to scoop up any residual completions.
39103 */
39104 spin_lock_irqsave(&h->lock, flags);
39105 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
39106 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
39107 spin_unlock_irqrestore(&h->lock, flags);
39108 free_irqs(h);
39109 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
39110 @@ -4828,9 +4828,9 @@ reinit_after_soft_reset:
39111 dev_info(&h->pdev->dev, "Board READY.\n");
39112 dev_info(&h->pdev->dev,
39113 "Waiting for stale completions to drain.\n");
39114 - h->access.set_intr_mask(h, HPSA_INTR_ON);
39115 + h->access->set_intr_mask(h, HPSA_INTR_ON);
39116 msleep(10000);
39117 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
39118 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
39119
39120 rc = controller_reset_failed(h->cfgtable);
39121 if (rc)
39122 @@ -4851,7 +4851,7 @@ reinit_after_soft_reset:
39123 }
39124
39125 /* Turn the interrupts on so we can service requests */
39126 - h->access.set_intr_mask(h, HPSA_INTR_ON);
39127 + h->access->set_intr_mask(h, HPSA_INTR_ON);
39128
39129 hpsa_hba_inquiry(h);
39130 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
39131 @@ -4903,7 +4903,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
39132 * To write all data in the battery backed cache to disks
39133 */
39134 hpsa_flush_cache(h);
39135 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
39136 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
39137 hpsa_free_irqs_and_disable_msix(h);
39138 }
39139
39140 @@ -5071,7 +5071,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
39141 return;
39142 }
39143 /* Change the access methods to the performant access methods */
39144 - h->access = SA5_performant_access;
39145 + h->access = &SA5_performant_access;
39146 h->transMethod = CFGTBL_Trans_Performant;
39147 }
39148
39149 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
39150 index 9816479..c5d4e97 100644
39151 --- a/drivers/scsi/hpsa.h
39152 +++ b/drivers/scsi/hpsa.h
39153 @@ -79,7 +79,7 @@ struct ctlr_info {
39154 unsigned int msix_vector;
39155 unsigned int msi_vector;
39156 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
39157 - struct access_method access;
39158 + struct access_method *access;
39159
39160 /* queue and queue Info */
39161 struct list_head reqQ;
39162 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
39163 index c772d8d..35c362c 100644
39164 --- a/drivers/scsi/libfc/fc_exch.c
39165 +++ b/drivers/scsi/libfc/fc_exch.c
39166 @@ -100,12 +100,12 @@ struct fc_exch_mgr {
39167 u16 pool_max_index;
39168
39169 struct {
39170 - atomic_t no_free_exch;
39171 - atomic_t no_free_exch_xid;
39172 - atomic_t xid_not_found;
39173 - atomic_t xid_busy;
39174 - atomic_t seq_not_found;
39175 - atomic_t non_bls_resp;
39176 + atomic_unchecked_t no_free_exch;
39177 + atomic_unchecked_t no_free_exch_xid;
39178 + atomic_unchecked_t xid_not_found;
39179 + atomic_unchecked_t xid_busy;
39180 + atomic_unchecked_t seq_not_found;
39181 + atomic_unchecked_t non_bls_resp;
39182 } stats;
39183 };
39184
39185 @@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
39186 /* allocate memory for exchange */
39187 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
39188 if (!ep) {
39189 - atomic_inc(&mp->stats.no_free_exch);
39190 + atomic_inc_unchecked(&mp->stats.no_free_exch);
39191 goto out;
39192 }
39193 memset(ep, 0, sizeof(*ep));
39194 @@ -786,7 +786,7 @@ out:
39195 return ep;
39196 err:
39197 spin_unlock_bh(&pool->lock);
39198 - atomic_inc(&mp->stats.no_free_exch_xid);
39199 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
39200 mempool_free(ep, mp->ep_pool);
39201 return NULL;
39202 }
39203 @@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
39204 xid = ntohs(fh->fh_ox_id); /* we originated exch */
39205 ep = fc_exch_find(mp, xid);
39206 if (!ep) {
39207 - atomic_inc(&mp->stats.xid_not_found);
39208 + atomic_inc_unchecked(&mp->stats.xid_not_found);
39209 reject = FC_RJT_OX_ID;
39210 goto out;
39211 }
39212 @@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
39213 ep = fc_exch_find(mp, xid);
39214 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
39215 if (ep) {
39216 - atomic_inc(&mp->stats.xid_busy);
39217 + atomic_inc_unchecked(&mp->stats.xid_busy);
39218 reject = FC_RJT_RX_ID;
39219 goto rel;
39220 }
39221 @@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
39222 }
39223 xid = ep->xid; /* get our XID */
39224 } else if (!ep) {
39225 - atomic_inc(&mp->stats.xid_not_found);
39226 + atomic_inc_unchecked(&mp->stats.xid_not_found);
39227 reject = FC_RJT_RX_ID; /* XID not found */
39228 goto out;
39229 }
39230 @@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
39231 } else {
39232 sp = &ep->seq;
39233 if (sp->id != fh->fh_seq_id) {
39234 - atomic_inc(&mp->stats.seq_not_found);
39235 + atomic_inc_unchecked(&mp->stats.seq_not_found);
39236 if (f_ctl & FC_FC_END_SEQ) {
39237 /*
39238 * Update sequence_id based on incoming last
39239 @@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
39240
39241 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
39242 if (!ep) {
39243 - atomic_inc(&mp->stats.xid_not_found);
39244 + atomic_inc_unchecked(&mp->stats.xid_not_found);
39245 goto out;
39246 }
39247 if (ep->esb_stat & ESB_ST_COMPLETE) {
39248 - atomic_inc(&mp->stats.xid_not_found);
39249 + atomic_inc_unchecked(&mp->stats.xid_not_found);
39250 goto rel;
39251 }
39252 if (ep->rxid == FC_XID_UNKNOWN)
39253 ep->rxid = ntohs(fh->fh_rx_id);
39254 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
39255 - atomic_inc(&mp->stats.xid_not_found);
39256 + atomic_inc_unchecked(&mp->stats.xid_not_found);
39257 goto rel;
39258 }
39259 if (ep->did != ntoh24(fh->fh_s_id) &&
39260 ep->did != FC_FID_FLOGI) {
39261 - atomic_inc(&mp->stats.xid_not_found);
39262 + atomic_inc_unchecked(&mp->stats.xid_not_found);
39263 goto rel;
39264 }
39265 sof = fr_sof(fp);
39266 @@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
39267 sp->ssb_stat |= SSB_ST_RESP;
39268 sp->id = fh->fh_seq_id;
39269 } else if (sp->id != fh->fh_seq_id) {
39270 - atomic_inc(&mp->stats.seq_not_found);
39271 + atomic_inc_unchecked(&mp->stats.seq_not_found);
39272 goto rel;
39273 }
39274
39275 @@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
39276 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
39277
39278 if (!sp)
39279 - atomic_inc(&mp->stats.xid_not_found);
39280 + atomic_inc_unchecked(&mp->stats.xid_not_found);
39281 else
39282 - atomic_inc(&mp->stats.non_bls_resp);
39283 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
39284
39285 fc_frame_free(fp);
39286 }
39287 @@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
39288
39289 list_for_each_entry(ema, &lport->ema_list, ema_list) {
39290 mp = ema->mp;
39291 - st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
39292 + st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
39293 st->fc_no_free_exch_xid +=
39294 - atomic_read(&mp->stats.no_free_exch_xid);
39295 - st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
39296 - st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
39297 - st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
39298 - st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
39299 + atomic_read_unchecked(&mp->stats.no_free_exch_xid);
39300 + st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
39301 + st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
39302 + st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
39303 + st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
39304 }
39305 }
39306 EXPORT_SYMBOL(fc_exch_update_stats);
39307 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
39308 index bdb81cd..d3c7c2c 100644
39309 --- a/drivers/scsi/libsas/sas_ata.c
39310 +++ b/drivers/scsi/libsas/sas_ata.c
39311 @@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
39312 .postreset = ata_std_postreset,
39313 .error_handler = ata_std_error_handler,
39314 .post_internal_cmd = sas_ata_post_internal,
39315 - .qc_defer = ata_std_qc_defer,
39316 + .qc_defer = ata_std_qc_defer,
39317 .qc_prep = ata_noop_qc_prep,
39318 .qc_issue = sas_ata_qc_issue,
39319 .qc_fill_rtf = sas_ata_qc_fill_rtf,
39320 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
39321 index df4c13a..a51e90c 100644
39322 --- a/drivers/scsi/lpfc/lpfc.h
39323 +++ b/drivers/scsi/lpfc/lpfc.h
39324 @@ -424,7 +424,7 @@ struct lpfc_vport {
39325 struct dentry *debug_nodelist;
39326 struct dentry *vport_debugfs_root;
39327 struct lpfc_debugfs_trc *disc_trc;
39328 - atomic_t disc_trc_cnt;
39329 + atomic_unchecked_t disc_trc_cnt;
39330 #endif
39331 uint8_t stat_data_enabled;
39332 uint8_t stat_data_blocked;
39333 @@ -842,8 +842,8 @@ struct lpfc_hba {
39334 struct timer_list fabric_block_timer;
39335 unsigned long bit_flags;
39336 #define FABRIC_COMANDS_BLOCKED 0
39337 - atomic_t num_rsrc_err;
39338 - atomic_t num_cmd_success;
39339 + atomic_unchecked_t num_rsrc_err;
39340 + atomic_unchecked_t num_cmd_success;
39341 unsigned long last_rsrc_error_time;
39342 unsigned long last_ramp_down_time;
39343 unsigned long last_ramp_up_time;
39344 @@ -879,7 +879,7 @@ struct lpfc_hba {
39345
39346 struct dentry *debug_slow_ring_trc;
39347 struct lpfc_debugfs_trc *slow_ring_trc;
39348 - atomic_t slow_ring_trc_cnt;
39349 + atomic_unchecked_t slow_ring_trc_cnt;
39350 /* iDiag debugfs sub-directory */
39351 struct dentry *idiag_root;
39352 struct dentry *idiag_pci_cfg;
39353 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
39354 index f63f5ff..de29189 100644
39355 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
39356 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
39357 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
39358
39359 #include <linux/debugfs.h>
39360
39361 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
39362 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
39363 static unsigned long lpfc_debugfs_start_time = 0L;
39364
39365 /* iDiag */
39366 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
39367 lpfc_debugfs_enable = 0;
39368
39369 len = 0;
39370 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
39371 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
39372 (lpfc_debugfs_max_disc_trc - 1);
39373 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
39374 dtp = vport->disc_trc + i;
39375 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
39376 lpfc_debugfs_enable = 0;
39377
39378 len = 0;
39379 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
39380 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
39381 (lpfc_debugfs_max_slow_ring_trc - 1);
39382 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
39383 dtp = phba->slow_ring_trc + i;
39384 @@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
39385 !vport || !vport->disc_trc)
39386 return;
39387
39388 - index = atomic_inc_return(&vport->disc_trc_cnt) &
39389 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
39390 (lpfc_debugfs_max_disc_trc - 1);
39391 dtp = vport->disc_trc + index;
39392 dtp->fmt = fmt;
39393 dtp->data1 = data1;
39394 dtp->data2 = data2;
39395 dtp->data3 = data3;
39396 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
39397 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
39398 dtp->jif = jiffies;
39399 #endif
39400 return;
39401 @@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
39402 !phba || !phba->slow_ring_trc)
39403 return;
39404
39405 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
39406 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
39407 (lpfc_debugfs_max_slow_ring_trc - 1);
39408 dtp = phba->slow_ring_trc + index;
39409 dtp->fmt = fmt;
39410 dtp->data1 = data1;
39411 dtp->data2 = data2;
39412 dtp->data3 = data3;
39413 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
39414 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
39415 dtp->jif = jiffies;
39416 #endif
39417 return;
39418 @@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
39419 "slow_ring buffer\n");
39420 goto debug_failed;
39421 }
39422 - atomic_set(&phba->slow_ring_trc_cnt, 0);
39423 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
39424 memset(phba->slow_ring_trc, 0,
39425 (sizeof(struct lpfc_debugfs_trc) *
39426 lpfc_debugfs_max_slow_ring_trc));
39427 @@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
39428 "buffer\n");
39429 goto debug_failed;
39430 }
39431 - atomic_set(&vport->disc_trc_cnt, 0);
39432 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
39433
39434 snprintf(name, sizeof(name), "discovery_trace");
39435 vport->debug_disc_trc =
39436 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
39437 index 89ad558..76956c4 100644
39438 --- a/drivers/scsi/lpfc/lpfc_init.c
39439 +++ b/drivers/scsi/lpfc/lpfc_init.c
39440 @@ -10618,8 +10618,10 @@ lpfc_init(void)
39441 "misc_register returned with status %d", error);
39442
39443 if (lpfc_enable_npiv) {
39444 - lpfc_transport_functions.vport_create = lpfc_vport_create;
39445 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
39446 + pax_open_kernel();
39447 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
39448 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
39449 + pax_close_kernel();
39450 }
39451 lpfc_transport_template =
39452 fc_attach_transport(&lpfc_transport_functions);
39453 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
39454 index 60e5a17..ff7a793 100644
39455 --- a/drivers/scsi/lpfc/lpfc_scsi.c
39456 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
39457 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
39458 uint32_t evt_posted;
39459
39460 spin_lock_irqsave(&phba->hbalock, flags);
39461 - atomic_inc(&phba->num_rsrc_err);
39462 + atomic_inc_unchecked(&phba->num_rsrc_err);
39463 phba->last_rsrc_error_time = jiffies;
39464
39465 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
39466 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
39467 unsigned long flags;
39468 struct lpfc_hba *phba = vport->phba;
39469 uint32_t evt_posted;
39470 - atomic_inc(&phba->num_cmd_success);
39471 + atomic_inc_unchecked(&phba->num_cmd_success);
39472
39473 if (vport->cfg_lun_queue_depth <= queue_depth)
39474 return;
39475 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
39476 unsigned long num_rsrc_err, num_cmd_success;
39477 int i;
39478
39479 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
39480 - num_cmd_success = atomic_read(&phba->num_cmd_success);
39481 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
39482 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
39483
39484 /*
39485 * The error and success command counters are global per
39486 @@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
39487 }
39488 }
39489 lpfc_destroy_vport_work_array(phba, vports);
39490 - atomic_set(&phba->num_rsrc_err, 0);
39491 - atomic_set(&phba->num_cmd_success, 0);
39492 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
39493 + atomic_set_unchecked(&phba->num_cmd_success, 0);
39494 }
39495
39496 /**
39497 @@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
39498 }
39499 }
39500 lpfc_destroy_vport_work_array(phba, vports);
39501 - atomic_set(&phba->num_rsrc_err, 0);
39502 - atomic_set(&phba->num_cmd_success, 0);
39503 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
39504 + atomic_set_unchecked(&phba->num_cmd_success, 0);
39505 }
39506
39507 /**
39508 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
39509 index b46f5e9..c4c4ccb 100644
39510 --- a/drivers/scsi/pmcraid.c
39511 +++ b/drivers/scsi/pmcraid.c
39512 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
39513 res->scsi_dev = scsi_dev;
39514 scsi_dev->hostdata = res;
39515 res->change_detected = 0;
39516 - atomic_set(&res->read_failures, 0);
39517 - atomic_set(&res->write_failures, 0);
39518 + atomic_set_unchecked(&res->read_failures, 0);
39519 + atomic_set_unchecked(&res->write_failures, 0);
39520 rc = 0;
39521 }
39522 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
39523 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
39524
39525 /* If this was a SCSI read/write command keep count of errors */
39526 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
39527 - atomic_inc(&res->read_failures);
39528 + atomic_inc_unchecked(&res->read_failures);
39529 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
39530 - atomic_inc(&res->write_failures);
39531 + atomic_inc_unchecked(&res->write_failures);
39532
39533 if (!RES_IS_GSCSI(res->cfg_entry) &&
39534 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
39535 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
39536 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
39537 * hrrq_id assigned here in queuecommand
39538 */
39539 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
39540 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
39541 pinstance->num_hrrq;
39542 cmd->cmd_done = pmcraid_io_done;
39543
39544 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
39545 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
39546 * hrrq_id assigned here in queuecommand
39547 */
39548 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
39549 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
39550 pinstance->num_hrrq;
39551
39552 if (request_size) {
39553 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
39554
39555 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
39556 /* add resources only after host is added into system */
39557 - if (!atomic_read(&pinstance->expose_resources))
39558 + if (!atomic_read_unchecked(&pinstance->expose_resources))
39559 return;
39560
39561 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
39562 @@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
39563 init_waitqueue_head(&pinstance->reset_wait_q);
39564
39565 atomic_set(&pinstance->outstanding_cmds, 0);
39566 - atomic_set(&pinstance->last_message_id, 0);
39567 - atomic_set(&pinstance->expose_resources, 0);
39568 + atomic_set_unchecked(&pinstance->last_message_id, 0);
39569 + atomic_set_unchecked(&pinstance->expose_resources, 0);
39570
39571 INIT_LIST_HEAD(&pinstance->free_res_q);
39572 INIT_LIST_HEAD(&pinstance->used_res_q);
39573 @@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
39574 /* Schedule worker thread to handle CCN and take care of adding and
39575 * removing devices to OS
39576 */
39577 - atomic_set(&pinstance->expose_resources, 1);
39578 + atomic_set_unchecked(&pinstance->expose_resources, 1);
39579 schedule_work(&pinstance->worker_q);
39580 return rc;
39581
39582 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
39583 index e1d150f..6c6df44 100644
39584 --- a/drivers/scsi/pmcraid.h
39585 +++ b/drivers/scsi/pmcraid.h
39586 @@ -748,7 +748,7 @@ struct pmcraid_instance {
39587 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
39588
39589 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
39590 - atomic_t last_message_id;
39591 + atomic_unchecked_t last_message_id;
39592
39593 /* configuration table */
39594 struct pmcraid_config_table *cfg_table;
39595 @@ -777,7 +777,7 @@ struct pmcraid_instance {
39596 atomic_t outstanding_cmds;
39597
39598 /* should add/delete resources to mid-layer now ?*/
39599 - atomic_t expose_resources;
39600 + atomic_unchecked_t expose_resources;
39601
39602
39603
39604 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
39605 struct pmcraid_config_table_entry_ext cfg_entry_ext;
39606 };
39607 struct scsi_device *scsi_dev; /* Link scsi_device structure */
39608 - atomic_t read_failures; /* count of failed READ commands */
39609 - atomic_t write_failures; /* count of failed WRITE commands */
39610 + atomic_unchecked_t read_failures; /* count of failed READ commands */
39611 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
39612
39613 /* To indicate add/delete/modify during CCN */
39614 u8 change_detected;
39615 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
39616 index 83d7984..a27d947 100644
39617 --- a/drivers/scsi/qla2xxx/qla_attr.c
39618 +++ b/drivers/scsi/qla2xxx/qla_attr.c
39619 @@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
39620 return 0;
39621 }
39622
39623 -struct fc_function_template qla2xxx_transport_functions = {
39624 +fc_function_template_no_const qla2xxx_transport_functions = {
39625
39626 .show_host_node_name = 1,
39627 .show_host_port_name = 1,
39628 @@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
39629 .bsg_timeout = qla24xx_bsg_timeout,
39630 };
39631
39632 -struct fc_function_template qla2xxx_transport_vport_functions = {
39633 +fc_function_template_no_const qla2xxx_transport_vport_functions = {
39634
39635 .show_host_node_name = 1,
39636 .show_host_port_name = 1,
39637 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
39638 index 2411d1a..4673766 100644
39639 --- a/drivers/scsi/qla2xxx/qla_gbl.h
39640 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
39641 @@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
39642 struct device_attribute;
39643 extern struct device_attribute *qla2x00_host_attrs[];
39644 struct fc_function_template;
39645 -extern struct fc_function_template qla2xxx_transport_functions;
39646 -extern struct fc_function_template qla2xxx_transport_vport_functions;
39647 +extern fc_function_template_no_const qla2xxx_transport_functions;
39648 +extern fc_function_template_no_const qla2xxx_transport_vport_functions;
39649 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
39650 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
39651 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
39652 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
39653 index 10d23f8..a7d5d4c 100644
39654 --- a/drivers/scsi/qla2xxx/qla_os.c
39655 +++ b/drivers/scsi/qla2xxx/qla_os.c
39656 @@ -1472,8 +1472,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
39657 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
39658 /* Ok, a 64bit DMA mask is applicable. */
39659 ha->flags.enable_64bit_addressing = 1;
39660 - ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
39661 - ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
39662 + pax_open_kernel();
39663 + *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
39664 + *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
39665 + pax_close_kernel();
39666 return;
39667 }
39668 }
39669 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
39670 index 329d553..f20d31d 100644
39671 --- a/drivers/scsi/qla4xxx/ql4_def.h
39672 +++ b/drivers/scsi/qla4xxx/ql4_def.h
39673 @@ -273,7 +273,7 @@ struct ddb_entry {
39674 * (4000 only) */
39675 atomic_t relogin_timer; /* Max Time to wait for
39676 * relogin to complete */
39677 - atomic_t relogin_retry_count; /* Num of times relogin has been
39678 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
39679 * retried */
39680 uint32_t default_time2wait; /* Default Min time between
39681 * relogins (+aens) */
39682 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
39683 index 4cec123..7c1329f 100644
39684 --- a/drivers/scsi/qla4xxx/ql4_os.c
39685 +++ b/drivers/scsi/qla4xxx/ql4_os.c
39686 @@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
39687 */
39688 if (!iscsi_is_session_online(cls_sess)) {
39689 /* Reset retry relogin timer */
39690 - atomic_inc(&ddb_entry->relogin_retry_count);
39691 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
39692 DEBUG2(ql4_printk(KERN_INFO, ha,
39693 "%s: index[%d] relogin timed out-retrying"
39694 " relogin (%d), retry (%d)\n", __func__,
39695 ddb_entry->fw_ddb_index,
39696 - atomic_read(&ddb_entry->relogin_retry_count),
39697 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
39698 ddb_entry->default_time2wait + 4));
39699 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
39700 atomic_set(&ddb_entry->retry_relogin_timer,
39701 @@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
39702
39703 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
39704 atomic_set(&ddb_entry->relogin_timer, 0);
39705 - atomic_set(&ddb_entry->relogin_retry_count, 0);
39706 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
39707 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
39708 ddb_entry->default_relogin_timeout =
39709 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
39710 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
39711 index 2c0d0ec..4e8681a 100644
39712 --- a/drivers/scsi/scsi.c
39713 +++ b/drivers/scsi/scsi.c
39714 @@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
39715 unsigned long timeout;
39716 int rtn = 0;
39717
39718 - atomic_inc(&cmd->device->iorequest_cnt);
39719 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
39720
39721 /* check if the device is still usable */
39722 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
39723 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
39724 index f1bf5af..f67e943 100644
39725 --- a/drivers/scsi/scsi_lib.c
39726 +++ b/drivers/scsi/scsi_lib.c
39727 @@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
39728 shost = sdev->host;
39729 scsi_init_cmd_errh(cmd);
39730 cmd->result = DID_NO_CONNECT << 16;
39731 - atomic_inc(&cmd->device->iorequest_cnt);
39732 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
39733
39734 /*
39735 * SCSI request completion path will do scsi_device_unbusy(),
39736 @@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
39737
39738 INIT_LIST_HEAD(&cmd->eh_entry);
39739
39740 - atomic_inc(&cmd->device->iodone_cnt);
39741 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
39742 if (cmd->result)
39743 - atomic_inc(&cmd->device->ioerr_cnt);
39744 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
39745
39746 disposition = scsi_decide_disposition(cmd);
39747 if (disposition != SUCCESS &&
39748 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
39749 index 931a7d9..0c2a754 100644
39750 --- a/drivers/scsi/scsi_sysfs.c
39751 +++ b/drivers/scsi/scsi_sysfs.c
39752 @@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
39753 char *buf) \
39754 { \
39755 struct scsi_device *sdev = to_scsi_device(dev); \
39756 - unsigned long long count = atomic_read(&sdev->field); \
39757 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
39758 return snprintf(buf, 20, "0x%llx\n", count); \
39759 } \
39760 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
39761 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
39762 index 84a1fdf..693b0d6 100644
39763 --- a/drivers/scsi/scsi_tgt_lib.c
39764 +++ b/drivers/scsi/scsi_tgt_lib.c
39765 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
39766 int err;
39767
39768 dprintk("%lx %u\n", uaddr, len);
39769 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
39770 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
39771 if (err) {
39772 /*
39773 * TODO: need to fixup sg_tablesize, max_segment_size,
39774 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
39775 index e894ca7..de9d7660 100644
39776 --- a/drivers/scsi/scsi_transport_fc.c
39777 +++ b/drivers/scsi/scsi_transport_fc.c
39778 @@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
39779 * Netlink Infrastructure
39780 */
39781
39782 -static atomic_t fc_event_seq;
39783 +static atomic_unchecked_t fc_event_seq;
39784
39785 /**
39786 * fc_get_event_number - Obtain the next sequential FC event number
39787 @@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
39788 u32
39789 fc_get_event_number(void)
39790 {
39791 - return atomic_add_return(1, &fc_event_seq);
39792 + return atomic_add_return_unchecked(1, &fc_event_seq);
39793 }
39794 EXPORT_SYMBOL(fc_get_event_number);
39795
39796 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
39797 {
39798 int error;
39799
39800 - atomic_set(&fc_event_seq, 0);
39801 + atomic_set_unchecked(&fc_event_seq, 0);
39802
39803 error = transport_class_register(&fc_host_class);
39804 if (error)
39805 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
39806 char *cp;
39807
39808 *val = simple_strtoul(buf, &cp, 0);
39809 - if ((*cp && (*cp != '\n')) || (*val < 0))
39810 + if (*cp && (*cp != '\n'))
39811 return -EINVAL;
39812 /*
39813 * Check for overflow; dev_loss_tmo is u32
39814 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
39815 index 31969f2..2b348f0 100644
39816 --- a/drivers/scsi/scsi_transport_iscsi.c
39817 +++ b/drivers/scsi/scsi_transport_iscsi.c
39818 @@ -79,7 +79,7 @@ struct iscsi_internal {
39819 struct transport_container session_cont;
39820 };
39821
39822 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
39823 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
39824 static struct workqueue_struct *iscsi_eh_timer_workq;
39825
39826 static DEFINE_IDA(iscsi_sess_ida);
39827 @@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
39828 int err;
39829
39830 ihost = shost->shost_data;
39831 - session->sid = atomic_add_return(1, &iscsi_session_nr);
39832 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
39833
39834 if (target_id == ISCSI_MAX_TARGET) {
39835 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
39836 @@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
39837 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
39838 ISCSI_TRANSPORT_VERSION);
39839
39840 - atomic_set(&iscsi_session_nr, 0);
39841 + atomic_set_unchecked(&iscsi_session_nr, 0);
39842
39843 err = class_register(&iscsi_transport_class);
39844 if (err)
39845 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
39846 index f379c7f..e8fc69c 100644
39847 --- a/drivers/scsi/scsi_transport_srp.c
39848 +++ b/drivers/scsi/scsi_transport_srp.c
39849 @@ -33,7 +33,7 @@
39850 #include "scsi_transport_srp_internal.h"
39851
39852 struct srp_host_attrs {
39853 - atomic_t next_port_id;
39854 + atomic_unchecked_t next_port_id;
39855 };
39856 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
39857
39858 @@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
39859 struct Scsi_Host *shost = dev_to_shost(dev);
39860 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
39861
39862 - atomic_set(&srp_host->next_port_id, 0);
39863 + atomic_set_unchecked(&srp_host->next_port_id, 0);
39864 return 0;
39865 }
39866
39867 @@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
39868 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
39869 rport->roles = ids->roles;
39870
39871 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
39872 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
39873 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
39874
39875 transport_setup_device(&rport->dev);
39876 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
39877 index 7992635..609faf8 100644
39878 --- a/drivers/scsi/sd.c
39879 +++ b/drivers/scsi/sd.c
39880 @@ -2909,7 +2909,7 @@ static int sd_probe(struct device *dev)
39881 sdkp->disk = gd;
39882 sdkp->index = index;
39883 atomic_set(&sdkp->openers, 0);
39884 - atomic_set(&sdkp->device->ioerr_cnt, 0);
39885 + atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
39886
39887 if (!sdp->request_queue->rq_timeout) {
39888 if (sdp->type != TYPE_MOD)
39889 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
39890 index be2c9a6..275525c 100644
39891 --- a/drivers/scsi/sg.c
39892 +++ b/drivers/scsi/sg.c
39893 @@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
39894 sdp->disk->disk_name,
39895 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
39896 NULL,
39897 - (char *)arg);
39898 + (char __user *)arg);
39899 case BLKTRACESTART:
39900 return blk_trace_startstop(sdp->device->request_queue, 1);
39901 case BLKTRACESTOP:
39902 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
39903 index 19ee901..6e8c2ef 100644
39904 --- a/drivers/spi/spi.c
39905 +++ b/drivers/spi/spi.c
39906 @@ -1616,7 +1616,7 @@ int spi_bus_unlock(struct spi_master *master)
39907 EXPORT_SYMBOL_GPL(spi_bus_unlock);
39908
39909 /* portable code must never pass more than 32 bytes */
39910 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
39911 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
39912
39913 static u8 *buf;
39914
39915 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
39916 index 34afc16..ffe44dd 100644
39917 --- a/drivers/staging/octeon/ethernet-rx.c
39918 +++ b/drivers/staging/octeon/ethernet-rx.c
39919 @@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
39920 /* Increment RX stats for virtual ports */
39921 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
39922 #ifdef CONFIG_64BIT
39923 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
39924 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
39925 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
39926 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
39927 #else
39928 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
39929 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
39930 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
39931 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
39932 #endif
39933 }
39934 netif_receive_skb(skb);
39935 @@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
39936 dev->name);
39937 */
39938 #ifdef CONFIG_64BIT
39939 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
39940 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
39941 #else
39942 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
39943 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
39944 #endif
39945 dev_kfree_skb_irq(skb);
39946 }
39947 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
39948 index ef32dc1..a159d68 100644
39949 --- a/drivers/staging/octeon/ethernet.c
39950 +++ b/drivers/staging/octeon/ethernet.c
39951 @@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
39952 * since the RX tasklet also increments it.
39953 */
39954 #ifdef CONFIG_64BIT
39955 - atomic64_add(rx_status.dropped_packets,
39956 - (atomic64_t *)&priv->stats.rx_dropped);
39957 + atomic64_add_unchecked(rx_status.dropped_packets,
39958 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
39959 #else
39960 - atomic_add(rx_status.dropped_packets,
39961 - (atomic_t *)&priv->stats.rx_dropped);
39962 + atomic_add_unchecked(rx_status.dropped_packets,
39963 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
39964 #endif
39965 }
39966
39967 diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
39968 index a2b7e03..aaf3630 100644
39969 --- a/drivers/staging/ramster/tmem.c
39970 +++ b/drivers/staging/ramster/tmem.c
39971 @@ -50,25 +50,25 @@
39972 * A tmem host implementation must use this function to register callbacks
39973 * for memory allocation.
39974 */
39975 -static struct tmem_hostops tmem_hostops;
39976 +static struct tmem_hostops *tmem_hostops;
39977
39978 static void tmem_objnode_tree_init(void);
39979
39980 void tmem_register_hostops(struct tmem_hostops *m)
39981 {
39982 tmem_objnode_tree_init();
39983 - tmem_hostops = *m;
39984 + tmem_hostops = m;
39985 }
39986
39987 /*
39988 * A tmem host implementation must use this function to register
39989 * callbacks for a page-accessible memory (PAM) implementation.
39990 */
39991 -static struct tmem_pamops tmem_pamops;
39992 +static struct tmem_pamops *tmem_pamops;
39993
39994 void tmem_register_pamops(struct tmem_pamops *m)
39995 {
39996 - tmem_pamops = *m;
39997 + tmem_pamops = m;
39998 }
39999
40000 /*
40001 @@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
40002 obj->pampd_count = 0;
40003 #ifdef CONFIG_RAMSTER
40004 if (tmem_pamops.new_obj != NULL)
40005 - (*tmem_pamops.new_obj)(obj);
40006 + (tmem_pamops->new_obj)(obj);
40007 #endif
40008 SET_SENTINEL(obj, OBJ);
40009
40010 @@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
40011 rbnode = rb_next(rbnode);
40012 tmem_pampd_destroy_all_in_obj(obj, true);
40013 tmem_obj_free(obj, hb);
40014 - (*tmem_hostops.obj_free)(obj, pool);
40015 + (tmem_hostops->obj_free)(obj, pool);
40016 }
40017 spin_unlock(&hb->lock);
40018 }
40019 @@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
40020 ASSERT_SENTINEL(obj, OBJ);
40021 BUG_ON(obj->pool == NULL);
40022 ASSERT_SENTINEL(obj->pool, POOL);
40023 - objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
40024 + objnode = (tmem_hostops->objnode_alloc)(obj->pool);
40025 if (unlikely(objnode == NULL))
40026 goto out;
40027 objnode->obj = obj;
40028 @@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
40029 ASSERT_SENTINEL(pool, POOL);
40030 objnode->obj->objnode_count--;
40031 objnode->obj = NULL;
40032 - (*tmem_hostops.objnode_free)(objnode, pool);
40033 + (tmem_hostops->objnode_free)(objnode, pool);
40034 }
40035
40036 /*
40037 @@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
40038 void *old_pampd = *(void **)slot;
40039 *(void **)slot = new_pampd;
40040 if (!no_free)
40041 - (*tmem_pamops.free)(old_pampd, obj->pool,
40042 + (tmem_pamops->free)(old_pampd, obj->pool,
40043 NULL, 0, false);
40044 ret = new_pampd;
40045 }
40046 @@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
40047 if (objnode->slots[i]) {
40048 if (ht == 1) {
40049 obj->pampd_count--;
40050 - (*tmem_pamops.free)(objnode->slots[i],
40051 + (tmem_pamops->free)(objnode->slots[i],
40052 obj->pool, NULL, 0, true);
40053 objnode->slots[i] = NULL;
40054 continue;
40055 @@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
40056 return;
40057 if (obj->objnode_tree_height == 0) {
40058 obj->pampd_count--;
40059 - (*tmem_pamops.free)(obj->objnode_tree_root,
40060 + (tmem_pamops->free)(obj->objnode_tree_root,
40061 obj->pool, NULL, 0, true);
40062 } else {
40063 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
40064 @@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
40065 obj->objnode_tree_root = NULL;
40066 #ifdef CONFIG_RAMSTER
40067 if (tmem_pamops.free_obj != NULL)
40068 - (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
40069 + (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
40070 #endif
40071 }
40072
40073 @@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
40074 /* if found, is a dup put, flush the old one */
40075 pampd_del = tmem_pampd_delete_from_obj(obj, index);
40076 BUG_ON(pampd_del != pampd);
40077 - (*tmem_pamops.free)(pampd, pool, oidp, index, true);
40078 + (tmem_pamops->free)(pampd, pool, oidp, index, true);
40079 if (obj->pampd_count == 0) {
40080 objnew = obj;
40081 objfound = NULL;
40082 @@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
40083 pampd = NULL;
40084 }
40085 } else {
40086 - obj = objnew = (*tmem_hostops.obj_alloc)(pool);
40087 + obj = objnew = (tmem_hostops->obj_alloc)(pool);
40088 if (unlikely(obj == NULL)) {
40089 ret = -ENOMEM;
40090 goto out;
40091 @@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
40092 if (unlikely(ret == -ENOMEM))
40093 /* may have partially built objnode tree ("stump") */
40094 goto delete_and_free;
40095 - (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
40096 + (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
40097 goto out;
40098
40099 delete_and_free:
40100 (void)tmem_pampd_delete_from_obj(obj, index);
40101 if (pampd)
40102 - (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
40103 + (tmem_pamops->free)(pampd, pool, NULL, 0, true);
40104 if (objnew) {
40105 tmem_obj_free(objnew, hb);
40106 - (*tmem_hostops.obj_free)(objnew, pool);
40107 + (tmem_hostops->obj_free)(objnew, pool);
40108 }
40109 out:
40110 spin_unlock(&hb->lock);
40111 @@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
40112 if (pampd != NULL) {
40113 BUG_ON(obj == NULL);
40114 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
40115 - (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
40116 + (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
40117 } else if (delete) {
40118 BUG_ON(obj == NULL);
40119 (void)tmem_pampd_delete_from_obj(obj, index);
40120 @@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
40121 int ret = 0;
40122
40123 if (!is_ephemeral(pool))
40124 - new_pampd = (*tmem_pamops.repatriate_preload)(
40125 + new_pampd = (tmem_pamops->repatriate_preload)(
40126 old_pampd, pool, oidp, index, &intransit);
40127 if (intransit)
40128 ret = -EAGAIN;
40129 @@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
40130 /* must release the hb->lock else repatriate can't sleep */
40131 spin_unlock(&hb->lock);
40132 if (!intransit)
40133 - ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
40134 + ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
40135 oidp, index, free, data);
40136 if (ret == -EAGAIN) {
40137 /* rare I think, but should cond_resched()??? */
40138 @@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
40139 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
40140 /* if we bug here, pamops wasn't properly set up for ramster */
40141 BUG_ON(tmem_pamops.replace_in_obj == NULL);
40142 - ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
40143 + ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
40144 out:
40145 spin_unlock(&hb->lock);
40146 return ret;
40147 @@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
40148 if (free) {
40149 if (obj->pampd_count == 0) {
40150 tmem_obj_free(obj, hb);
40151 - (*tmem_hostops.obj_free)(obj, pool);
40152 + (tmem_hostops->obj_free)(obj, pool);
40153 obj = NULL;
40154 }
40155 }
40156 if (free)
40157 - ret = (*tmem_pamops.get_data_and_free)(
40158 + ret = (tmem_pamops->get_data_and_free)(
40159 data, sizep, raw, pampd, pool, oidp, index);
40160 else
40161 - ret = (*tmem_pamops.get_data)(
40162 + ret = (tmem_pamops->get_data)(
40163 data, sizep, raw, pampd, pool, oidp, index);
40164 if (ret < 0)
40165 goto out;
40166 @@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
40167 pampd = tmem_pampd_delete_from_obj(obj, index);
40168 if (pampd == NULL)
40169 goto out;
40170 - (*tmem_pamops.free)(pampd, pool, oidp, index, true);
40171 + (tmem_pamops->free)(pampd, pool, oidp, index, true);
40172 if (obj->pampd_count == 0) {
40173 tmem_obj_free(obj, hb);
40174 - (*tmem_hostops.obj_free)(obj, pool);
40175 + (tmem_hostops->obj_free)(obj, pool);
40176 }
40177 ret = 0;
40178
40179 @@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
40180 goto out;
40181 tmem_pampd_destroy_all_in_obj(obj, false);
40182 tmem_obj_free(obj, hb);
40183 - (*tmem_hostops.obj_free)(obj, pool);
40184 + (tmem_hostops->obj_free)(obj, pool);
40185 ret = 0;
40186
40187 out:
40188 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
40189 index dc23395..cf7e9b1 100644
40190 --- a/drivers/staging/rtl8712/rtl871x_io.h
40191 +++ b/drivers/staging/rtl8712/rtl871x_io.h
40192 @@ -108,7 +108,7 @@ struct _io_ops {
40193 u8 *pmem);
40194 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
40195 u8 *pmem);
40196 -};
40197 +} __no_const;
40198
40199 struct io_req {
40200 struct list_head list;
40201 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
40202 index 1f5088b..0e59820 100644
40203 --- a/drivers/staging/sbe-2t3e3/netdev.c
40204 +++ b/drivers/staging/sbe-2t3e3/netdev.c
40205 @@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40206 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
40207
40208 if (rlen)
40209 - if (copy_to_user(data, &resp, rlen))
40210 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
40211 return -EFAULT;
40212
40213 return 0;
40214 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
40215 index 5dddc4d..34fcb2f 100644
40216 --- a/drivers/staging/usbip/vhci.h
40217 +++ b/drivers/staging/usbip/vhci.h
40218 @@ -83,7 +83,7 @@ struct vhci_hcd {
40219 unsigned resuming:1;
40220 unsigned long re_timeout;
40221
40222 - atomic_t seqnum;
40223 + atomic_unchecked_t seqnum;
40224
40225 /*
40226 * NOTE:
40227 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
40228 index c3aa219..bf8b3de 100644
40229 --- a/drivers/staging/usbip/vhci_hcd.c
40230 +++ b/drivers/staging/usbip/vhci_hcd.c
40231 @@ -451,7 +451,7 @@ static void vhci_tx_urb(struct urb *urb)
40232 return;
40233 }
40234
40235 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
40236 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
40237 if (priv->seqnum == 0xffff)
40238 dev_info(&urb->dev->dev, "seqnum max\n");
40239
40240 @@ -703,7 +703,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
40241 return -ENOMEM;
40242 }
40243
40244 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
40245 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
40246 if (unlink->seqnum == 0xffff)
40247 pr_info("seqnum max\n");
40248
40249 @@ -907,7 +907,7 @@ static int vhci_start(struct usb_hcd *hcd)
40250 vdev->rhport = rhport;
40251 }
40252
40253 - atomic_set(&vhci->seqnum, 0);
40254 + atomic_set_unchecked(&vhci->seqnum, 0);
40255 spin_lock_init(&vhci->lock);
40256
40257 hcd->power_budget = 0; /* no limit */
40258 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
40259 index ba5f1c0..11d8122 100644
40260 --- a/drivers/staging/usbip/vhci_rx.c
40261 +++ b/drivers/staging/usbip/vhci_rx.c
40262 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
40263 if (!urb) {
40264 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
40265 pr_info("max seqnum %d\n",
40266 - atomic_read(&the_controller->seqnum));
40267 + atomic_read_unchecked(&the_controller->seqnum));
40268 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
40269 return;
40270 }
40271 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
40272 index 5f13890..36a044b 100644
40273 --- a/drivers/staging/vt6655/hostap.c
40274 +++ b/drivers/staging/vt6655/hostap.c
40275 @@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
40276 *
40277 */
40278
40279 +static net_device_ops_no_const apdev_netdev_ops;
40280 +
40281 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
40282 {
40283 PSDevice apdev_priv;
40284 struct net_device *dev = pDevice->dev;
40285 int ret;
40286 - const struct net_device_ops apdev_netdev_ops = {
40287 - .ndo_start_xmit = pDevice->tx_80211,
40288 - };
40289
40290 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
40291
40292 @@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
40293 *apdev_priv = *pDevice;
40294 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
40295
40296 + /* only half broken now */
40297 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
40298 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
40299
40300 pDevice->apdev->type = ARPHRD_IEEE80211;
40301 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
40302 index 26a7d0e..897b083 100644
40303 --- a/drivers/staging/vt6656/hostap.c
40304 +++ b/drivers/staging/vt6656/hostap.c
40305 @@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
40306 *
40307 */
40308
40309 +static net_device_ops_no_const apdev_netdev_ops;
40310 +
40311 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
40312 {
40313 PSDevice apdev_priv;
40314 struct net_device *dev = pDevice->dev;
40315 int ret;
40316 - const struct net_device_ops apdev_netdev_ops = {
40317 - .ndo_start_xmit = pDevice->tx_80211,
40318 - };
40319
40320 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
40321
40322 @@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
40323 *apdev_priv = *pDevice;
40324 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
40325
40326 + /* only half broken now */
40327 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
40328 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
40329
40330 pDevice->apdev->type = ARPHRD_IEEE80211;
40331 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
40332 index 56c8e60..1920c63 100644
40333 --- a/drivers/staging/zcache/tmem.c
40334 +++ b/drivers/staging/zcache/tmem.c
40335 @@ -39,7 +39,7 @@
40336 * A tmem host implementation must use this function to register callbacks
40337 * for memory allocation.
40338 */
40339 -static struct tmem_hostops tmem_hostops;
40340 +static tmem_hostops_no_const tmem_hostops;
40341
40342 static void tmem_objnode_tree_init(void);
40343
40344 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
40345 * A tmem host implementation must use this function to register
40346 * callbacks for a page-accessible memory (PAM) implementation
40347 */
40348 -static struct tmem_pamops tmem_pamops;
40349 +static tmem_pamops_no_const tmem_pamops;
40350
40351 void tmem_register_pamops(struct tmem_pamops *m)
40352 {
40353 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
40354 index 0d4aa82..f7832d4 100644
40355 --- a/drivers/staging/zcache/tmem.h
40356 +++ b/drivers/staging/zcache/tmem.h
40357 @@ -180,6 +180,7 @@ struct tmem_pamops {
40358 void (*new_obj)(struct tmem_obj *);
40359 int (*replace_in_obj)(void *, struct tmem_obj *);
40360 };
40361 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
40362 extern void tmem_register_pamops(struct tmem_pamops *m);
40363
40364 /* memory allocation methods provided by the host implementation */
40365 @@ -189,6 +190,7 @@ struct tmem_hostops {
40366 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
40367 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
40368 };
40369 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
40370 extern void tmem_register_hostops(struct tmem_hostops *m);
40371
40372 /* core tmem accessor functions */
40373 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
40374 index 96f4981..4daaa7e 100644
40375 --- a/drivers/target/target_core_device.c
40376 +++ b/drivers/target/target_core_device.c
40377 @@ -1370,7 +1370,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
40378 spin_lock_init(&dev->se_port_lock);
40379 spin_lock_init(&dev->se_tmr_lock);
40380 spin_lock_init(&dev->qf_cmd_lock);
40381 - atomic_set(&dev->dev_ordered_id, 0);
40382 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
40383 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
40384 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
40385 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
40386 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
40387 index bd587b7..173daf3 100644
40388 --- a/drivers/target/target_core_transport.c
40389 +++ b/drivers/target/target_core_transport.c
40390 @@ -1077,7 +1077,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
40391 * Used to determine when ORDERED commands should go from
40392 * Dormant to Active status.
40393 */
40394 - cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
40395 + cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
40396 smp_mb__after_atomic_inc();
40397 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
40398 cmd->se_ordered_id, cmd->sam_task_attr,
40399 diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
40400 index b09c8d1f..c4225c0 100644
40401 --- a/drivers/tty/cyclades.c
40402 +++ b/drivers/tty/cyclades.c
40403 @@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
40404 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
40405 info->port.count);
40406 #endif
40407 - info->port.count++;
40408 + atomic_inc(&info->port.count);
40409 #ifdef CY_DEBUG_COUNT
40410 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
40411 - current->pid, info->port.count);
40412 + current->pid, atomic_read(&info->port.count));
40413 #endif
40414
40415 /*
40416 @@ -3991,7 +3991,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
40417 for (j = 0; j < cy_card[i].nports; j++) {
40418 info = &cy_card[i].ports[j];
40419
40420 - if (info->port.count) {
40421 + if (atomic_read(&info->port.count)) {
40422 /* XXX is the ldisc num worth this? */
40423 struct tty_struct *tty;
40424 struct tty_ldisc *ld;
40425 diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
40426 index 13ee53b..418d164 100644
40427 --- a/drivers/tty/hvc/hvc_console.c
40428 +++ b/drivers/tty/hvc/hvc_console.c
40429 @@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
40430
40431 spin_lock_irqsave(&hp->port.lock, flags);
40432 /* Check and then increment for fast path open. */
40433 - if (hp->port.count++ > 0) {
40434 + if (atomic_inc_return(&hp->port.count) > 1) {
40435 spin_unlock_irqrestore(&hp->port.lock, flags);
40436 hvc_kick();
40437 return 0;
40438 @@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
40439
40440 spin_lock_irqsave(&hp->port.lock, flags);
40441
40442 - if (--hp->port.count == 0) {
40443 + if (atomic_dec_return(&hp->port.count) == 0) {
40444 spin_unlock_irqrestore(&hp->port.lock, flags);
40445 /* We are done with the tty pointer now. */
40446 tty_port_tty_set(&hp->port, NULL);
40447 @@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
40448 */
40449 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
40450 } else {
40451 - if (hp->port.count < 0)
40452 + if (atomic_read(&hp->port.count) < 0)
40453 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
40454 - hp->vtermno, hp->port.count);
40455 + hp->vtermno, atomic_read(&hp->port.count));
40456 spin_unlock_irqrestore(&hp->port.lock, flags);
40457 }
40458 }
40459 @@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
40460 * open->hangup case this can be called after the final close so prevent
40461 * that from happening for now.
40462 */
40463 - if (hp->port.count <= 0) {
40464 + if (atomic_read(&hp->port.count) <= 0) {
40465 spin_unlock_irqrestore(&hp->port.lock, flags);
40466 return;
40467 }
40468
40469 - hp->port.count = 0;
40470 + atomic_set(&hp->port.count, 0);
40471 spin_unlock_irqrestore(&hp->port.lock, flags);
40472 tty_port_tty_set(&hp->port, NULL);
40473
40474 @@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
40475 return -EPIPE;
40476
40477 /* FIXME what's this (unprotected) check for? */
40478 - if (hp->port.count <= 0)
40479 + if (atomic_read(&hp->port.count) <= 0)
40480 return -EIO;
40481
40482 spin_lock_irqsave(&hp->lock, flags);
40483 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
40484 index 8776357..b2d4afd 100644
40485 --- a/drivers/tty/hvc/hvcs.c
40486 +++ b/drivers/tty/hvc/hvcs.c
40487 @@ -83,6 +83,7 @@
40488 #include <asm/hvcserver.h>
40489 #include <asm/uaccess.h>
40490 #include <asm/vio.h>
40491 +#include <asm/local.h>
40492
40493 /*
40494 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
40495 @@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
40496
40497 spin_lock_irqsave(&hvcsd->lock, flags);
40498
40499 - if (hvcsd->port.count > 0) {
40500 + if (atomic_read(&hvcsd->port.count) > 0) {
40501 spin_unlock_irqrestore(&hvcsd->lock, flags);
40502 printk(KERN_INFO "HVCS: vterm state unchanged. "
40503 "The hvcs device node is still in use.\n");
40504 @@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
40505 }
40506 }
40507
40508 - hvcsd->port.count = 0;
40509 + atomic_set(&hvcsd->port.count, 0);
40510 hvcsd->port.tty = tty;
40511 tty->driver_data = hvcsd;
40512
40513 @@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
40514 unsigned long flags;
40515
40516 spin_lock_irqsave(&hvcsd->lock, flags);
40517 - hvcsd->port.count++;
40518 + atomic_inc(&hvcsd->port.count);
40519 hvcsd->todo_mask |= HVCS_SCHED_READ;
40520 spin_unlock_irqrestore(&hvcsd->lock, flags);
40521
40522 @@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
40523 hvcsd = tty->driver_data;
40524
40525 spin_lock_irqsave(&hvcsd->lock, flags);
40526 - if (--hvcsd->port.count == 0) {
40527 + if (atomic_dec_and_test(&hvcsd->port.count)) {
40528
40529 vio_disable_interrupts(hvcsd->vdev);
40530
40531 @@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
40532
40533 free_irq(irq, hvcsd);
40534 return;
40535 - } else if (hvcsd->port.count < 0) {
40536 + } else if (atomic_read(&hvcsd->port.count) < 0) {
40537 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
40538 " is missmanaged.\n",
40539 - hvcsd->vdev->unit_address, hvcsd->port.count);
40540 + hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
40541 }
40542
40543 spin_unlock_irqrestore(&hvcsd->lock, flags);
40544 @@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
40545
40546 spin_lock_irqsave(&hvcsd->lock, flags);
40547 /* Preserve this so that we know how many kref refs to put */
40548 - temp_open_count = hvcsd->port.count;
40549 + temp_open_count = atomic_read(&hvcsd->port.count);
40550
40551 /*
40552 * Don't kref put inside the spinlock because the destruction
40553 @@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
40554 tty->driver_data = NULL;
40555 hvcsd->port.tty = NULL;
40556
40557 - hvcsd->port.count = 0;
40558 + atomic_set(&hvcsd->port.count, 0);
40559
40560 /* This will drop any buffered data on the floor which is OK in a hangup
40561 * scenario. */
40562 @@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
40563 * the middle of a write operation? This is a crummy place to do this
40564 * but we want to keep it all in the spinlock.
40565 */
40566 - if (hvcsd->port.count <= 0) {
40567 + if (atomic_read(&hvcsd->port.count) <= 0) {
40568 spin_unlock_irqrestore(&hvcsd->lock, flags);
40569 return -ENODEV;
40570 }
40571 @@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
40572 {
40573 struct hvcs_struct *hvcsd = tty->driver_data;
40574
40575 - if (!hvcsd || hvcsd->port.count <= 0)
40576 + if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
40577 return 0;
40578
40579 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
40580 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
40581 index 2cde13d..645d78f 100644
40582 --- a/drivers/tty/ipwireless/tty.c
40583 +++ b/drivers/tty/ipwireless/tty.c
40584 @@ -29,6 +29,7 @@
40585 #include <linux/tty_driver.h>
40586 #include <linux/tty_flip.h>
40587 #include <linux/uaccess.h>
40588 +#include <asm/local.h>
40589
40590 #include "tty.h"
40591 #include "network.h"
40592 @@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
40593 mutex_unlock(&tty->ipw_tty_mutex);
40594 return -ENODEV;
40595 }
40596 - if (tty->port.count == 0)
40597 + if (atomic_read(&tty->port.count) == 0)
40598 tty->tx_bytes_queued = 0;
40599
40600 - tty->port.count++;
40601 + atomic_inc(&tty->port.count);
40602
40603 tty->port.tty = linux_tty;
40604 linux_tty->driver_data = tty;
40605 @@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
40606
40607 static void do_ipw_close(struct ipw_tty *tty)
40608 {
40609 - tty->port.count--;
40610 -
40611 - if (tty->port.count == 0) {
40612 + if (atomic_dec_return(&tty->port.count) == 0) {
40613 struct tty_struct *linux_tty = tty->port.tty;
40614
40615 if (linux_tty != NULL) {
40616 @@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
40617 return;
40618
40619 mutex_lock(&tty->ipw_tty_mutex);
40620 - if (tty->port.count == 0) {
40621 + if (atomic_read(&tty->port.count) == 0) {
40622 mutex_unlock(&tty->ipw_tty_mutex);
40623 return;
40624 }
40625 @@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
40626 return;
40627 }
40628
40629 - if (!tty->port.count) {
40630 + if (!atomic_read(&tty->port.count)) {
40631 mutex_unlock(&tty->ipw_tty_mutex);
40632 return;
40633 }
40634 @@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
40635 return -ENODEV;
40636
40637 mutex_lock(&tty->ipw_tty_mutex);
40638 - if (!tty->port.count) {
40639 + if (!atomic_read(&tty->port.count)) {
40640 mutex_unlock(&tty->ipw_tty_mutex);
40641 return -EINVAL;
40642 }
40643 @@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
40644 if (!tty)
40645 return -ENODEV;
40646
40647 - if (!tty->port.count)
40648 + if (!atomic_read(&tty->port.count))
40649 return -EINVAL;
40650
40651 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
40652 @@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
40653 if (!tty)
40654 return 0;
40655
40656 - if (!tty->port.count)
40657 + if (!atomic_read(&tty->port.count))
40658 return 0;
40659
40660 return tty->tx_bytes_queued;
40661 @@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
40662 if (!tty)
40663 return -ENODEV;
40664
40665 - if (!tty->port.count)
40666 + if (!atomic_read(&tty->port.count))
40667 return -EINVAL;
40668
40669 return get_control_lines(tty);
40670 @@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
40671 if (!tty)
40672 return -ENODEV;
40673
40674 - if (!tty->port.count)
40675 + if (!atomic_read(&tty->port.count))
40676 return -EINVAL;
40677
40678 return set_control_lines(tty, set, clear);
40679 @@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
40680 if (!tty)
40681 return -ENODEV;
40682
40683 - if (!tty->port.count)
40684 + if (!atomic_read(&tty->port.count))
40685 return -EINVAL;
40686
40687 /* FIXME: Exactly how is the tty object locked here .. */
40688 @@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
40689 * are gone */
40690 mutex_lock(&ttyj->ipw_tty_mutex);
40691 }
40692 - while (ttyj->port.count)
40693 + while (atomic_read(&ttyj->port.count))
40694 do_ipw_close(ttyj);
40695 ipwireless_disassociate_network_ttys(network,
40696 ttyj->channel_idx);
40697 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
40698 index f9d2850..b006f04 100644
40699 --- a/drivers/tty/moxa.c
40700 +++ b/drivers/tty/moxa.c
40701 @@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
40702 }
40703
40704 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
40705 - ch->port.count++;
40706 + atomic_inc(&ch->port.count);
40707 tty->driver_data = ch;
40708 tty_port_tty_set(&ch->port, tty);
40709 mutex_lock(&ch->port.mutex);
40710 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
40711 index bfd6771..e0d93c4 100644
40712 --- a/drivers/tty/n_gsm.c
40713 +++ b/drivers/tty/n_gsm.c
40714 @@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
40715 spin_lock_init(&dlci->lock);
40716 mutex_init(&dlci->mutex);
40717 dlci->fifo = &dlci->_fifo;
40718 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
40719 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
40720 kfree(dlci);
40721 return NULL;
40722 }
40723 @@ -2936,7 +2936,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
40724 struct gsm_dlci *dlci = tty->driver_data;
40725 struct tty_port *port = &dlci->port;
40726
40727 - port->count++;
40728 + atomic_inc(&port->count);
40729 dlci_get(dlci);
40730 dlci_get(dlci->gsm->dlci[0]);
40731 mux_get(dlci->gsm);
40732 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
40733 index 19083ef..6e34e97 100644
40734 --- a/drivers/tty/n_tty.c
40735 +++ b/drivers/tty/n_tty.c
40736 @@ -2196,6 +2196,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
40737 {
40738 *ops = tty_ldisc_N_TTY;
40739 ops->owner = NULL;
40740 - ops->refcount = ops->flags = 0;
40741 + atomic_set(&ops->refcount, 0);
40742 + ops->flags = 0;
40743 }
40744 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
40745 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
40746 index 79ff3a5..1fe9399 100644
40747 --- a/drivers/tty/pty.c
40748 +++ b/drivers/tty/pty.c
40749 @@ -791,8 +791,10 @@ static void __init unix98_pty_init(void)
40750 panic("Couldn't register Unix98 pts driver");
40751
40752 /* Now create the /dev/ptmx special device */
40753 + pax_open_kernel();
40754 tty_default_fops(&ptmx_fops);
40755 - ptmx_fops.open = ptmx_open;
40756 + *(void **)&ptmx_fops.open = ptmx_open;
40757 + pax_close_kernel();
40758
40759 cdev_init(&ptmx_cdev, &ptmx_fops);
40760 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
40761 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
40762 index e42009a..566a036 100644
40763 --- a/drivers/tty/rocket.c
40764 +++ b/drivers/tty/rocket.c
40765 @@ -925,7 +925,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
40766 tty->driver_data = info;
40767 tty_port_tty_set(port, tty);
40768
40769 - if (port->count++ == 0) {
40770 + if (atomic_inc_return(&port->count) == 1) {
40771 atomic_inc(&rp_num_ports_open);
40772
40773 #ifdef ROCKET_DEBUG_OPEN
40774 @@ -934,7 +934,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
40775 #endif
40776 }
40777 #ifdef ROCKET_DEBUG_OPEN
40778 - printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
40779 + printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
40780 #endif
40781
40782 /*
40783 @@ -1529,7 +1529,7 @@ static void rp_hangup(struct tty_struct *tty)
40784 spin_unlock_irqrestore(&info->port.lock, flags);
40785 return;
40786 }
40787 - if (info->port.count)
40788 + if (atomic_read(&info->port.count))
40789 atomic_dec(&rp_num_ports_open);
40790 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
40791 spin_unlock_irqrestore(&info->port.lock, flags);
40792 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
40793 index 1002054..dd644a8 100644
40794 --- a/drivers/tty/serial/kgdboc.c
40795 +++ b/drivers/tty/serial/kgdboc.c
40796 @@ -24,8 +24,9 @@
40797 #define MAX_CONFIG_LEN 40
40798
40799 static struct kgdb_io kgdboc_io_ops;
40800 +static struct kgdb_io kgdboc_io_ops_console;
40801
40802 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
40803 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
40804 static int configured = -1;
40805
40806 static char config[MAX_CONFIG_LEN];
40807 @@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
40808 kgdboc_unregister_kbd();
40809 if (configured == 1)
40810 kgdb_unregister_io_module(&kgdboc_io_ops);
40811 + else if (configured == 2)
40812 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
40813 }
40814
40815 static int configure_kgdboc(void)
40816 @@ -160,13 +163,13 @@ static int configure_kgdboc(void)
40817 int err;
40818 char *cptr = config;
40819 struct console *cons;
40820 + int is_console = 0;
40821
40822 err = kgdboc_option_setup(config);
40823 if (err || !strlen(config) || isspace(config[0]))
40824 goto noconfig;
40825
40826 err = -ENODEV;
40827 - kgdboc_io_ops.is_console = 0;
40828 kgdb_tty_driver = NULL;
40829
40830 kgdboc_use_kms = 0;
40831 @@ -187,7 +190,7 @@ static int configure_kgdboc(void)
40832 int idx;
40833 if (cons->device && cons->device(cons, &idx) == p &&
40834 idx == tty_line) {
40835 - kgdboc_io_ops.is_console = 1;
40836 + is_console = 1;
40837 break;
40838 }
40839 cons = cons->next;
40840 @@ -197,7 +200,13 @@ static int configure_kgdboc(void)
40841 kgdb_tty_line = tty_line;
40842
40843 do_register:
40844 - err = kgdb_register_io_module(&kgdboc_io_ops);
40845 + if (is_console) {
40846 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
40847 + configured = 2;
40848 + } else {
40849 + err = kgdb_register_io_module(&kgdboc_io_ops);
40850 + configured = 1;
40851 + }
40852 if (err)
40853 goto noconfig;
40854
40855 @@ -205,8 +214,6 @@ do_register:
40856 if (err)
40857 goto nmi_con_failed;
40858
40859 - configured = 1;
40860 -
40861 return 0;
40862
40863 nmi_con_failed:
40864 @@ -223,7 +230,7 @@ noconfig:
40865 static int __init init_kgdboc(void)
40866 {
40867 /* Already configured? */
40868 - if (configured == 1)
40869 + if (configured >= 1)
40870 return 0;
40871
40872 return configure_kgdboc();
40873 @@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
40874 if (config[len - 1] == '\n')
40875 config[len - 1] = '\0';
40876
40877 - if (configured == 1)
40878 + if (configured >= 1)
40879 cleanup_kgdboc();
40880
40881 /* Go and configure with the new params. */
40882 @@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
40883 .post_exception = kgdboc_post_exp_handler,
40884 };
40885
40886 +static struct kgdb_io kgdboc_io_ops_console = {
40887 + .name = "kgdboc",
40888 + .read_char = kgdboc_get_char,
40889 + .write_char = kgdboc_put_char,
40890 + .pre_exception = kgdboc_pre_exp_handler,
40891 + .post_exception = kgdboc_post_exp_handler,
40892 + .is_console = 1
40893 +};
40894 +
40895 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
40896 /* This is only available if kgdboc is a built in for early debugging */
40897 static int __init kgdboc_early_init(char *opt)
40898 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
40899 index e514b3a..c73d614 100644
40900 --- a/drivers/tty/serial/samsung.c
40901 +++ b/drivers/tty/serial/samsung.c
40902 @@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
40903 }
40904 }
40905
40906 +static int s3c64xx_serial_startup(struct uart_port *port);
40907 static int s3c24xx_serial_startup(struct uart_port *port)
40908 {
40909 struct s3c24xx_uart_port *ourport = to_ourport(port);
40910 int ret;
40911
40912 + /* Startup sequence is different for s3c64xx and higher SoC's */
40913 + if (s3c24xx_serial_has_interrupt_mask(port))
40914 + return s3c64xx_serial_startup(port);
40915 +
40916 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
40917 port->mapbase, port->membase);
40918
40919 @@ -1122,10 +1127,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
40920 /* setup info for port */
40921 port->dev = &platdev->dev;
40922
40923 - /* Startup sequence is different for s3c64xx and higher SoC's */
40924 - if (s3c24xx_serial_has_interrupt_mask(port))
40925 - s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
40926 -
40927 port->uartclk = 1;
40928
40929 if (cfg->uart_flags & UPF_CONS_FLOW) {
40930 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
40931 index 2c7230a..2104f16 100644
40932 --- a/drivers/tty/serial/serial_core.c
40933 +++ b/drivers/tty/serial/serial_core.c
40934 @@ -1455,7 +1455,7 @@ static void uart_hangup(struct tty_struct *tty)
40935 uart_flush_buffer(tty);
40936 uart_shutdown(tty, state);
40937 spin_lock_irqsave(&port->lock, flags);
40938 - port->count = 0;
40939 + atomic_set(&port->count, 0);
40940 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
40941 spin_unlock_irqrestore(&port->lock, flags);
40942 tty_port_tty_set(port, NULL);
40943 @@ -1551,7 +1551,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
40944 goto end;
40945 }
40946
40947 - port->count++;
40948 + atomic_inc(&port->count);
40949 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
40950 retval = -ENXIO;
40951 goto err_dec_count;
40952 @@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
40953 /*
40954 * Make sure the device is in D0 state.
40955 */
40956 - if (port->count == 1)
40957 + if (atomic_read(&port->count) == 1)
40958 uart_change_pm(state, 0);
40959
40960 /*
40961 @@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
40962 end:
40963 return retval;
40964 err_dec_count:
40965 - port->count--;
40966 + atomic_dec(&port->count);
40967 mutex_unlock(&port->mutex);
40968 goto end;
40969 }
40970 diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
40971 index 9e071f6..f30ae69 100644
40972 --- a/drivers/tty/synclink.c
40973 +++ b/drivers/tty/synclink.c
40974 @@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
40975
40976 if (debug_level >= DEBUG_LEVEL_INFO)
40977 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
40978 - __FILE__,__LINE__, info->device_name, info->port.count);
40979 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
40980
40981 if (tty_port_close_start(&info->port, tty, filp) == 0)
40982 goto cleanup;
40983 @@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
40984 cleanup:
40985 if (debug_level >= DEBUG_LEVEL_INFO)
40986 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
40987 - tty->driver->name, info->port.count);
40988 + tty->driver->name, atomic_read(&info->port.count));
40989
40990 } /* end of mgsl_close() */
40991
40992 @@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
40993
40994 mgsl_flush_buffer(tty);
40995 shutdown(info);
40996 -
40997 - info->port.count = 0;
40998 +
40999 + atomic_set(&info->port.count, 0);
41000 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
41001 info->port.tty = NULL;
41002
41003 @@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
41004
41005 if (debug_level >= DEBUG_LEVEL_INFO)
41006 printk("%s(%d):block_til_ready before block on %s count=%d\n",
41007 - __FILE__,__LINE__, tty->driver->name, port->count );
41008 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
41009
41010 spin_lock_irqsave(&info->irq_spinlock, flags);
41011 if (!tty_hung_up_p(filp)) {
41012 extra_count = true;
41013 - port->count--;
41014 + atomic_dec(&port->count);
41015 }
41016 spin_unlock_irqrestore(&info->irq_spinlock, flags);
41017 port->blocked_open++;
41018 @@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
41019
41020 if (debug_level >= DEBUG_LEVEL_INFO)
41021 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
41022 - __FILE__,__LINE__, tty->driver->name, port->count );
41023 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
41024
41025 tty_unlock(tty);
41026 schedule();
41027 @@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
41028
41029 /* FIXME: Racy on hangup during close wait */
41030 if (extra_count)
41031 - port->count++;
41032 + atomic_inc(&port->count);
41033 port->blocked_open--;
41034
41035 if (debug_level >= DEBUG_LEVEL_INFO)
41036 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
41037 - __FILE__,__LINE__, tty->driver->name, port->count );
41038 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
41039
41040 if (!retval)
41041 port->flags |= ASYNC_NORMAL_ACTIVE;
41042 @@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
41043
41044 if (debug_level >= DEBUG_LEVEL_INFO)
41045 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
41046 - __FILE__,__LINE__,tty->driver->name, info->port.count);
41047 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
41048
41049 /* If port is closing, signal caller to try again */
41050 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
41051 @@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
41052 spin_unlock_irqrestore(&info->netlock, flags);
41053 goto cleanup;
41054 }
41055 - info->port.count++;
41056 + atomic_inc(&info->port.count);
41057 spin_unlock_irqrestore(&info->netlock, flags);
41058
41059 - if (info->port.count == 1) {
41060 + if (atomic_read(&info->port.count) == 1) {
41061 /* 1st open on this device, init hardware */
41062 retval = startup(info);
41063 if (retval < 0)
41064 @@ -3451,8 +3451,8 @@ cleanup:
41065 if (retval) {
41066 if (tty->count == 1)
41067 info->port.tty = NULL; /* tty layer will release tty struct */
41068 - if(info->port.count)
41069 - info->port.count--;
41070 + if (atomic_read(&info->port.count))
41071 + atomic_dec(&info->port.count);
41072 }
41073
41074 return retval;
41075 @@ -7662,7 +7662,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
41076 unsigned short new_crctype;
41077
41078 /* return error if TTY interface open */
41079 - if (info->port.count)
41080 + if (atomic_read(&info->port.count))
41081 return -EBUSY;
41082
41083 switch (encoding)
41084 @@ -7757,7 +7757,7 @@ static int hdlcdev_open(struct net_device *dev)
41085
41086 /* arbitrate between network and tty opens */
41087 spin_lock_irqsave(&info->netlock, flags);
41088 - if (info->port.count != 0 || info->netcount != 0) {
41089 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
41090 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
41091 spin_unlock_irqrestore(&info->netlock, flags);
41092 return -EBUSY;
41093 @@ -7843,7 +7843,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
41094 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
41095
41096 /* return error if TTY interface open */
41097 - if (info->port.count)
41098 + if (atomic_read(&info->port.count))
41099 return -EBUSY;
41100
41101 if (cmd != SIOCWANDEV)
41102 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
41103 index aba1e59..877ac33 100644
41104 --- a/drivers/tty/synclink_gt.c
41105 +++ b/drivers/tty/synclink_gt.c
41106 @@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
41107 tty->driver_data = info;
41108 info->port.tty = tty;
41109
41110 - DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
41111 + DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
41112
41113 /* If port is closing, signal caller to try again */
41114 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
41115 @@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
41116 mutex_unlock(&info->port.mutex);
41117 goto cleanup;
41118 }
41119 - info->port.count++;
41120 + atomic_inc(&info->port.count);
41121 spin_unlock_irqrestore(&info->netlock, flags);
41122
41123 - if (info->port.count == 1) {
41124 + if (atomic_read(&info->port.count) == 1) {
41125 /* 1st open on this device, init hardware */
41126 retval = startup(info);
41127 if (retval < 0) {
41128 @@ -716,8 +716,8 @@ cleanup:
41129 if (retval) {
41130 if (tty->count == 1)
41131 info->port.tty = NULL; /* tty layer will release tty struct */
41132 - if(info->port.count)
41133 - info->port.count--;
41134 + if(atomic_read(&info->port.count))
41135 + atomic_dec(&info->port.count);
41136 }
41137
41138 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
41139 @@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
41140
41141 if (sanity_check(info, tty->name, "close"))
41142 return;
41143 - DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
41144 + DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
41145
41146 if (tty_port_close_start(&info->port, tty, filp) == 0)
41147 goto cleanup;
41148 @@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
41149 tty_port_close_end(&info->port, tty);
41150 info->port.tty = NULL;
41151 cleanup:
41152 - DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
41153 + DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
41154 }
41155
41156 static void hangup(struct tty_struct *tty)
41157 @@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
41158 shutdown(info);
41159
41160 spin_lock_irqsave(&info->port.lock, flags);
41161 - info->port.count = 0;
41162 + atomic_set(&info->port.count, 0);
41163 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
41164 info->port.tty = NULL;
41165 spin_unlock_irqrestore(&info->port.lock, flags);
41166 @@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
41167 unsigned short new_crctype;
41168
41169 /* return error if TTY interface open */
41170 - if (info->port.count)
41171 + if (atomic_read(&info->port.count))
41172 return -EBUSY;
41173
41174 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
41175 @@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
41176
41177 /* arbitrate between network and tty opens */
41178 spin_lock_irqsave(&info->netlock, flags);
41179 - if (info->port.count != 0 || info->netcount != 0) {
41180 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
41181 DBGINFO(("%s hdlc_open busy\n", dev->name));
41182 spin_unlock_irqrestore(&info->netlock, flags);
41183 return -EBUSY;
41184 @@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
41185 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
41186
41187 /* return error if TTY interface open */
41188 - if (info->port.count)
41189 + if (atomic_read(&info->port.count))
41190 return -EBUSY;
41191
41192 if (cmd != SIOCWANDEV)
41193 @@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
41194 if (port == NULL)
41195 continue;
41196 spin_lock(&port->lock);
41197 - if ((port->port.count || port->netcount) &&
41198 + if ((atomic_read(&port->port.count) || port->netcount) &&
41199 port->pending_bh && !port->bh_running &&
41200 !port->bh_requested) {
41201 DBGISR(("%s bh queued\n", port->device_name));
41202 @@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
41203 spin_lock_irqsave(&info->lock, flags);
41204 if (!tty_hung_up_p(filp)) {
41205 extra_count = true;
41206 - port->count--;
41207 + atomic_dec(&port->count);
41208 }
41209 spin_unlock_irqrestore(&info->lock, flags);
41210 port->blocked_open++;
41211 @@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
41212 remove_wait_queue(&port->open_wait, &wait);
41213
41214 if (extra_count)
41215 - port->count++;
41216 + atomic_inc(&port->count);
41217 port->blocked_open--;
41218
41219 if (!retval)
41220 diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
41221 index fd43fb6..34704ad 100644
41222 --- a/drivers/tty/synclinkmp.c
41223 +++ b/drivers/tty/synclinkmp.c
41224 @@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
41225
41226 if (debug_level >= DEBUG_LEVEL_INFO)
41227 printk("%s(%d):%s open(), old ref count = %d\n",
41228 - __FILE__,__LINE__,tty->driver->name, info->port.count);
41229 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
41230
41231 /* If port is closing, signal caller to try again */
41232 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
41233 @@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
41234 spin_unlock_irqrestore(&info->netlock, flags);
41235 goto cleanup;
41236 }
41237 - info->port.count++;
41238 + atomic_inc(&info->port.count);
41239 spin_unlock_irqrestore(&info->netlock, flags);
41240
41241 - if (info->port.count == 1) {
41242 + if (atomic_read(&info->port.count) == 1) {
41243 /* 1st open on this device, init hardware */
41244 retval = startup(info);
41245 if (retval < 0)
41246 @@ -797,8 +797,8 @@ cleanup:
41247 if (retval) {
41248 if (tty->count == 1)
41249 info->port.tty = NULL; /* tty layer will release tty struct */
41250 - if(info->port.count)
41251 - info->port.count--;
41252 + if(atomic_read(&info->port.count))
41253 + atomic_dec(&info->port.count);
41254 }
41255
41256 return retval;
41257 @@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
41258
41259 if (debug_level >= DEBUG_LEVEL_INFO)
41260 printk("%s(%d):%s close() entry, count=%d\n",
41261 - __FILE__,__LINE__, info->device_name, info->port.count);
41262 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
41263
41264 if (tty_port_close_start(&info->port, tty, filp) == 0)
41265 goto cleanup;
41266 @@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
41267 cleanup:
41268 if (debug_level >= DEBUG_LEVEL_INFO)
41269 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
41270 - tty->driver->name, info->port.count);
41271 + tty->driver->name, atomic_read(&info->port.count));
41272 }
41273
41274 /* Called by tty_hangup() when a hangup is signaled.
41275 @@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
41276 shutdown(info);
41277
41278 spin_lock_irqsave(&info->port.lock, flags);
41279 - info->port.count = 0;
41280 + atomic_set(&info->port.count, 0);
41281 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
41282 info->port.tty = NULL;
41283 spin_unlock_irqrestore(&info->port.lock, flags);
41284 @@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
41285 unsigned short new_crctype;
41286
41287 /* return error if TTY interface open */
41288 - if (info->port.count)
41289 + if (atomic_read(&info->port.count))
41290 return -EBUSY;
41291
41292 switch (encoding)
41293 @@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
41294
41295 /* arbitrate between network and tty opens */
41296 spin_lock_irqsave(&info->netlock, flags);
41297 - if (info->port.count != 0 || info->netcount != 0) {
41298 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
41299 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
41300 spin_unlock_irqrestore(&info->netlock, flags);
41301 return -EBUSY;
41302 @@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
41303 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
41304
41305 /* return error if TTY interface open */
41306 - if (info->port.count)
41307 + if (atomic_read(&info->port.count))
41308 return -EBUSY;
41309
41310 if (cmd != SIOCWANDEV)
41311 @@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
41312 * do not request bottom half processing if the
41313 * device is not open in a normal mode.
41314 */
41315 - if ( port && (port->port.count || port->netcount) &&
41316 + if ( port && (atomic_read(&port->port.count) || port->netcount) &&
41317 port->pending_bh && !port->bh_running &&
41318 !port->bh_requested ) {
41319 if ( debug_level >= DEBUG_LEVEL_ISR )
41320 @@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
41321
41322 if (debug_level >= DEBUG_LEVEL_INFO)
41323 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
41324 - __FILE__,__LINE__, tty->driver->name, port->count );
41325 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
41326
41327 spin_lock_irqsave(&info->lock, flags);
41328 if (!tty_hung_up_p(filp)) {
41329 extra_count = true;
41330 - port->count--;
41331 + atomic_dec(&port->count);
41332 }
41333 spin_unlock_irqrestore(&info->lock, flags);
41334 port->blocked_open++;
41335 @@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
41336
41337 if (debug_level >= DEBUG_LEVEL_INFO)
41338 printk("%s(%d):%s block_til_ready() count=%d\n",
41339 - __FILE__,__LINE__, tty->driver->name, port->count );
41340 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
41341
41342 tty_unlock(tty);
41343 schedule();
41344 @@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
41345 remove_wait_queue(&port->open_wait, &wait);
41346
41347 if (extra_count)
41348 - port->count++;
41349 + atomic_inc(&port->count);
41350 port->blocked_open--;
41351
41352 if (debug_level >= DEBUG_LEVEL_INFO)
41353 printk("%s(%d):%s block_til_ready() after, count=%d\n",
41354 - __FILE__,__LINE__, tty->driver->name, port->count );
41355 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
41356
41357 if (!retval)
41358 port->flags |= ASYNC_NORMAL_ACTIVE;
41359 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
41360 index b3c4a25..723916f 100644
41361 --- a/drivers/tty/sysrq.c
41362 +++ b/drivers/tty/sysrq.c
41363 @@ -867,7 +867,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
41364 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
41365 size_t count, loff_t *ppos)
41366 {
41367 - if (count) {
41368 + if (count && capable(CAP_SYS_ADMIN)) {
41369 char c;
41370
41371 if (get_user(c, buf))
41372 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
41373 index da9fde8..c07975f 100644
41374 --- a/drivers/tty/tty_io.c
41375 +++ b/drivers/tty/tty_io.c
41376 @@ -3391,7 +3391,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
41377
41378 void tty_default_fops(struct file_operations *fops)
41379 {
41380 - *fops = tty_fops;
41381 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
41382 }
41383
41384 /*
41385 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
41386 index c578229..45aa9ee 100644
41387 --- a/drivers/tty/tty_ldisc.c
41388 +++ b/drivers/tty/tty_ldisc.c
41389 @@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
41390 if (atomic_dec_and_test(&ld->users)) {
41391 struct tty_ldisc_ops *ldo = ld->ops;
41392
41393 - ldo->refcount--;
41394 + atomic_dec(&ldo->refcount);
41395 module_put(ldo->owner);
41396 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
41397
41398 @@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
41399 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
41400 tty_ldiscs[disc] = new_ldisc;
41401 new_ldisc->num = disc;
41402 - new_ldisc->refcount = 0;
41403 + atomic_set(&new_ldisc->refcount, 0);
41404 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
41405
41406 return ret;
41407 @@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
41408 return -EINVAL;
41409
41410 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
41411 - if (tty_ldiscs[disc]->refcount)
41412 + if (atomic_read(&tty_ldiscs[disc]->refcount))
41413 ret = -EBUSY;
41414 else
41415 tty_ldiscs[disc] = NULL;
41416 @@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
41417 if (ldops) {
41418 ret = ERR_PTR(-EAGAIN);
41419 if (try_module_get(ldops->owner)) {
41420 - ldops->refcount++;
41421 + atomic_inc(&ldops->refcount);
41422 ret = ldops;
41423 }
41424 }
41425 @@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
41426 unsigned long flags;
41427
41428 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
41429 - ldops->refcount--;
41430 + atomic_dec(&ldops->refcount);
41431 module_put(ldops->owner);
41432 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
41433 }
41434 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
41435 index b7ff59d..7c6105e 100644
41436 --- a/drivers/tty/tty_port.c
41437 +++ b/drivers/tty/tty_port.c
41438 @@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
41439 unsigned long flags;
41440
41441 spin_lock_irqsave(&port->lock, flags);
41442 - port->count = 0;
41443 + atomic_set(&port->count, 0);
41444 port->flags &= ~ASYNC_NORMAL_ACTIVE;
41445 if (port->tty) {
41446 set_bit(TTY_IO_ERROR, &port->tty->flags);
41447 @@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
41448 /* The port lock protects the port counts */
41449 spin_lock_irqsave(&port->lock, flags);
41450 if (!tty_hung_up_p(filp))
41451 - port->count--;
41452 + atomic_dec(&port->count);
41453 port->blocked_open++;
41454 spin_unlock_irqrestore(&port->lock, flags);
41455
41456 @@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
41457 we must not mess that up further */
41458 spin_lock_irqsave(&port->lock, flags);
41459 if (!tty_hung_up_p(filp))
41460 - port->count++;
41461 + atomic_inc(&port->count);
41462 port->blocked_open--;
41463 if (retval == 0)
41464 port->flags |= ASYNC_NORMAL_ACTIVE;
41465 @@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
41466 return 0;
41467 }
41468
41469 - if (tty->count == 1 && port->count != 1) {
41470 + if (tty->count == 1 && atomic_read(&port->count) != 1) {
41471 printk(KERN_WARNING
41472 "tty_port_close_start: tty->count = 1 port count = %d.\n",
41473 - port->count);
41474 - port->count = 1;
41475 + atomic_read(&port->count));
41476 + atomic_set(&port->count, 1);
41477 }
41478 - if (--port->count < 0) {
41479 + if (atomic_dec_return(&port->count) < 0) {
41480 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
41481 - port->count);
41482 - port->count = 0;
41483 + atomic_read(&port->count));
41484 + atomic_set(&port->count, 0);
41485 }
41486
41487 - if (port->count) {
41488 + if (atomic_read(&port->count)) {
41489 spin_unlock_irqrestore(&port->lock, flags);
41490 if (port->ops->drop)
41491 port->ops->drop(port);
41492 @@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
41493 {
41494 spin_lock_irq(&port->lock);
41495 if (!tty_hung_up_p(filp))
41496 - ++port->count;
41497 + atomic_inc(&port->count);
41498 spin_unlock_irq(&port->lock);
41499 tty_port_tty_set(port, tty);
41500
41501 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
41502 index 681765b..d3ccdf2 100644
41503 --- a/drivers/tty/vt/keyboard.c
41504 +++ b/drivers/tty/vt/keyboard.c
41505 @@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
41506 kbd->kbdmode == VC_OFF) &&
41507 value != KVAL(K_SAK))
41508 return; /* SAK is allowed even in raw mode */
41509 +
41510 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
41511 + {
41512 + void *func = fn_handler[value];
41513 + if (func == fn_show_state || func == fn_show_ptregs ||
41514 + func == fn_show_mem)
41515 + return;
41516 + }
41517 +#endif
41518 +
41519 fn_handler[value](vc);
41520 }
41521
41522 @@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
41523 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
41524 return -EFAULT;
41525
41526 - if (!capable(CAP_SYS_TTY_CONFIG))
41527 - perm = 0;
41528 -
41529 switch (cmd) {
41530 case KDGKBENT:
41531 /* Ensure another thread doesn't free it under us */
41532 @@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
41533 spin_unlock_irqrestore(&kbd_event_lock, flags);
41534 return put_user(val, &user_kbe->kb_value);
41535 case KDSKBENT:
41536 + if (!capable(CAP_SYS_TTY_CONFIG))
41537 + perm = 0;
41538 +
41539 if (!perm)
41540 return -EPERM;
41541 if (!i && v == K_NOSUCHMAP) {
41542 @@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
41543 int i, j, k;
41544 int ret;
41545
41546 - if (!capable(CAP_SYS_TTY_CONFIG))
41547 - perm = 0;
41548 -
41549 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
41550 if (!kbs) {
41551 ret = -ENOMEM;
41552 @@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
41553 kfree(kbs);
41554 return ((p && *p) ? -EOVERFLOW : 0);
41555 case KDSKBSENT:
41556 + if (!capable(CAP_SYS_TTY_CONFIG))
41557 + perm = 0;
41558 +
41559 if (!perm) {
41560 ret = -EPERM;
41561 goto reterr;
41562 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
41563 index 5110f36..8dc0a74 100644
41564 --- a/drivers/uio/uio.c
41565 +++ b/drivers/uio/uio.c
41566 @@ -25,6 +25,7 @@
41567 #include <linux/kobject.h>
41568 #include <linux/cdev.h>
41569 #include <linux/uio_driver.h>
41570 +#include <asm/local.h>
41571
41572 #define UIO_MAX_DEVICES (1U << MINORBITS)
41573
41574 @@ -32,10 +33,10 @@ struct uio_device {
41575 struct module *owner;
41576 struct device *dev;
41577 int minor;
41578 - atomic_t event;
41579 + atomic_unchecked_t event;
41580 struct fasync_struct *async_queue;
41581 wait_queue_head_t wait;
41582 - int vma_count;
41583 + local_t vma_count;
41584 struct uio_info *info;
41585 struct kobject *map_dir;
41586 struct kobject *portio_dir;
41587 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
41588 struct device_attribute *attr, char *buf)
41589 {
41590 struct uio_device *idev = dev_get_drvdata(dev);
41591 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
41592 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
41593 }
41594
41595 static struct device_attribute uio_class_attributes[] = {
41596 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
41597 {
41598 struct uio_device *idev = info->uio_dev;
41599
41600 - atomic_inc(&idev->event);
41601 + atomic_inc_unchecked(&idev->event);
41602 wake_up_interruptible(&idev->wait);
41603 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
41604 }
41605 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
41606 }
41607
41608 listener->dev = idev;
41609 - listener->event_count = atomic_read(&idev->event);
41610 + listener->event_count = atomic_read_unchecked(&idev->event);
41611 filep->private_data = listener;
41612
41613 if (idev->info->open) {
41614 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
41615 return -EIO;
41616
41617 poll_wait(filep, &idev->wait, wait);
41618 - if (listener->event_count != atomic_read(&idev->event))
41619 + if (listener->event_count != atomic_read_unchecked(&idev->event))
41620 return POLLIN | POLLRDNORM;
41621 return 0;
41622 }
41623 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
41624 do {
41625 set_current_state(TASK_INTERRUPTIBLE);
41626
41627 - event_count = atomic_read(&idev->event);
41628 + event_count = atomic_read_unchecked(&idev->event);
41629 if (event_count != listener->event_count) {
41630 if (copy_to_user(buf, &event_count, count))
41631 retval = -EFAULT;
41632 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
41633 static void uio_vma_open(struct vm_area_struct *vma)
41634 {
41635 struct uio_device *idev = vma->vm_private_data;
41636 - idev->vma_count++;
41637 + local_inc(&idev->vma_count);
41638 }
41639
41640 static void uio_vma_close(struct vm_area_struct *vma)
41641 {
41642 struct uio_device *idev = vma->vm_private_data;
41643 - idev->vma_count--;
41644 + local_dec(&idev->vma_count);
41645 }
41646
41647 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41648 @@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
41649 idev->owner = owner;
41650 idev->info = info;
41651 init_waitqueue_head(&idev->wait);
41652 - atomic_set(&idev->event, 0);
41653 + atomic_set_unchecked(&idev->event, 0);
41654
41655 ret = uio_get_minor(idev);
41656 if (ret)
41657 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
41658 index b7eb86a..36d28af 100644
41659 --- a/drivers/usb/atm/cxacru.c
41660 +++ b/drivers/usb/atm/cxacru.c
41661 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
41662 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
41663 if (ret < 2)
41664 return -EINVAL;
41665 - if (index < 0 || index > 0x7f)
41666 + if (index > 0x7f)
41667 return -EINVAL;
41668 pos += tmp;
41669
41670 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
41671 index 35f10bf..6a38a0b 100644
41672 --- a/drivers/usb/atm/usbatm.c
41673 +++ b/drivers/usb/atm/usbatm.c
41674 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
41675 if (printk_ratelimit())
41676 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
41677 __func__, vpi, vci);
41678 - atomic_inc(&vcc->stats->rx_err);
41679 + atomic_inc_unchecked(&vcc->stats->rx_err);
41680 return;
41681 }
41682
41683 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
41684 if (length > ATM_MAX_AAL5_PDU) {
41685 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
41686 __func__, length, vcc);
41687 - atomic_inc(&vcc->stats->rx_err);
41688 + atomic_inc_unchecked(&vcc->stats->rx_err);
41689 goto out;
41690 }
41691
41692 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
41693 if (sarb->len < pdu_length) {
41694 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
41695 __func__, pdu_length, sarb->len, vcc);
41696 - atomic_inc(&vcc->stats->rx_err);
41697 + atomic_inc_unchecked(&vcc->stats->rx_err);
41698 goto out;
41699 }
41700
41701 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
41702 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
41703 __func__, vcc);
41704 - atomic_inc(&vcc->stats->rx_err);
41705 + atomic_inc_unchecked(&vcc->stats->rx_err);
41706 goto out;
41707 }
41708
41709 @@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
41710 if (printk_ratelimit())
41711 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
41712 __func__, length);
41713 - atomic_inc(&vcc->stats->rx_drop);
41714 + atomic_inc_unchecked(&vcc->stats->rx_drop);
41715 goto out;
41716 }
41717
41718 @@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
41719
41720 vcc->push(vcc, skb);
41721
41722 - atomic_inc(&vcc->stats->rx);
41723 + atomic_inc_unchecked(&vcc->stats->rx);
41724 out:
41725 skb_trim(sarb, 0);
41726 }
41727 @@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
41728 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
41729
41730 usbatm_pop(vcc, skb);
41731 - atomic_inc(&vcc->stats->tx);
41732 + atomic_inc_unchecked(&vcc->stats->tx);
41733
41734 skb = skb_dequeue(&instance->sndqueue);
41735 }
41736 @@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
41737 if (!left--)
41738 return sprintf(page,
41739 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
41740 - atomic_read(&atm_dev->stats.aal5.tx),
41741 - atomic_read(&atm_dev->stats.aal5.tx_err),
41742 - atomic_read(&atm_dev->stats.aal5.rx),
41743 - atomic_read(&atm_dev->stats.aal5.rx_err),
41744 - atomic_read(&atm_dev->stats.aal5.rx_drop));
41745 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
41746 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
41747 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
41748 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
41749 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
41750
41751 if (!left--) {
41752 if (instance->disconnected)
41753 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
41754 index cbacea9..246cccd 100644
41755 --- a/drivers/usb/core/devices.c
41756 +++ b/drivers/usb/core/devices.c
41757 @@ -126,7 +126,7 @@ static const char format_endpt[] =
41758 * time it gets called.
41759 */
41760 static struct device_connect_event {
41761 - atomic_t count;
41762 + atomic_unchecked_t count;
41763 wait_queue_head_t wait;
41764 } device_event = {
41765 .count = ATOMIC_INIT(1),
41766 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
41767
41768 void usbfs_conn_disc_event(void)
41769 {
41770 - atomic_add(2, &device_event.count);
41771 + atomic_add_unchecked(2, &device_event.count);
41772 wake_up(&device_event.wait);
41773 }
41774
41775 @@ -645,7 +645,7 @@ static unsigned int usb_device_poll(struct file *file,
41776
41777 poll_wait(file, &device_event.wait, wait);
41778
41779 - event_count = atomic_read(&device_event.count);
41780 + event_count = atomic_read_unchecked(&device_event.count);
41781 if (file->f_version != event_count) {
41782 file->f_version = event_count;
41783 return POLLIN | POLLRDNORM;
41784 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
41785 index 8e64adf..9a33a3c 100644
41786 --- a/drivers/usb/core/hcd.c
41787 +++ b/drivers/usb/core/hcd.c
41788 @@ -1522,7 +1522,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
41789 */
41790 usb_get_urb(urb);
41791 atomic_inc(&urb->use_count);
41792 - atomic_inc(&urb->dev->urbnum);
41793 + atomic_inc_unchecked(&urb->dev->urbnum);
41794 usbmon_urb_submit(&hcd->self, urb);
41795
41796 /* NOTE requirements on root-hub callers (usbfs and the hub
41797 @@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
41798 urb->hcpriv = NULL;
41799 INIT_LIST_HEAD(&urb->urb_list);
41800 atomic_dec(&urb->use_count);
41801 - atomic_dec(&urb->dev->urbnum);
41802 + atomic_dec_unchecked(&urb->dev->urbnum);
41803 if (atomic_read(&urb->reject))
41804 wake_up(&usb_kill_urb_queue);
41805 usb_put_urb(urb);
41806 diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
41807 index 818e4a0..0fc9589 100644
41808 --- a/drivers/usb/core/sysfs.c
41809 +++ b/drivers/usb/core/sysfs.c
41810 @@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
41811 struct usb_device *udev;
41812
41813 udev = to_usb_device(dev);
41814 - return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
41815 + return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
41816 }
41817 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
41818
41819 diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
41820 index f81b925..78d22ec 100644
41821 --- a/drivers/usb/core/usb.c
41822 +++ b/drivers/usb/core/usb.c
41823 @@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
41824 set_dev_node(&dev->dev, dev_to_node(bus->controller));
41825 dev->state = USB_STATE_ATTACHED;
41826 dev->lpm_disable_count = 1;
41827 - atomic_set(&dev->urbnum, 0);
41828 + atomic_set_unchecked(&dev->urbnum, 0);
41829
41830 INIT_LIST_HEAD(&dev->ep0.urb_list);
41831 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
41832 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
41833 index 5e29dde..eca992f 100644
41834 --- a/drivers/usb/early/ehci-dbgp.c
41835 +++ b/drivers/usb/early/ehci-dbgp.c
41836 @@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
41837
41838 #ifdef CONFIG_KGDB
41839 static struct kgdb_io kgdbdbgp_io_ops;
41840 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
41841 +static struct kgdb_io kgdbdbgp_io_ops_console;
41842 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
41843 #else
41844 #define dbgp_kgdb_mode (0)
41845 #endif
41846 @@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
41847 .write_char = kgdbdbgp_write_char,
41848 };
41849
41850 +static struct kgdb_io kgdbdbgp_io_ops_console = {
41851 + .name = "kgdbdbgp",
41852 + .read_char = kgdbdbgp_read_char,
41853 + .write_char = kgdbdbgp_write_char,
41854 + .is_console = 1
41855 +};
41856 +
41857 static int kgdbdbgp_wait_time;
41858
41859 static int __init kgdbdbgp_parse_config(char *str)
41860 @@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
41861 ptr++;
41862 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
41863 }
41864 - kgdb_register_io_module(&kgdbdbgp_io_ops);
41865 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
41866 + if (early_dbgp_console.index != -1)
41867 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
41868 + else
41869 + kgdb_register_io_module(&kgdbdbgp_io_ops);
41870
41871 return 0;
41872 }
41873 diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
41874 index 598dcc1..032dd4f 100644
41875 --- a/drivers/usb/gadget/u_serial.c
41876 +++ b/drivers/usb/gadget/u_serial.c
41877 @@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
41878 spin_lock_irq(&port->port_lock);
41879
41880 /* already open? Great. */
41881 - if (port->port.count) {
41882 + if (atomic_read(&port->port.count)) {
41883 status = 0;
41884 - port->port.count++;
41885 + atomic_inc(&port->port.count);
41886
41887 /* currently opening/closing? wait ... */
41888 } else if (port->openclose) {
41889 @@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
41890 tty->driver_data = port;
41891 port->port.tty = tty;
41892
41893 - port->port.count = 1;
41894 + atomic_set(&port->port.count, 1);
41895 port->openclose = false;
41896
41897 /* if connected, start the I/O stream */
41898 @@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
41899
41900 spin_lock_irq(&port->port_lock);
41901
41902 - if (port->port.count != 1) {
41903 - if (port->port.count == 0)
41904 + if (atomic_read(&port->port.count) != 1) {
41905 + if (atomic_read(&port->port.count) == 0)
41906 WARN_ON(1);
41907 else
41908 - --port->port.count;
41909 + atomic_dec(&port->port.count);
41910 goto exit;
41911 }
41912
41913 @@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
41914 * and sleep if necessary
41915 */
41916 port->openclose = true;
41917 - port->port.count = 0;
41918 + atomic_set(&port->port.count, 0);
41919
41920 gser = port->port_usb;
41921 if (gser && gser->disconnect)
41922 @@ -1159,7 +1159,7 @@ static int gs_closed(struct gs_port *port)
41923 int cond;
41924
41925 spin_lock_irq(&port->port_lock);
41926 - cond = (port->port.count == 0) && !port->openclose;
41927 + cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
41928 spin_unlock_irq(&port->port_lock);
41929 return cond;
41930 }
41931 @@ -1273,7 +1273,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
41932 /* if it's already open, start I/O ... and notify the serial
41933 * protocol about open/close status (connect/disconnect).
41934 */
41935 - if (port->port.count) {
41936 + if (atomic_read(&port->port.count)) {
41937 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
41938 gs_start_io(port);
41939 if (gser->connect)
41940 @@ -1320,7 +1320,7 @@ void gserial_disconnect(struct gserial *gser)
41941
41942 port->port_usb = NULL;
41943 gser->ioport = NULL;
41944 - if (port->port.count > 0 || port->openclose) {
41945 + if (atomic_read(&port->port.count) > 0 || port->openclose) {
41946 wake_up_interruptible(&port->drain_wait);
41947 if (port->port.tty)
41948 tty_hangup(port->port.tty);
41949 @@ -1336,7 +1336,7 @@ void gserial_disconnect(struct gserial *gser)
41950
41951 /* finally, free any unused/unusable I/O buffers */
41952 spin_lock_irqsave(&port->port_lock, flags);
41953 - if (port->port.count == 0 && !port->openclose)
41954 + if (atomic_read(&port->port.count) == 0 && !port->openclose)
41955 gs_buf_free(&port->port_write_buf);
41956 gs_free_requests(gser->out, &port->read_pool, NULL);
41957 gs_free_requests(gser->out, &port->read_queue, NULL);
41958 diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
41959 index 5f3bcd3..bfca43f 100644
41960 --- a/drivers/usb/serial/console.c
41961 +++ b/drivers/usb/serial/console.c
41962 @@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
41963
41964 info->port = port;
41965
41966 - ++port->port.count;
41967 + atomic_inc(&port->port.count);
41968 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
41969 if (serial->type->set_termios) {
41970 /*
41971 @@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
41972 }
41973 /* Now that any required fake tty operations are completed restore
41974 * the tty port count */
41975 - --port->port.count;
41976 + atomic_dec(&port->port.count);
41977 /* The console is special in terms of closing the device so
41978 * indicate this port is now acting as a system console. */
41979 port->port.console = 1;
41980 @@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
41981 free_tty:
41982 kfree(tty);
41983 reset_open_count:
41984 - port->port.count = 0;
41985 + atomic_set(&port->port.count, 0);
41986 usb_autopm_put_interface(serial->interface);
41987 error_get_interface:
41988 usb_serial_put(serial);
41989 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
41990 index d6bea3e..60b250e 100644
41991 --- a/drivers/usb/wusbcore/wa-hc.h
41992 +++ b/drivers/usb/wusbcore/wa-hc.h
41993 @@ -192,7 +192,7 @@ struct wahc {
41994 struct list_head xfer_delayed_list;
41995 spinlock_t xfer_list_lock;
41996 struct work_struct xfer_work;
41997 - atomic_t xfer_id_count;
41998 + atomic_unchecked_t xfer_id_count;
41999 };
42000
42001
42002 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42003 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42004 spin_lock_init(&wa->xfer_list_lock);
42005 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42006 - atomic_set(&wa->xfer_id_count, 1);
42007 + atomic_set_unchecked(&wa->xfer_id_count, 1);
42008 }
42009
42010 /**
42011 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42012 index 57c01ab..8a05959 100644
42013 --- a/drivers/usb/wusbcore/wa-xfer.c
42014 +++ b/drivers/usb/wusbcore/wa-xfer.c
42015 @@ -296,7 +296,7 @@ out:
42016 */
42017 static void wa_xfer_id_init(struct wa_xfer *xfer)
42018 {
42019 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42020 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42021 }
42022
42023 /*
42024 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42025 index 8c55011..eed4ae1a 100644
42026 --- a/drivers/video/aty/aty128fb.c
42027 +++ b/drivers/video/aty/aty128fb.c
42028 @@ -149,7 +149,7 @@ enum {
42029 };
42030
42031 /* Must match above enum */
42032 -static char * const r128_family[] = {
42033 +static const char * const r128_family[] = {
42034 "AGP",
42035 "PCI",
42036 "PRO AGP",
42037 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
42038 index 5c3960d..15cf8fc 100644
42039 --- a/drivers/video/fbcmap.c
42040 +++ b/drivers/video/fbcmap.c
42041 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
42042 rc = -ENODEV;
42043 goto out;
42044 }
42045 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
42046 - !info->fbops->fb_setcmap)) {
42047 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
42048 rc = -EINVAL;
42049 goto out1;
42050 }
42051 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
42052 index dc61c12..e29796e 100644
42053 --- a/drivers/video/fbmem.c
42054 +++ b/drivers/video/fbmem.c
42055 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
42056 image->dx += image->width + 8;
42057 }
42058 } else if (rotate == FB_ROTATE_UD) {
42059 - for (x = 0; x < num && image->dx >= 0; x++) {
42060 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
42061 info->fbops->fb_imageblit(info, image);
42062 image->dx -= image->width + 8;
42063 }
42064 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
42065 image->dy += image->height + 8;
42066 }
42067 } else if (rotate == FB_ROTATE_CCW) {
42068 - for (x = 0; x < num && image->dy >= 0; x++) {
42069 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
42070 info->fbops->fb_imageblit(info, image);
42071 image->dy -= image->height + 8;
42072 }
42073 @@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
42074 return -EFAULT;
42075 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
42076 return -EINVAL;
42077 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
42078 + if (con2fb.framebuffer >= FB_MAX)
42079 return -EINVAL;
42080 if (!registered_fb[con2fb.framebuffer])
42081 request_module("fb%d", con2fb.framebuffer);
42082 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
42083 index 7672d2e..b56437f 100644
42084 --- a/drivers/video/i810/i810_accel.c
42085 +++ b/drivers/video/i810/i810_accel.c
42086 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
42087 }
42088 }
42089 printk("ringbuffer lockup!!!\n");
42090 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
42091 i810_report_error(mmio);
42092 par->dev_flags |= LOCKUP;
42093 info->pixmap.scan_align = 1;
42094 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
42095 index 3c14e43..eafa544 100644
42096 --- a/drivers/video/logo/logo_linux_clut224.ppm
42097 +++ b/drivers/video/logo/logo_linux_clut224.ppm
42098 @@ -1,1604 +1,1123 @@
42099 P3
42100 -# Standard 224-color Linux logo
42101 80 80
42102 255
42103 - 0 0 0 0 0 0 0 0 0 0 0 0
42104 - 0 0 0 0 0 0 0 0 0 0 0 0
42105 - 0 0 0 0 0 0 0 0 0 0 0 0
42106 - 0 0 0 0 0 0 0 0 0 0 0 0
42107 - 0 0 0 0 0 0 0 0 0 0 0 0
42108 - 0 0 0 0 0 0 0 0 0 0 0 0
42109 - 0 0 0 0 0 0 0 0 0 0 0 0
42110 - 0 0 0 0 0 0 0 0 0 0 0 0
42111 - 0 0 0 0 0 0 0 0 0 0 0 0
42112 - 6 6 6 6 6 6 10 10 10 10 10 10
42113 - 10 10 10 6 6 6 6 6 6 6 6 6
42114 - 0 0 0 0 0 0 0 0 0 0 0 0
42115 - 0 0 0 0 0 0 0 0 0 0 0 0
42116 - 0 0 0 0 0 0 0 0 0 0 0 0
42117 - 0 0 0 0 0 0 0 0 0 0 0 0
42118 - 0 0 0 0 0 0 0 0 0 0 0 0
42119 - 0 0 0 0 0 0 0 0 0 0 0 0
42120 - 0 0 0 0 0 0 0 0 0 0 0 0
42121 - 0 0 0 0 0 0 0 0 0 0 0 0
42122 - 0 0 0 0 0 0 0 0 0 0 0 0
42123 - 0 0 0 0 0 0 0 0 0 0 0 0
42124 - 0 0 0 0 0 0 0 0 0 0 0 0
42125 - 0 0 0 0 0 0 0 0 0 0 0 0
42126 - 0 0 0 0 0 0 0 0 0 0 0 0
42127 - 0 0 0 0 0 0 0 0 0 0 0 0
42128 - 0 0 0 0 0 0 0 0 0 0 0 0
42129 - 0 0 0 0 0 0 0 0 0 0 0 0
42130 - 0 0 0 0 0 0 0 0 0 0 0 0
42131 - 0 0 0 6 6 6 10 10 10 14 14 14
42132 - 22 22 22 26 26 26 30 30 30 34 34 34
42133 - 30 30 30 30 30 30 26 26 26 18 18 18
42134 - 14 14 14 10 10 10 6 6 6 0 0 0
42135 - 0 0 0 0 0 0 0 0 0 0 0 0
42136 - 0 0 0 0 0 0 0 0 0 0 0 0
42137 - 0 0 0 0 0 0 0 0 0 0 0 0
42138 - 0 0 0 0 0 0 0 0 0 0 0 0
42139 - 0 0 0 0 0 0 0 0 0 0 0 0
42140 - 0 0 0 0 0 0 0 0 0 0 0 0
42141 - 0 0 0 0 0 0 0 0 0 0 0 0
42142 - 0 0 0 0 0 0 0 0 0 0 0 0
42143 - 0 0 0 0 0 0 0 0 0 0 0 0
42144 - 0 0 0 0 0 1 0 0 1 0 0 0
42145 - 0 0 0 0 0 0 0 0 0 0 0 0
42146 - 0 0 0 0 0 0 0 0 0 0 0 0
42147 - 0 0 0 0 0 0 0 0 0 0 0 0
42148 - 0 0 0 0 0 0 0 0 0 0 0 0
42149 - 0 0 0 0 0 0 0 0 0 0 0 0
42150 - 0 0 0 0 0 0 0 0 0 0 0 0
42151 - 6 6 6 14 14 14 26 26 26 42 42 42
42152 - 54 54 54 66 66 66 78 78 78 78 78 78
42153 - 78 78 78 74 74 74 66 66 66 54 54 54
42154 - 42 42 42 26 26 26 18 18 18 10 10 10
42155 - 6 6 6 0 0 0 0 0 0 0 0 0
42156 - 0 0 0 0 0 0 0 0 0 0 0 0
42157 - 0 0 0 0 0 0 0 0 0 0 0 0
42158 - 0 0 0 0 0 0 0 0 0 0 0 0
42159 - 0 0 0 0 0 0 0 0 0 0 0 0
42160 - 0 0 0 0 0 0 0 0 0 0 0 0
42161 - 0 0 0 0 0 0 0 0 0 0 0 0
42162 - 0 0 0 0 0 0 0 0 0 0 0 0
42163 - 0 0 0 0 0 0 0 0 0 0 0 0
42164 - 0 0 1 0 0 0 0 0 0 0 0 0
42165 - 0 0 0 0 0 0 0 0 0 0 0 0
42166 - 0 0 0 0 0 0 0 0 0 0 0 0
42167 - 0 0 0 0 0 0 0 0 0 0 0 0
42168 - 0 0 0 0 0 0 0 0 0 0 0 0
42169 - 0 0 0 0 0 0 0 0 0 0 0 0
42170 - 0 0 0 0 0 0 0 0 0 10 10 10
42171 - 22 22 22 42 42 42 66 66 66 86 86 86
42172 - 66 66 66 38 38 38 38 38 38 22 22 22
42173 - 26 26 26 34 34 34 54 54 54 66 66 66
42174 - 86 86 86 70 70 70 46 46 46 26 26 26
42175 - 14 14 14 6 6 6 0 0 0 0 0 0
42176 - 0 0 0 0 0 0 0 0 0 0 0 0
42177 - 0 0 0 0 0 0 0 0 0 0 0 0
42178 - 0 0 0 0 0 0 0 0 0 0 0 0
42179 - 0 0 0 0 0 0 0 0 0 0 0 0
42180 - 0 0 0 0 0 0 0 0 0 0 0 0
42181 - 0 0 0 0 0 0 0 0 0 0 0 0
42182 - 0 0 0 0 0 0 0 0 0 0 0 0
42183 - 0 0 0 0 0 0 0 0 0 0 0 0
42184 - 0 0 1 0 0 1 0 0 1 0 0 0
42185 - 0 0 0 0 0 0 0 0 0 0 0 0
42186 - 0 0 0 0 0 0 0 0 0 0 0 0
42187 - 0 0 0 0 0 0 0 0 0 0 0 0
42188 - 0 0 0 0 0 0 0 0 0 0 0 0
42189 - 0 0 0 0 0 0 0 0 0 0 0 0
42190 - 0 0 0 0 0 0 10 10 10 26 26 26
42191 - 50 50 50 82 82 82 58 58 58 6 6 6
42192 - 2 2 6 2 2 6 2 2 6 2 2 6
42193 - 2 2 6 2 2 6 2 2 6 2 2 6
42194 - 6 6 6 54 54 54 86 86 86 66 66 66
42195 - 38 38 38 18 18 18 6 6 6 0 0 0
42196 - 0 0 0 0 0 0 0 0 0 0 0 0
42197 - 0 0 0 0 0 0 0 0 0 0 0 0
42198 - 0 0 0 0 0 0 0 0 0 0 0 0
42199 - 0 0 0 0 0 0 0 0 0 0 0 0
42200 - 0 0 0 0 0 0 0 0 0 0 0 0
42201 - 0 0 0 0 0 0 0 0 0 0 0 0
42202 - 0 0 0 0 0 0 0 0 0 0 0 0
42203 - 0 0 0 0 0 0 0 0 0 0 0 0
42204 - 0 0 0 0 0 0 0 0 0 0 0 0
42205 - 0 0 0 0 0 0 0 0 0 0 0 0
42206 - 0 0 0 0 0 0 0 0 0 0 0 0
42207 - 0 0 0 0 0 0 0 0 0 0 0 0
42208 - 0 0 0 0 0 0 0 0 0 0 0 0
42209 - 0 0 0 0 0 0 0 0 0 0 0 0
42210 - 0 0 0 6 6 6 22 22 22 50 50 50
42211 - 78 78 78 34 34 34 2 2 6 2 2 6
42212 - 2 2 6 2 2 6 2 2 6 2 2 6
42213 - 2 2 6 2 2 6 2 2 6 2 2 6
42214 - 2 2 6 2 2 6 6 6 6 70 70 70
42215 - 78 78 78 46 46 46 22 22 22 6 6 6
42216 - 0 0 0 0 0 0 0 0 0 0 0 0
42217 - 0 0 0 0 0 0 0 0 0 0 0 0
42218 - 0 0 0 0 0 0 0 0 0 0 0 0
42219 - 0 0 0 0 0 0 0 0 0 0 0 0
42220 - 0 0 0 0 0 0 0 0 0 0 0 0
42221 - 0 0 0 0 0 0 0 0 0 0 0 0
42222 - 0 0 0 0 0 0 0 0 0 0 0 0
42223 - 0 0 0 0 0 0 0 0 0 0 0 0
42224 - 0 0 1 0 0 1 0 0 1 0 0 0
42225 - 0 0 0 0 0 0 0 0 0 0 0 0
42226 - 0 0 0 0 0 0 0 0 0 0 0 0
42227 - 0 0 0 0 0 0 0 0 0 0 0 0
42228 - 0 0 0 0 0 0 0 0 0 0 0 0
42229 - 0 0 0 0 0 0 0 0 0 0 0 0
42230 - 6 6 6 18 18 18 42 42 42 82 82 82
42231 - 26 26 26 2 2 6 2 2 6 2 2 6
42232 - 2 2 6 2 2 6 2 2 6 2 2 6
42233 - 2 2 6 2 2 6 2 2 6 14 14 14
42234 - 46 46 46 34 34 34 6 6 6 2 2 6
42235 - 42 42 42 78 78 78 42 42 42 18 18 18
42236 - 6 6 6 0 0 0 0 0 0 0 0 0
42237 - 0 0 0 0 0 0 0 0 0 0 0 0
42238 - 0 0 0 0 0 0 0 0 0 0 0 0
42239 - 0 0 0 0 0 0 0 0 0 0 0 0
42240 - 0 0 0 0 0 0 0 0 0 0 0 0
42241 - 0 0 0 0 0 0 0 0 0 0 0 0
42242 - 0 0 0 0 0 0 0 0 0 0 0 0
42243 - 0 0 0 0 0 0 0 0 0 0 0 0
42244 - 0 0 1 0 0 0 0 0 1 0 0 0
42245 - 0 0 0 0 0 0 0 0 0 0 0 0
42246 - 0 0 0 0 0 0 0 0 0 0 0 0
42247 - 0 0 0 0 0 0 0 0 0 0 0 0
42248 - 0 0 0 0 0 0 0 0 0 0 0 0
42249 - 0 0 0 0 0 0 0 0 0 0 0 0
42250 - 10 10 10 30 30 30 66 66 66 58 58 58
42251 - 2 2 6 2 2 6 2 2 6 2 2 6
42252 - 2 2 6 2 2 6 2 2 6 2 2 6
42253 - 2 2 6 2 2 6 2 2 6 26 26 26
42254 - 86 86 86 101 101 101 46 46 46 10 10 10
42255 - 2 2 6 58 58 58 70 70 70 34 34 34
42256 - 10 10 10 0 0 0 0 0 0 0 0 0
42257 - 0 0 0 0 0 0 0 0 0 0 0 0
42258 - 0 0 0 0 0 0 0 0 0 0 0 0
42259 - 0 0 0 0 0 0 0 0 0 0 0 0
42260 - 0 0 0 0 0 0 0 0 0 0 0 0
42261 - 0 0 0 0 0 0 0 0 0 0 0 0
42262 - 0 0 0 0 0 0 0 0 0 0 0 0
42263 - 0 0 0 0 0 0 0 0 0 0 0 0
42264 - 0 0 1 0 0 1 0 0 1 0 0 0
42265 - 0 0 0 0 0 0 0 0 0 0 0 0
42266 - 0 0 0 0 0 0 0 0 0 0 0 0
42267 - 0 0 0 0 0 0 0 0 0 0 0 0
42268 - 0 0 0 0 0 0 0 0 0 0 0 0
42269 - 0 0 0 0 0 0 0 0 0 0 0 0
42270 - 14 14 14 42 42 42 86 86 86 10 10 10
42271 - 2 2 6 2 2 6 2 2 6 2 2 6
42272 - 2 2 6 2 2 6 2 2 6 2 2 6
42273 - 2 2 6 2 2 6 2 2 6 30 30 30
42274 - 94 94 94 94 94 94 58 58 58 26 26 26
42275 - 2 2 6 6 6 6 78 78 78 54 54 54
42276 - 22 22 22 6 6 6 0 0 0 0 0 0
42277 - 0 0 0 0 0 0 0 0 0 0 0 0
42278 - 0 0 0 0 0 0 0 0 0 0 0 0
42279 - 0 0 0 0 0 0 0 0 0 0 0 0
42280 - 0 0 0 0 0 0 0 0 0 0 0 0
42281 - 0 0 0 0 0 0 0 0 0 0 0 0
42282 - 0 0 0 0 0 0 0 0 0 0 0 0
42283 - 0 0 0 0 0 0 0 0 0 0 0 0
42284 - 0 0 0 0 0 0 0 0 0 0 0 0
42285 - 0 0 0 0 0 0 0 0 0 0 0 0
42286 - 0 0 0 0 0 0 0 0 0 0 0 0
42287 - 0 0 0 0 0 0 0 0 0 0 0 0
42288 - 0 0 0 0 0 0 0 0 0 0 0 0
42289 - 0 0 0 0 0 0 0 0 0 6 6 6
42290 - 22 22 22 62 62 62 62 62 62 2 2 6
42291 - 2 2 6 2 2 6 2 2 6 2 2 6
42292 - 2 2 6 2 2 6 2 2 6 2 2 6
42293 - 2 2 6 2 2 6 2 2 6 26 26 26
42294 - 54 54 54 38 38 38 18 18 18 10 10 10
42295 - 2 2 6 2 2 6 34 34 34 82 82 82
42296 - 38 38 38 14 14 14 0 0 0 0 0 0
42297 - 0 0 0 0 0 0 0 0 0 0 0 0
42298 - 0 0 0 0 0 0 0 0 0 0 0 0
42299 - 0 0 0 0 0 0 0 0 0 0 0 0
42300 - 0 0 0 0 0 0 0 0 0 0 0 0
42301 - 0 0 0 0 0 0 0 0 0 0 0 0
42302 - 0 0 0 0 0 0 0 0 0 0 0 0
42303 - 0 0 0 0 0 0 0 0 0 0 0 0
42304 - 0 0 0 0 0 1 0 0 1 0 0 0
42305 - 0 0 0 0 0 0 0 0 0 0 0 0
42306 - 0 0 0 0 0 0 0 0 0 0 0 0
42307 - 0 0 0 0 0 0 0 0 0 0 0 0
42308 - 0 0 0 0 0 0 0 0 0 0 0 0
42309 - 0 0 0 0 0 0 0 0 0 6 6 6
42310 - 30 30 30 78 78 78 30 30 30 2 2 6
42311 - 2 2 6 2 2 6 2 2 6 2 2 6
42312 - 2 2 6 2 2 6 2 2 6 2 2 6
42313 - 2 2 6 2 2 6 2 2 6 10 10 10
42314 - 10 10 10 2 2 6 2 2 6 2 2 6
42315 - 2 2 6 2 2 6 2 2 6 78 78 78
42316 - 50 50 50 18 18 18 6 6 6 0 0 0
42317 - 0 0 0 0 0 0 0 0 0 0 0 0
42318 - 0 0 0 0 0 0 0 0 0 0 0 0
42319 - 0 0 0 0 0 0 0 0 0 0 0 0
42320 - 0 0 0 0 0 0 0 0 0 0 0 0
42321 - 0 0 0 0 0 0 0 0 0 0 0 0
42322 - 0 0 0 0 0 0 0 0 0 0 0 0
42323 - 0 0 0 0 0 0 0 0 0 0 0 0
42324 - 0 0 1 0 0 0 0 0 0 0 0 0
42325 - 0 0 0 0 0 0 0 0 0 0 0 0
42326 - 0 0 0 0 0 0 0 0 0 0 0 0
42327 - 0 0 0 0 0 0 0 0 0 0 0 0
42328 - 0 0 0 0 0 0 0 0 0 0 0 0
42329 - 0 0 0 0 0 0 0 0 0 10 10 10
42330 - 38 38 38 86 86 86 14 14 14 2 2 6
42331 - 2 2 6 2 2 6 2 2 6 2 2 6
42332 - 2 2 6 2 2 6 2 2 6 2 2 6
42333 - 2 2 6 2 2 6 2 2 6 2 2 6
42334 - 2 2 6 2 2 6 2 2 6 2 2 6
42335 - 2 2 6 2 2 6 2 2 6 54 54 54
42336 - 66 66 66 26 26 26 6 6 6 0 0 0
42337 - 0 0 0 0 0 0 0 0 0 0 0 0
42338 - 0 0 0 0 0 0 0 0 0 0 0 0
42339 - 0 0 0 0 0 0 0 0 0 0 0 0
42340 - 0 0 0 0 0 0 0 0 0 0 0 0
42341 - 0 0 0 0 0 0 0 0 0 0 0 0
42342 - 0 0 0 0 0 0 0 0 0 0 0 0
42343 - 0 0 0 0 0 0 0 0 0 0 0 0
42344 - 0 0 0 0 0 1 0 0 1 0 0 0
42345 - 0 0 0 0 0 0 0 0 0 0 0 0
42346 - 0 0 0 0 0 0 0 0 0 0 0 0
42347 - 0 0 0 0 0 0 0 0 0 0 0 0
42348 - 0 0 0 0 0 0 0 0 0 0 0 0
42349 - 0 0 0 0 0 0 0 0 0 14 14 14
42350 - 42 42 42 82 82 82 2 2 6 2 2 6
42351 - 2 2 6 6 6 6 10 10 10 2 2 6
42352 - 2 2 6 2 2 6 2 2 6 2 2 6
42353 - 2 2 6 2 2 6 2 2 6 6 6 6
42354 - 14 14 14 10 10 10 2 2 6 2 2 6
42355 - 2 2 6 2 2 6 2 2 6 18 18 18
42356 - 82 82 82 34 34 34 10 10 10 0 0 0
42357 - 0 0 0 0 0 0 0 0 0 0 0 0
42358 - 0 0 0 0 0 0 0 0 0 0 0 0
42359 - 0 0 0 0 0 0 0 0 0 0 0 0
42360 - 0 0 0 0 0 0 0 0 0 0 0 0
42361 - 0 0 0 0 0 0 0 0 0 0 0 0
42362 - 0 0 0 0 0 0 0 0 0 0 0 0
42363 - 0 0 0 0 0 0 0 0 0 0 0 0
42364 - 0 0 1 0 0 0 0 0 0 0 0 0
42365 - 0 0 0 0 0 0 0 0 0 0 0 0
42366 - 0 0 0 0 0 0 0 0 0 0 0 0
42367 - 0 0 0 0 0 0 0 0 0 0 0 0
42368 - 0 0 0 0 0 0 0 0 0 0 0 0
42369 - 0 0 0 0 0 0 0 0 0 14 14 14
42370 - 46 46 46 86 86 86 2 2 6 2 2 6
42371 - 6 6 6 6 6 6 22 22 22 34 34 34
42372 - 6 6 6 2 2 6 2 2 6 2 2 6
42373 - 2 2 6 2 2 6 18 18 18 34 34 34
42374 - 10 10 10 50 50 50 22 22 22 2 2 6
42375 - 2 2 6 2 2 6 2 2 6 10 10 10
42376 - 86 86 86 42 42 42 14 14 14 0 0 0
42377 - 0 0 0 0 0 0 0 0 0 0 0 0
42378 - 0 0 0 0 0 0 0 0 0 0 0 0
42379 - 0 0 0 0 0 0 0 0 0 0 0 0
42380 - 0 0 0 0 0 0 0 0 0 0 0 0
42381 - 0 0 0 0 0 0 0 0 0 0 0 0
42382 - 0 0 0 0 0 0 0 0 0 0 0 0
42383 - 0 0 0 0 0 0 0 0 0 0 0 0
42384 - 0 0 1 0 0 1 0 0 1 0 0 0
42385 - 0 0 0 0 0 0 0 0 0 0 0 0
42386 - 0 0 0 0 0 0 0 0 0 0 0 0
42387 - 0 0 0 0 0 0 0 0 0 0 0 0
42388 - 0 0 0 0 0 0 0 0 0 0 0 0
42389 - 0 0 0 0 0 0 0 0 0 14 14 14
42390 - 46 46 46 86 86 86 2 2 6 2 2 6
42391 - 38 38 38 116 116 116 94 94 94 22 22 22
42392 - 22 22 22 2 2 6 2 2 6 2 2 6
42393 - 14 14 14 86 86 86 138 138 138 162 162 162
42394 -154 154 154 38 38 38 26 26 26 6 6 6
42395 - 2 2 6 2 2 6 2 2 6 2 2 6
42396 - 86 86 86 46 46 46 14 14 14 0 0 0
42397 - 0 0 0 0 0 0 0 0 0 0 0 0
42398 - 0 0 0 0 0 0 0 0 0 0 0 0
42399 - 0 0 0 0 0 0 0 0 0 0 0 0
42400 - 0 0 0 0 0 0 0 0 0 0 0 0
42401 - 0 0 0 0 0 0 0 0 0 0 0 0
42402 - 0 0 0 0 0 0 0 0 0 0 0 0
42403 - 0 0 0 0 0 0 0 0 0 0 0 0
42404 - 0 0 0 0 0 0 0 0 0 0 0 0
42405 - 0 0 0 0 0 0 0 0 0 0 0 0
42406 - 0 0 0 0 0 0 0 0 0 0 0 0
42407 - 0 0 0 0 0 0 0 0 0 0 0 0
42408 - 0 0 0 0 0 0 0 0 0 0 0 0
42409 - 0 0 0 0 0 0 0 0 0 14 14 14
42410 - 46 46 46 86 86 86 2 2 6 14 14 14
42411 -134 134 134 198 198 198 195 195 195 116 116 116
42412 - 10 10 10 2 2 6 2 2 6 6 6 6
42413 -101 98 89 187 187 187 210 210 210 218 218 218
42414 -214 214 214 134 134 134 14 14 14 6 6 6
42415 - 2 2 6 2 2 6 2 2 6 2 2 6
42416 - 86 86 86 50 50 50 18 18 18 6 6 6
42417 - 0 0 0 0 0 0 0 0 0 0 0 0
42418 - 0 0 0 0 0 0 0 0 0 0 0 0
42419 - 0 0 0 0 0 0 0 0 0 0 0 0
42420 - 0 0 0 0 0 0 0 0 0 0 0 0
42421 - 0 0 0 0 0 0 0 0 0 0 0 0
42422 - 0 0 0 0 0 0 0 0 0 0 0 0
42423 - 0 0 0 0 0 0 0 0 1 0 0 0
42424 - 0 0 1 0 0 1 0 0 1 0 0 0
42425 - 0 0 0 0 0 0 0 0 0 0 0 0
42426 - 0 0 0 0 0 0 0 0 0 0 0 0
42427 - 0 0 0 0 0 0 0 0 0 0 0 0
42428 - 0 0 0 0 0 0 0 0 0 0 0 0
42429 - 0 0 0 0 0 0 0 0 0 14 14 14
42430 - 46 46 46 86 86 86 2 2 6 54 54 54
42431 -218 218 218 195 195 195 226 226 226 246 246 246
42432 - 58 58 58 2 2 6 2 2 6 30 30 30
42433 -210 210 210 253 253 253 174 174 174 123 123 123
42434 -221 221 221 234 234 234 74 74 74 2 2 6
42435 - 2 2 6 2 2 6 2 2 6 2 2 6
42436 - 70 70 70 58 58 58 22 22 22 6 6 6
42437 - 0 0 0 0 0 0 0 0 0 0 0 0
42438 - 0 0 0 0 0 0 0 0 0 0 0 0
42439 - 0 0 0 0 0 0 0 0 0 0 0 0
42440 - 0 0 0 0 0 0 0 0 0 0 0 0
42441 - 0 0 0 0 0 0 0 0 0 0 0 0
42442 - 0 0 0 0 0 0 0 0 0 0 0 0
42443 - 0 0 0 0 0 0 0 0 0 0 0 0
42444 - 0 0 0 0 0 0 0 0 0 0 0 0
42445 - 0 0 0 0 0 0 0 0 0 0 0 0
42446 - 0 0 0 0 0 0 0 0 0 0 0 0
42447 - 0 0 0 0 0 0 0 0 0 0 0 0
42448 - 0 0 0 0 0 0 0 0 0 0 0 0
42449 - 0 0 0 0 0 0 0 0 0 14 14 14
42450 - 46 46 46 82 82 82 2 2 6 106 106 106
42451 -170 170 170 26 26 26 86 86 86 226 226 226
42452 -123 123 123 10 10 10 14 14 14 46 46 46
42453 -231 231 231 190 190 190 6 6 6 70 70 70
42454 - 90 90 90 238 238 238 158 158 158 2 2 6
42455 - 2 2 6 2 2 6 2 2 6 2 2 6
42456 - 70 70 70 58 58 58 22 22 22 6 6 6
42457 - 0 0 0 0 0 0 0 0 0 0 0 0
42458 - 0 0 0 0 0 0 0 0 0 0 0 0
42459 - 0 0 0 0 0 0 0 0 0 0 0 0
42460 - 0 0 0 0 0 0 0 0 0 0 0 0
42461 - 0 0 0 0 0 0 0 0 0 0 0 0
42462 - 0 0 0 0 0 0 0 0 0 0 0 0
42463 - 0 0 0 0 0 0 0 0 1 0 0 0
42464 - 0 0 1 0 0 1 0 0 1 0 0 0
42465 - 0 0 0 0 0 0 0 0 0 0 0 0
42466 - 0 0 0 0 0 0 0 0 0 0 0 0
42467 - 0 0 0 0 0 0 0 0 0 0 0 0
42468 - 0 0 0 0 0 0 0 0 0 0 0 0
42469 - 0 0 0 0 0 0 0 0 0 14 14 14
42470 - 42 42 42 86 86 86 6 6 6 116 116 116
42471 -106 106 106 6 6 6 70 70 70 149 149 149
42472 -128 128 128 18 18 18 38 38 38 54 54 54
42473 -221 221 221 106 106 106 2 2 6 14 14 14
42474 - 46 46 46 190 190 190 198 198 198 2 2 6
42475 - 2 2 6 2 2 6 2 2 6 2 2 6
42476 - 74 74 74 62 62 62 22 22 22 6 6 6
42477 - 0 0 0 0 0 0 0 0 0 0 0 0
42478 - 0 0 0 0 0 0 0 0 0 0 0 0
42479 - 0 0 0 0 0 0 0 0 0 0 0 0
42480 - 0 0 0 0 0 0 0 0 0 0 0 0
42481 - 0 0 0 0 0 0 0 0 0 0 0 0
42482 - 0 0 0 0 0 0 0 0 0 0 0 0
42483 - 0 0 0 0 0 0 0 0 1 0 0 0
42484 - 0 0 1 0 0 0 0 0 1 0 0 0
42485 - 0 0 0 0 0 0 0 0 0 0 0 0
42486 - 0 0 0 0 0 0 0 0 0 0 0 0
42487 - 0 0 0 0 0 0 0 0 0 0 0 0
42488 - 0 0 0 0 0 0 0 0 0 0 0 0
42489 - 0 0 0 0 0 0 0 0 0 14 14 14
42490 - 42 42 42 94 94 94 14 14 14 101 101 101
42491 -128 128 128 2 2 6 18 18 18 116 116 116
42492 -118 98 46 121 92 8 121 92 8 98 78 10
42493 -162 162 162 106 106 106 2 2 6 2 2 6
42494 - 2 2 6 195 195 195 195 195 195 6 6 6
42495 - 2 2 6 2 2 6 2 2 6 2 2 6
42496 - 74 74 74 62 62 62 22 22 22 6 6 6
42497 - 0 0 0 0 0 0 0 0 0 0 0 0
42498 - 0 0 0 0 0 0 0 0 0 0 0 0
42499 - 0 0 0 0 0 0 0 0 0 0 0 0
42500 - 0 0 0 0 0 0 0 0 0 0 0 0
42501 - 0 0 0 0 0 0 0 0 0 0 0 0
42502 - 0 0 0 0 0 0 0 0 0 0 0 0
42503 - 0 0 0 0 0 0 0 0 1 0 0 1
42504 - 0 0 1 0 0 0 0 0 1 0 0 0
42505 - 0 0 0 0 0 0 0 0 0 0 0 0
42506 - 0 0 0 0 0 0 0 0 0 0 0 0
42507 - 0 0 0 0 0 0 0 0 0 0 0 0
42508 - 0 0 0 0 0 0 0 0 0 0 0 0
42509 - 0 0 0 0 0 0 0 0 0 10 10 10
42510 - 38 38 38 90 90 90 14 14 14 58 58 58
42511 -210 210 210 26 26 26 54 38 6 154 114 10
42512 -226 170 11 236 186 11 225 175 15 184 144 12
42513 -215 174 15 175 146 61 37 26 9 2 2 6
42514 - 70 70 70 246 246 246 138 138 138 2 2 6
42515 - 2 2 6 2 2 6 2 2 6 2 2 6
42516 - 70 70 70 66 66 66 26 26 26 6 6 6
42517 - 0 0 0 0 0 0 0 0 0 0 0 0
42518 - 0 0 0 0 0 0 0 0 0 0 0 0
42519 - 0 0 0 0 0 0 0 0 0 0 0 0
42520 - 0 0 0 0 0 0 0 0 0 0 0 0
42521 - 0 0 0 0 0 0 0 0 0 0 0 0
42522 - 0 0 0 0 0 0 0 0 0 0 0 0
42523 - 0 0 0 0 0 0 0 0 0 0 0 0
42524 - 0 0 0 0 0 0 0 0 0 0 0 0
42525 - 0 0 0 0 0 0 0 0 0 0 0 0
42526 - 0 0 0 0 0 0 0 0 0 0 0 0
42527 - 0 0 0 0 0 0 0 0 0 0 0 0
42528 - 0 0 0 0 0 0 0 0 0 0 0 0
42529 - 0 0 0 0 0 0 0 0 0 10 10 10
42530 - 38 38 38 86 86 86 14 14 14 10 10 10
42531 -195 195 195 188 164 115 192 133 9 225 175 15
42532 -239 182 13 234 190 10 232 195 16 232 200 30
42533 -245 207 45 241 208 19 232 195 16 184 144 12
42534 -218 194 134 211 206 186 42 42 42 2 2 6
42535 - 2 2 6 2 2 6 2 2 6 2 2 6
42536 - 50 50 50 74 74 74 30 30 30 6 6 6
42537 - 0 0 0 0 0 0 0 0 0 0 0 0
42538 - 0 0 0 0 0 0 0 0 0 0 0 0
42539 - 0 0 0 0 0 0 0 0 0 0 0 0
42540 - 0 0 0 0 0 0 0 0 0 0 0 0
42541 - 0 0 0 0 0 0 0 0 0 0 0 0
42542 - 0 0 0 0 0 0 0 0 0 0 0 0
42543 - 0 0 0 0 0 0 0 0 0 0 0 0
42544 - 0 0 0 0 0 0 0 0 0 0 0 0
42545 - 0 0 0 0 0 0 0 0 0 0 0 0
42546 - 0 0 0 0 0 0 0 0 0 0 0 0
42547 - 0 0 0 0 0 0 0 0 0 0 0 0
42548 - 0 0 0 0 0 0 0 0 0 0 0 0
42549 - 0 0 0 0 0 0 0 0 0 10 10 10
42550 - 34 34 34 86 86 86 14 14 14 2 2 6
42551 -121 87 25 192 133 9 219 162 10 239 182 13
42552 -236 186 11 232 195 16 241 208 19 244 214 54
42553 -246 218 60 246 218 38 246 215 20 241 208 19
42554 -241 208 19 226 184 13 121 87 25 2 2 6
42555 - 2 2 6 2 2 6 2 2 6 2 2 6
42556 - 50 50 50 82 82 82 34 34 34 10 10 10
42557 - 0 0 0 0 0 0 0 0 0 0 0 0
42558 - 0 0 0 0 0 0 0 0 0 0 0 0
42559 - 0 0 0 0 0 0 0 0 0 0 0 0
42560 - 0 0 0 0 0 0 0 0 0 0 0 0
42561 - 0 0 0 0 0 0 0 0 0 0 0 0
42562 - 0 0 0 0 0 0 0 0 0 0 0 0
42563 - 0 0 0 0 0 0 0 0 0 0 0 0
42564 - 0 0 0 0 0 0 0 0 0 0 0 0
42565 - 0 0 0 0 0 0 0 0 0 0 0 0
42566 - 0 0 0 0 0 0 0 0 0 0 0 0
42567 - 0 0 0 0 0 0 0 0 0 0 0 0
42568 - 0 0 0 0 0 0 0 0 0 0 0 0
42569 - 0 0 0 0 0 0 0 0 0 10 10 10
42570 - 34 34 34 82 82 82 30 30 30 61 42 6
42571 -180 123 7 206 145 10 230 174 11 239 182 13
42572 -234 190 10 238 202 15 241 208 19 246 218 74
42573 -246 218 38 246 215 20 246 215 20 246 215 20
42574 -226 184 13 215 174 15 184 144 12 6 6 6
42575 - 2 2 6 2 2 6 2 2 6 2 2 6
42576 - 26 26 26 94 94 94 42 42 42 14 14 14
42577 - 0 0 0 0 0 0 0 0 0 0 0 0
42578 - 0 0 0 0 0 0 0 0 0 0 0 0
42579 - 0 0 0 0 0 0 0 0 0 0 0 0
42580 - 0 0 0 0 0 0 0 0 0 0 0 0
42581 - 0 0 0 0 0 0 0 0 0 0 0 0
42582 - 0 0 0 0 0 0 0 0 0 0 0 0
42583 - 0 0 0 0 0 0 0 0 0 0 0 0
42584 - 0 0 0 0 0 0 0 0 0 0 0 0
42585 - 0 0 0 0 0 0 0 0 0 0 0 0
42586 - 0 0 0 0 0 0 0 0 0 0 0 0
42587 - 0 0 0 0 0 0 0 0 0 0 0 0
42588 - 0 0 0 0 0 0 0 0 0 0 0 0
42589 - 0 0 0 0 0 0 0 0 0 10 10 10
42590 - 30 30 30 78 78 78 50 50 50 104 69 6
42591 -192 133 9 216 158 10 236 178 12 236 186 11
42592 -232 195 16 241 208 19 244 214 54 245 215 43
42593 -246 215 20 246 215 20 241 208 19 198 155 10
42594 -200 144 11 216 158 10 156 118 10 2 2 6
42595 - 2 2 6 2 2 6 2 2 6 2 2 6
42596 - 6 6 6 90 90 90 54 54 54 18 18 18
42597 - 6 6 6 0 0 0 0 0 0 0 0 0
42598 - 0 0 0 0 0 0 0 0 0 0 0 0
42599 - 0 0 0 0 0 0 0 0 0 0 0 0
42600 - 0 0 0 0 0 0 0 0 0 0 0 0
42601 - 0 0 0 0 0 0 0 0 0 0 0 0
42602 - 0 0 0 0 0 0 0 0 0 0 0 0
42603 - 0 0 0 0 0 0 0 0 0 0 0 0
42604 - 0 0 0 0 0 0 0 0 0 0 0 0
42605 - 0 0 0 0 0 0 0 0 0 0 0 0
42606 - 0 0 0 0 0 0 0 0 0 0 0 0
42607 - 0 0 0 0 0 0 0 0 0 0 0 0
42608 - 0 0 0 0 0 0 0 0 0 0 0 0
42609 - 0 0 0 0 0 0 0 0 0 10 10 10
42610 - 30 30 30 78 78 78 46 46 46 22 22 22
42611 -137 92 6 210 162 10 239 182 13 238 190 10
42612 -238 202 15 241 208 19 246 215 20 246 215 20
42613 -241 208 19 203 166 17 185 133 11 210 150 10
42614 -216 158 10 210 150 10 102 78 10 2 2 6
42615 - 6 6 6 54 54 54 14 14 14 2 2 6
42616 - 2 2 6 62 62 62 74 74 74 30 30 30
42617 - 10 10 10 0 0 0 0 0 0 0 0 0
42618 - 0 0 0 0 0 0 0 0 0 0 0 0
42619 - 0 0 0 0 0 0 0 0 0 0 0 0
42620 - 0 0 0 0 0 0 0 0 0 0 0 0
42621 - 0 0 0 0 0 0 0 0 0 0 0 0
42622 - 0 0 0 0 0 0 0 0 0 0 0 0
42623 - 0 0 0 0 0 0 0 0 0 0 0 0
42624 - 0 0 0 0 0 0 0 0 0 0 0 0
42625 - 0 0 0 0 0 0 0 0 0 0 0 0
42626 - 0 0 0 0 0 0 0 0 0 0 0 0
42627 - 0 0 0 0 0 0 0 0 0 0 0 0
42628 - 0 0 0 0 0 0 0 0 0 0 0 0
42629 - 0 0 0 0 0 0 0 0 0 10 10 10
42630 - 34 34 34 78 78 78 50 50 50 6 6 6
42631 - 94 70 30 139 102 15 190 146 13 226 184 13
42632 -232 200 30 232 195 16 215 174 15 190 146 13
42633 -168 122 10 192 133 9 210 150 10 213 154 11
42634 -202 150 34 182 157 106 101 98 89 2 2 6
42635 - 2 2 6 78 78 78 116 116 116 58 58 58
42636 - 2 2 6 22 22 22 90 90 90 46 46 46
42637 - 18 18 18 6 6 6 0 0 0 0 0 0
42638 - 0 0 0 0 0 0 0 0 0 0 0 0
42639 - 0 0 0 0 0 0 0 0 0 0 0 0
42640 - 0 0 0 0 0 0 0 0 0 0 0 0
42641 - 0 0 0 0 0 0 0 0 0 0 0 0
42642 - 0 0 0 0 0 0 0 0 0 0 0 0
42643 - 0 0 0 0 0 0 0 0 0 0 0 0
42644 - 0 0 0 0 0 0 0 0 0 0 0 0
42645 - 0 0 0 0 0 0 0 0 0 0 0 0
42646 - 0 0 0 0 0 0 0 0 0 0 0 0
42647 - 0 0 0 0 0 0 0 0 0 0 0 0
42648 - 0 0 0 0 0 0 0 0 0 0 0 0
42649 - 0 0 0 0 0 0 0 0 0 10 10 10
42650 - 38 38 38 86 86 86 50 50 50 6 6 6
42651 -128 128 128 174 154 114 156 107 11 168 122 10
42652 -198 155 10 184 144 12 197 138 11 200 144 11
42653 -206 145 10 206 145 10 197 138 11 188 164 115
42654 -195 195 195 198 198 198 174 174 174 14 14 14
42655 - 2 2 6 22 22 22 116 116 116 116 116 116
42656 - 22 22 22 2 2 6 74 74 74 70 70 70
42657 - 30 30 30 10 10 10 0 0 0 0 0 0
42658 - 0 0 0 0 0 0 0 0 0 0 0 0
42659 - 0 0 0 0 0 0 0 0 0 0 0 0
42660 - 0 0 0 0 0 0 0 0 0 0 0 0
42661 - 0 0 0 0 0 0 0 0 0 0 0 0
42662 - 0 0 0 0 0 0 0 0 0 0 0 0
42663 - 0 0 0 0 0 0 0 0 0 0 0 0
42664 - 0 0 0 0 0 0 0 0 0 0 0 0
42665 - 0 0 0 0 0 0 0 0 0 0 0 0
42666 - 0 0 0 0 0 0 0 0 0 0 0 0
42667 - 0 0 0 0 0 0 0 0 0 0 0 0
42668 - 0 0 0 0 0 0 0 0 0 0 0 0
42669 - 0 0 0 0 0 0 6 6 6 18 18 18
42670 - 50 50 50 101 101 101 26 26 26 10 10 10
42671 -138 138 138 190 190 190 174 154 114 156 107 11
42672 -197 138 11 200 144 11 197 138 11 192 133 9
42673 -180 123 7 190 142 34 190 178 144 187 187 187
42674 -202 202 202 221 221 221 214 214 214 66 66 66
42675 - 2 2 6 2 2 6 50 50 50 62 62 62
42676 - 6 6 6 2 2 6 10 10 10 90 90 90
42677 - 50 50 50 18 18 18 6 6 6 0 0 0
42678 - 0 0 0 0 0 0 0 0 0 0 0 0
42679 - 0 0 0 0 0 0 0 0 0 0 0 0
42680 - 0 0 0 0 0 0 0 0 0 0 0 0
42681 - 0 0 0 0 0 0 0 0 0 0 0 0
42682 - 0 0 0 0 0 0 0 0 0 0 0 0
42683 - 0 0 0 0 0 0 0 0 0 0 0 0
42684 - 0 0 0 0 0 0 0 0 0 0 0 0
42685 - 0 0 0 0 0 0 0 0 0 0 0 0
42686 - 0 0 0 0 0 0 0 0 0 0 0 0
42687 - 0 0 0 0 0 0 0 0 0 0 0 0
42688 - 0 0 0 0 0 0 0 0 0 0 0 0
42689 - 0 0 0 0 0 0 10 10 10 34 34 34
42690 - 74 74 74 74 74 74 2 2 6 6 6 6
42691 -144 144 144 198 198 198 190 190 190 178 166 146
42692 -154 121 60 156 107 11 156 107 11 168 124 44
42693 -174 154 114 187 187 187 190 190 190 210 210 210
42694 -246 246 246 253 253 253 253 253 253 182 182 182
42695 - 6 6 6 2 2 6 2 2 6 2 2 6
42696 - 2 2 6 2 2 6 2 2 6 62 62 62
42697 - 74 74 74 34 34 34 14 14 14 0 0 0
42698 - 0 0 0 0 0 0 0 0 0 0 0 0
42699 - 0 0 0 0 0 0 0 0 0 0 0 0
42700 - 0 0 0 0 0 0 0 0 0 0 0 0
42701 - 0 0 0 0 0 0 0 0 0 0 0 0
42702 - 0 0 0 0 0 0 0 0 0 0 0 0
42703 - 0 0 0 0 0 0 0 0 0 0 0 0
42704 - 0 0 0 0 0 0 0 0 0 0 0 0
42705 - 0 0 0 0 0 0 0 0 0 0 0 0
42706 - 0 0 0 0 0 0 0 0 0 0 0 0
42707 - 0 0 0 0 0 0 0 0 0 0 0 0
42708 - 0 0 0 0 0 0 0 0 0 0 0 0
42709 - 0 0 0 10 10 10 22 22 22 54 54 54
42710 - 94 94 94 18 18 18 2 2 6 46 46 46
42711 -234 234 234 221 221 221 190 190 190 190 190 190
42712 -190 190 190 187 187 187 187 187 187 190 190 190
42713 -190 190 190 195 195 195 214 214 214 242 242 242
42714 -253 253 253 253 253 253 253 253 253 253 253 253
42715 - 82 82 82 2 2 6 2 2 6 2 2 6
42716 - 2 2 6 2 2 6 2 2 6 14 14 14
42717 - 86 86 86 54 54 54 22 22 22 6 6 6
42718 - 0 0 0 0 0 0 0 0 0 0 0 0
42719 - 0 0 0 0 0 0 0 0 0 0 0 0
42720 - 0 0 0 0 0 0 0 0 0 0 0 0
42721 - 0 0 0 0 0 0 0 0 0 0 0 0
42722 - 0 0 0 0 0 0 0 0 0 0 0 0
42723 - 0 0 0 0 0 0 0 0 0 0 0 0
42724 - 0 0 0 0 0 0 0 0 0 0 0 0
42725 - 0 0 0 0 0 0 0 0 0 0 0 0
42726 - 0 0 0 0 0 0 0 0 0 0 0 0
42727 - 0 0 0 0 0 0 0 0 0 0 0 0
42728 - 0 0 0 0 0 0 0 0 0 0 0 0
42729 - 6 6 6 18 18 18 46 46 46 90 90 90
42730 - 46 46 46 18 18 18 6 6 6 182 182 182
42731 -253 253 253 246 246 246 206 206 206 190 190 190
42732 -190 190 190 190 190 190 190 190 190 190 190 190
42733 -206 206 206 231 231 231 250 250 250 253 253 253
42734 -253 253 253 253 253 253 253 253 253 253 253 253
42735 -202 202 202 14 14 14 2 2 6 2 2 6
42736 - 2 2 6 2 2 6 2 2 6 2 2 6
42737 - 42 42 42 86 86 86 42 42 42 18 18 18
42738 - 6 6 6 0 0 0 0 0 0 0 0 0
42739 - 0 0 0 0 0 0 0 0 0 0 0 0
42740 - 0 0 0 0 0 0 0 0 0 0 0 0
42741 - 0 0 0 0 0 0 0 0 0 0 0 0
42742 - 0 0 0 0 0 0 0 0 0 0 0 0
42743 - 0 0 0 0 0 0 0 0 0 0 0 0
42744 - 0 0 0 0 0 0 0 0 0 0 0 0
42745 - 0 0 0 0 0 0 0 0 0 0 0 0
42746 - 0 0 0 0 0 0 0 0 0 0 0 0
42747 - 0 0 0 0 0 0 0 0 0 0 0 0
42748 - 0 0 0 0 0 0 0 0 0 6 6 6
42749 - 14 14 14 38 38 38 74 74 74 66 66 66
42750 - 2 2 6 6 6 6 90 90 90 250 250 250
42751 -253 253 253 253 253 253 238 238 238 198 198 198
42752 -190 190 190 190 190 190 195 195 195 221 221 221
42753 -246 246 246 253 253 253 253 253 253 253 253 253
42754 -253 253 253 253 253 253 253 253 253 253 253 253
42755 -253 253 253 82 82 82 2 2 6 2 2 6
42756 - 2 2 6 2 2 6 2 2 6 2 2 6
42757 - 2 2 6 78 78 78 70 70 70 34 34 34
42758 - 14 14 14 6 6 6 0 0 0 0 0 0
42759 - 0 0 0 0 0 0 0 0 0 0 0 0
42760 - 0 0 0 0 0 0 0 0 0 0 0 0
42761 - 0 0 0 0 0 0 0 0 0 0 0 0
42762 - 0 0 0 0 0 0 0 0 0 0 0 0
42763 - 0 0 0 0 0 0 0 0 0 0 0 0
42764 - 0 0 0 0 0 0 0 0 0 0 0 0
42765 - 0 0 0 0 0 0 0 0 0 0 0 0
42766 - 0 0 0 0 0 0 0 0 0 0 0 0
42767 - 0 0 0 0 0 0 0 0 0 0 0 0
42768 - 0 0 0 0 0 0 0 0 0 14 14 14
42769 - 34 34 34 66 66 66 78 78 78 6 6 6
42770 - 2 2 6 18 18 18 218 218 218 253 253 253
42771 -253 253 253 253 253 253 253 253 253 246 246 246
42772 -226 226 226 231 231 231 246 246 246 253 253 253
42773 -253 253 253 253 253 253 253 253 253 253 253 253
42774 -253 253 253 253 253 253 253 253 253 253 253 253
42775 -253 253 253 178 178 178 2 2 6 2 2 6
42776 - 2 2 6 2 2 6 2 2 6 2 2 6
42777 - 2 2 6 18 18 18 90 90 90 62 62 62
42778 - 30 30 30 10 10 10 0 0 0 0 0 0
42779 - 0 0 0 0 0 0 0 0 0 0 0 0
42780 - 0 0 0 0 0 0 0 0 0 0 0 0
42781 - 0 0 0 0 0 0 0 0 0 0 0 0
42782 - 0 0 0 0 0 0 0 0 0 0 0 0
42783 - 0 0 0 0 0 0 0 0 0 0 0 0
42784 - 0 0 0 0 0 0 0 0 0 0 0 0
42785 - 0 0 0 0 0 0 0 0 0 0 0 0
42786 - 0 0 0 0 0 0 0 0 0 0 0 0
42787 - 0 0 0 0 0 0 0 0 0 0 0 0
42788 - 0 0 0 0 0 0 10 10 10 26 26 26
42789 - 58 58 58 90 90 90 18 18 18 2 2 6
42790 - 2 2 6 110 110 110 253 253 253 253 253 253
42791 -253 253 253 253 253 253 253 253 253 253 253 253
42792 -250 250 250 253 253 253 253 253 253 253 253 253
42793 -253 253 253 253 253 253 253 253 253 253 253 253
42794 -253 253 253 253 253 253 253 253 253 253 253 253
42795 -253 253 253 231 231 231 18 18 18 2 2 6
42796 - 2 2 6 2 2 6 2 2 6 2 2 6
42797 - 2 2 6 2 2 6 18 18 18 94 94 94
42798 - 54 54 54 26 26 26 10 10 10 0 0 0
42799 - 0 0 0 0 0 0 0 0 0 0 0 0
42800 - 0 0 0 0 0 0 0 0 0 0 0 0
42801 - 0 0 0 0 0 0 0 0 0 0 0 0
42802 - 0 0 0 0 0 0 0 0 0 0 0 0
42803 - 0 0 0 0 0 0 0 0 0 0 0 0
42804 - 0 0 0 0 0 0 0 0 0 0 0 0
42805 - 0 0 0 0 0 0 0 0 0 0 0 0
42806 - 0 0 0 0 0 0 0 0 0 0 0 0
42807 - 0 0 0 0 0 0 0 0 0 0 0 0
42808 - 0 0 0 6 6 6 22 22 22 50 50 50
42809 - 90 90 90 26 26 26 2 2 6 2 2 6
42810 - 14 14 14 195 195 195 250 250 250 253 253 253
42811 -253 253 253 253 253 253 253 253 253 253 253 253
42812 -253 253 253 253 253 253 253 253 253 253 253 253
42813 -253 253 253 253 253 253 253 253 253 253 253 253
42814 -253 253 253 253 253 253 253 253 253 253 253 253
42815 -250 250 250 242 242 242 54 54 54 2 2 6
42816 - 2 2 6 2 2 6 2 2 6 2 2 6
42817 - 2 2 6 2 2 6 2 2 6 38 38 38
42818 - 86 86 86 50 50 50 22 22 22 6 6 6
42819 - 0 0 0 0 0 0 0 0 0 0 0 0
42820 - 0 0 0 0 0 0 0 0 0 0 0 0
42821 - 0 0 0 0 0 0 0 0 0 0 0 0
42822 - 0 0 0 0 0 0 0 0 0 0 0 0
42823 - 0 0 0 0 0 0 0 0 0 0 0 0
42824 - 0 0 0 0 0 0 0 0 0 0 0 0
42825 - 0 0 0 0 0 0 0 0 0 0 0 0
42826 - 0 0 0 0 0 0 0 0 0 0 0 0
42827 - 0 0 0 0 0 0 0 0 0 0 0 0
42828 - 6 6 6 14 14 14 38 38 38 82 82 82
42829 - 34 34 34 2 2 6 2 2 6 2 2 6
42830 - 42 42 42 195 195 195 246 246 246 253 253 253
42831 -253 253 253 253 253 253 253 253 253 250 250 250
42832 -242 242 242 242 242 242 250 250 250 253 253 253
42833 -253 253 253 253 253 253 253 253 253 253 253 253
42834 -253 253 253 250 250 250 246 246 246 238 238 238
42835 -226 226 226 231 231 231 101 101 101 6 6 6
42836 - 2 2 6 2 2 6 2 2 6 2 2 6
42837 - 2 2 6 2 2 6 2 2 6 2 2 6
42838 - 38 38 38 82 82 82 42 42 42 14 14 14
42839 - 6 6 6 0 0 0 0 0 0 0 0 0
42840 - 0 0 0 0 0 0 0 0 0 0 0 0
42841 - 0 0 0 0 0 0 0 0 0 0 0 0
42842 - 0 0 0 0 0 0 0 0 0 0 0 0
42843 - 0 0 0 0 0 0 0 0 0 0 0 0
42844 - 0 0 0 0 0 0 0 0 0 0 0 0
42845 - 0 0 0 0 0 0 0 0 0 0 0 0
42846 - 0 0 0 0 0 0 0 0 0 0 0 0
42847 - 0 0 0 0 0 0 0 0 0 0 0 0
42848 - 10 10 10 26 26 26 62 62 62 66 66 66
42849 - 2 2 6 2 2 6 2 2 6 6 6 6
42850 - 70 70 70 170 170 170 206 206 206 234 234 234
42851 -246 246 246 250 250 250 250 250 250 238 238 238
42852 -226 226 226 231 231 231 238 238 238 250 250 250
42853 -250 250 250 250 250 250 246 246 246 231 231 231
42854 -214 214 214 206 206 206 202 202 202 202 202 202
42855 -198 198 198 202 202 202 182 182 182 18 18 18
42856 - 2 2 6 2 2 6 2 2 6 2 2 6
42857 - 2 2 6 2 2 6 2 2 6 2 2 6
42858 - 2 2 6 62 62 62 66 66 66 30 30 30
42859 - 10 10 10 0 0 0 0 0 0 0 0 0
42860 - 0 0 0 0 0 0 0 0 0 0 0 0
42861 - 0 0 0 0 0 0 0 0 0 0 0 0
42862 - 0 0 0 0 0 0 0 0 0 0 0 0
42863 - 0 0 0 0 0 0 0 0 0 0 0 0
42864 - 0 0 0 0 0 0 0 0 0 0 0 0
42865 - 0 0 0 0 0 0 0 0 0 0 0 0
42866 - 0 0 0 0 0 0 0 0 0 0 0 0
42867 - 0 0 0 0 0 0 0 0 0 0 0 0
42868 - 14 14 14 42 42 42 82 82 82 18 18 18
42869 - 2 2 6 2 2 6 2 2 6 10 10 10
42870 - 94 94 94 182 182 182 218 218 218 242 242 242
42871 -250 250 250 253 253 253 253 253 253 250 250 250
42872 -234 234 234 253 253 253 253 253 253 253 253 253
42873 -253 253 253 253 253 253 253 253 253 246 246 246
42874 -238 238 238 226 226 226 210 210 210 202 202 202
42875 -195 195 195 195 195 195 210 210 210 158 158 158
42876 - 6 6 6 14 14 14 50 50 50 14 14 14
42877 - 2 2 6 2 2 6 2 2 6 2 2 6
42878 - 2 2 6 6 6 6 86 86 86 46 46 46
42879 - 18 18 18 6 6 6 0 0 0 0 0 0
42880 - 0 0 0 0 0 0 0 0 0 0 0 0
42881 - 0 0 0 0 0 0 0 0 0 0 0 0
42882 - 0 0 0 0 0 0 0 0 0 0 0 0
42883 - 0 0 0 0 0 0 0 0 0 0 0 0
42884 - 0 0 0 0 0 0 0 0 0 0 0 0
42885 - 0 0 0 0 0 0 0 0 0 0 0 0
42886 - 0 0 0 0 0 0 0 0 0 0 0 0
42887 - 0 0 0 0 0 0 0 0 0 6 6 6
42888 - 22 22 22 54 54 54 70 70 70 2 2 6
42889 - 2 2 6 10 10 10 2 2 6 22 22 22
42890 -166 166 166 231 231 231 250 250 250 253 253 253
42891 -253 253 253 253 253 253 253 253 253 250 250 250
42892 -242 242 242 253 253 253 253 253 253 253 253 253
42893 -253 253 253 253 253 253 253 253 253 253 253 253
42894 -253 253 253 253 253 253 253 253 253 246 246 246
42895 -231 231 231 206 206 206 198 198 198 226 226 226
42896 - 94 94 94 2 2 6 6 6 6 38 38 38
42897 - 30 30 30 2 2 6 2 2 6 2 2 6
42898 - 2 2 6 2 2 6 62 62 62 66 66 66
42899 - 26 26 26 10 10 10 0 0 0 0 0 0
42900 - 0 0 0 0 0 0 0 0 0 0 0 0
42901 - 0 0 0 0 0 0 0 0 0 0 0 0
42902 - 0 0 0 0 0 0 0 0 0 0 0 0
42903 - 0 0 0 0 0 0 0 0 0 0 0 0
42904 - 0 0 0 0 0 0 0 0 0 0 0 0
42905 - 0 0 0 0 0 0 0 0 0 0 0 0
42906 - 0 0 0 0 0 0 0 0 0 0 0 0
42907 - 0 0 0 0 0 0 0 0 0 10 10 10
42908 - 30 30 30 74 74 74 50 50 50 2 2 6
42909 - 26 26 26 26 26 26 2 2 6 106 106 106
42910 -238 238 238 253 253 253 253 253 253 253 253 253
42911 -253 253 253 253 253 253 253 253 253 253 253 253
42912 -253 253 253 253 253 253 253 253 253 253 253 253
42913 -253 253 253 253 253 253 253 253 253 253 253 253
42914 -253 253 253 253 253 253 253 253 253 253 253 253
42915 -253 253 253 246 246 246 218 218 218 202 202 202
42916 -210 210 210 14 14 14 2 2 6 2 2 6
42917 - 30 30 30 22 22 22 2 2 6 2 2 6
42918 - 2 2 6 2 2 6 18 18 18 86 86 86
42919 - 42 42 42 14 14 14 0 0 0 0 0 0
42920 - 0 0 0 0 0 0 0 0 0 0 0 0
42921 - 0 0 0 0 0 0 0 0 0 0 0 0
42922 - 0 0 0 0 0 0 0 0 0 0 0 0
42923 - 0 0 0 0 0 0 0 0 0 0 0 0
42924 - 0 0 0 0 0 0 0 0 0 0 0 0
42925 - 0 0 0 0 0 0 0 0 0 0 0 0
42926 - 0 0 0 0 0 0 0 0 0 0 0 0
42927 - 0 0 0 0 0 0 0 0 0 14 14 14
42928 - 42 42 42 90 90 90 22 22 22 2 2 6
42929 - 42 42 42 2 2 6 18 18 18 218 218 218
42930 -253 253 253 253 253 253 253 253 253 253 253 253
42931 -253 253 253 253 253 253 253 253 253 253 253 253
42932 -253 253 253 253 253 253 253 253 253 253 253 253
42933 -253 253 253 253 253 253 253 253 253 253 253 253
42934 -253 253 253 253 253 253 253 253 253 253 253 253
42935 -253 253 253 253 253 253 250 250 250 221 221 221
42936 -218 218 218 101 101 101 2 2 6 14 14 14
42937 - 18 18 18 38 38 38 10 10 10 2 2 6
42938 - 2 2 6 2 2 6 2 2 6 78 78 78
42939 - 58 58 58 22 22 22 6 6 6 0 0 0
42940 - 0 0 0 0 0 0 0 0 0 0 0 0
42941 - 0 0 0 0 0 0 0 0 0 0 0 0
42942 - 0 0 0 0 0 0 0 0 0 0 0 0
42943 - 0 0 0 0 0 0 0 0 0 0 0 0
42944 - 0 0 0 0 0 0 0 0 0 0 0 0
42945 - 0 0 0 0 0 0 0 0 0 0 0 0
42946 - 0 0 0 0 0 0 0 0 0 0 0 0
42947 - 0 0 0 0 0 0 6 6 6 18 18 18
42948 - 54 54 54 82 82 82 2 2 6 26 26 26
42949 - 22 22 22 2 2 6 123 123 123 253 253 253
42950 -253 253 253 253 253 253 253 253 253 253 253 253
42951 -253 253 253 253 253 253 253 253 253 253 253 253
42952 -253 253 253 253 253 253 253 253 253 253 253 253
42953 -253 253 253 253 253 253 253 253 253 253 253 253
42954 -253 253 253 253 253 253 253 253 253 253 253 253
42955 -253 253 253 253 253 253 253 253 253 250 250 250
42956 -238 238 238 198 198 198 6 6 6 38 38 38
42957 - 58 58 58 26 26 26 38 38 38 2 2 6
42958 - 2 2 6 2 2 6 2 2 6 46 46 46
42959 - 78 78 78 30 30 30 10 10 10 0 0 0
42960 - 0 0 0 0 0 0 0 0 0 0 0 0
42961 - 0 0 0 0 0 0 0 0 0 0 0 0
42962 - 0 0 0 0 0 0 0 0 0 0 0 0
42963 - 0 0 0 0 0 0 0 0 0 0 0 0
42964 - 0 0 0 0 0 0 0 0 0 0 0 0
42965 - 0 0 0 0 0 0 0 0 0 0 0 0
42966 - 0 0 0 0 0 0 0 0 0 0 0 0
42967 - 0 0 0 0 0 0 10 10 10 30 30 30
42968 - 74 74 74 58 58 58 2 2 6 42 42 42
42969 - 2 2 6 22 22 22 231 231 231 253 253 253
42970 -253 253 253 253 253 253 253 253 253 253 253 253
42971 -253 253 253 253 253 253 253 253 253 250 250 250
42972 -253 253 253 253 253 253 253 253 253 253 253 253
42973 -253 253 253 253 253 253 253 253 253 253 253 253
42974 -253 253 253 253 253 253 253 253 253 253 253 253
42975 -253 253 253 253 253 253 253 253 253 253 253 253
42976 -253 253 253 246 246 246 46 46 46 38 38 38
42977 - 42 42 42 14 14 14 38 38 38 14 14 14
42978 - 2 2 6 2 2 6 2 2 6 6 6 6
42979 - 86 86 86 46 46 46 14 14 14 0 0 0
42980 - 0 0 0 0 0 0 0 0 0 0 0 0
42981 - 0 0 0 0 0 0 0 0 0 0 0 0
42982 - 0 0 0 0 0 0 0 0 0 0 0 0
42983 - 0 0 0 0 0 0 0 0 0 0 0 0
42984 - 0 0 0 0 0 0 0 0 0 0 0 0
42985 - 0 0 0 0 0 0 0 0 0 0 0 0
42986 - 0 0 0 0 0 0 0 0 0 0 0 0
42987 - 0 0 0 6 6 6 14 14 14 42 42 42
42988 - 90 90 90 18 18 18 18 18 18 26 26 26
42989 - 2 2 6 116 116 116 253 253 253 253 253 253
42990 -253 253 253 253 253 253 253 253 253 253 253 253
42991 -253 253 253 253 253 253 250 250 250 238 238 238
42992 -253 253 253 253 253 253 253 253 253 253 253 253
42993 -253 253 253 253 253 253 253 253 253 253 253 253
42994 -253 253 253 253 253 253 253 253 253 253 253 253
42995 -253 253 253 253 253 253 253 253 253 253 253 253
42996 -253 253 253 253 253 253 94 94 94 6 6 6
42997 - 2 2 6 2 2 6 10 10 10 34 34 34
42998 - 2 2 6 2 2 6 2 2 6 2 2 6
42999 - 74 74 74 58 58 58 22 22 22 6 6 6
43000 - 0 0 0 0 0 0 0 0 0 0 0 0
43001 - 0 0 0 0 0 0 0 0 0 0 0 0
43002 - 0 0 0 0 0 0 0 0 0 0 0 0
43003 - 0 0 0 0 0 0 0 0 0 0 0 0
43004 - 0 0 0 0 0 0 0 0 0 0 0 0
43005 - 0 0 0 0 0 0 0 0 0 0 0 0
43006 - 0 0 0 0 0 0 0 0 0 0 0 0
43007 - 0 0 0 10 10 10 26 26 26 66 66 66
43008 - 82 82 82 2 2 6 38 38 38 6 6 6
43009 - 14 14 14 210 210 210 253 253 253 253 253 253
43010 -253 253 253 253 253 253 253 253 253 253 253 253
43011 -253 253 253 253 253 253 246 246 246 242 242 242
43012 -253 253 253 253 253 253 253 253 253 253 253 253
43013 -253 253 253 253 253 253 253 253 253 253 253 253
43014 -253 253 253 253 253 253 253 253 253 253 253 253
43015 -253 253 253 253 253 253 253 253 253 253 253 253
43016 -253 253 253 253 253 253 144 144 144 2 2 6
43017 - 2 2 6 2 2 6 2 2 6 46 46 46
43018 - 2 2 6 2 2 6 2 2 6 2 2 6
43019 - 42 42 42 74 74 74 30 30 30 10 10 10
43020 - 0 0 0 0 0 0 0 0 0 0 0 0
43021 - 0 0 0 0 0 0 0 0 0 0 0 0
43022 - 0 0 0 0 0 0 0 0 0 0 0 0
43023 - 0 0 0 0 0 0 0 0 0 0 0 0
43024 - 0 0 0 0 0 0 0 0 0 0 0 0
43025 - 0 0 0 0 0 0 0 0 0 0 0 0
43026 - 0 0 0 0 0 0 0 0 0 0 0 0
43027 - 6 6 6 14 14 14 42 42 42 90 90 90
43028 - 26 26 26 6 6 6 42 42 42 2 2 6
43029 - 74 74 74 250 250 250 253 253 253 253 253 253
43030 -253 253 253 253 253 253 253 253 253 253 253 253
43031 -253 253 253 253 253 253 242 242 242 242 242 242
43032 -253 253 253 253 253 253 253 253 253 253 253 253
43033 -253 253 253 253 253 253 253 253 253 253 253 253
43034 -253 253 253 253 253 253 253 253 253 253 253 253
43035 -253 253 253 253 253 253 253 253 253 253 253 253
43036 -253 253 253 253 253 253 182 182 182 2 2 6
43037 - 2 2 6 2 2 6 2 2 6 46 46 46
43038 - 2 2 6 2 2 6 2 2 6 2 2 6
43039 - 10 10 10 86 86 86 38 38 38 10 10 10
43040 - 0 0 0 0 0 0 0 0 0 0 0 0
43041 - 0 0 0 0 0 0 0 0 0 0 0 0
43042 - 0 0 0 0 0 0 0 0 0 0 0 0
43043 - 0 0 0 0 0 0 0 0 0 0 0 0
43044 - 0 0 0 0 0 0 0 0 0 0 0 0
43045 - 0 0 0 0 0 0 0 0 0 0 0 0
43046 - 0 0 0 0 0 0 0 0 0 0 0 0
43047 - 10 10 10 26 26 26 66 66 66 82 82 82
43048 - 2 2 6 22 22 22 18 18 18 2 2 6
43049 -149 149 149 253 253 253 253 253 253 253 253 253
43050 -253 253 253 253 253 253 253 253 253 253 253 253
43051 -253 253 253 253 253 253 234 234 234 242 242 242
43052 -253 253 253 253 253 253 253 253 253 253 253 253
43053 -253 253 253 253 253 253 253 253 253 253 253 253
43054 -253 253 253 253 253 253 253 253 253 253 253 253
43055 -253 253 253 253 253 253 253 253 253 253 253 253
43056 -253 253 253 253 253 253 206 206 206 2 2 6
43057 - 2 2 6 2 2 6 2 2 6 38 38 38
43058 - 2 2 6 2 2 6 2 2 6 2 2 6
43059 - 6 6 6 86 86 86 46 46 46 14 14 14
43060 - 0 0 0 0 0 0 0 0 0 0 0 0
43061 - 0 0 0 0 0 0 0 0 0 0 0 0
43062 - 0 0 0 0 0 0 0 0 0 0 0 0
43063 - 0 0 0 0 0 0 0 0 0 0 0 0
43064 - 0 0 0 0 0 0 0 0 0 0 0 0
43065 - 0 0 0 0 0 0 0 0 0 0 0 0
43066 - 0 0 0 0 0 0 0 0 0 6 6 6
43067 - 18 18 18 46 46 46 86 86 86 18 18 18
43068 - 2 2 6 34 34 34 10 10 10 6 6 6
43069 -210 210 210 253 253 253 253 253 253 253 253 253
43070 -253 253 253 253 253 253 253 253 253 253 253 253
43071 -253 253 253 253 253 253 234 234 234 242 242 242
43072 -253 253 253 253 253 253 253 253 253 253 253 253
43073 -253 253 253 253 253 253 253 253 253 253 253 253
43074 -253 253 253 253 253 253 253 253 253 253 253 253
43075 -253 253 253 253 253 253 253 253 253 253 253 253
43076 -253 253 253 253 253 253 221 221 221 6 6 6
43077 - 2 2 6 2 2 6 6 6 6 30 30 30
43078 - 2 2 6 2 2 6 2 2 6 2 2 6
43079 - 2 2 6 82 82 82 54 54 54 18 18 18
43080 - 6 6 6 0 0 0 0 0 0 0 0 0
43081 - 0 0 0 0 0 0 0 0 0 0 0 0
43082 - 0 0 0 0 0 0 0 0 0 0 0 0
43083 - 0 0 0 0 0 0 0 0 0 0 0 0
43084 - 0 0 0 0 0 0 0 0 0 0 0 0
43085 - 0 0 0 0 0 0 0 0 0 0 0 0
43086 - 0 0 0 0 0 0 0 0 0 10 10 10
43087 - 26 26 26 66 66 66 62 62 62 2 2 6
43088 - 2 2 6 38 38 38 10 10 10 26 26 26
43089 -238 238 238 253 253 253 253 253 253 253 253 253
43090 -253 253 253 253 253 253 253 253 253 253 253 253
43091 -253 253 253 253 253 253 231 231 231 238 238 238
43092 -253 253 253 253 253 253 253 253 253 253 253 253
43093 -253 253 253 253 253 253 253 253 253 253 253 253
43094 -253 253 253 253 253 253 253 253 253 253 253 253
43095 -253 253 253 253 253 253 253 253 253 253 253 253
43096 -253 253 253 253 253 253 231 231 231 6 6 6
43097 - 2 2 6 2 2 6 10 10 10 30 30 30
43098 - 2 2 6 2 2 6 2 2 6 2 2 6
43099 - 2 2 6 66 66 66 58 58 58 22 22 22
43100 - 6 6 6 0 0 0 0 0 0 0 0 0
43101 - 0 0 0 0 0 0 0 0 0 0 0 0
43102 - 0 0 0 0 0 0 0 0 0 0 0 0
43103 - 0 0 0 0 0 0 0 0 0 0 0 0
43104 - 0 0 0 0 0 0 0 0 0 0 0 0
43105 - 0 0 0 0 0 0 0 0 0 0 0 0
43106 - 0 0 0 0 0 0 0 0 0 10 10 10
43107 - 38 38 38 78 78 78 6 6 6 2 2 6
43108 - 2 2 6 46 46 46 14 14 14 42 42 42
43109 -246 246 246 253 253 253 253 253 253 253 253 253
43110 -253 253 253 253 253 253 253 253 253 253 253 253
43111 -253 253 253 253 253 253 231 231 231 242 242 242
43112 -253 253 253 253 253 253 253 253 253 253 253 253
43113 -253 253 253 253 253 253 253 253 253 253 253 253
43114 -253 253 253 253 253 253 253 253 253 253 253 253
43115 -253 253 253 253 253 253 253 253 253 253 253 253
43116 -253 253 253 253 253 253 234 234 234 10 10 10
43117 - 2 2 6 2 2 6 22 22 22 14 14 14
43118 - 2 2 6 2 2 6 2 2 6 2 2 6
43119 - 2 2 6 66 66 66 62 62 62 22 22 22
43120 - 6 6 6 0 0 0 0 0 0 0 0 0
43121 - 0 0 0 0 0 0 0 0 0 0 0 0
43122 - 0 0 0 0 0 0 0 0 0 0 0 0
43123 - 0 0 0 0 0 0 0 0 0 0 0 0
43124 - 0 0 0 0 0 0 0 0 0 0 0 0
43125 - 0 0 0 0 0 0 0 0 0 0 0 0
43126 - 0 0 0 0 0 0 6 6 6 18 18 18
43127 - 50 50 50 74 74 74 2 2 6 2 2 6
43128 - 14 14 14 70 70 70 34 34 34 62 62 62
43129 -250 250 250 253 253 253 253 253 253 253 253 253
43130 -253 253 253 253 253 253 253 253 253 253 253 253
43131 -253 253 253 253 253 253 231 231 231 246 246 246
43132 -253 253 253 253 253 253 253 253 253 253 253 253
43133 -253 253 253 253 253 253 253 253 253 253 253 253
43134 -253 253 253 253 253 253 253 253 253 253 253 253
43135 -253 253 253 253 253 253 253 253 253 253 253 253
43136 -253 253 253 253 253 253 234 234 234 14 14 14
43137 - 2 2 6 2 2 6 30 30 30 2 2 6
43138 - 2 2 6 2 2 6 2 2 6 2 2 6
43139 - 2 2 6 66 66 66 62 62 62 22 22 22
43140 - 6 6 6 0 0 0 0 0 0 0 0 0
43141 - 0 0 0 0 0 0 0 0 0 0 0 0
43142 - 0 0 0 0 0 0 0 0 0 0 0 0
43143 - 0 0 0 0 0 0 0 0 0 0 0 0
43144 - 0 0 0 0 0 0 0 0 0 0 0 0
43145 - 0 0 0 0 0 0 0 0 0 0 0 0
43146 - 0 0 0 0 0 0 6 6 6 18 18 18
43147 - 54 54 54 62 62 62 2 2 6 2 2 6
43148 - 2 2 6 30 30 30 46 46 46 70 70 70
43149 -250 250 250 253 253 253 253 253 253 253 253 253
43150 -253 253 253 253 253 253 253 253 253 253 253 253
43151 -253 253 253 253 253 253 231 231 231 246 246 246
43152 -253 253 253 253 253 253 253 253 253 253 253 253
43153 -253 253 253 253 253 253 253 253 253 253 253 253
43154 -253 253 253 253 253 253 253 253 253 253 253 253
43155 -253 253 253 253 253 253 253 253 253 253 253 253
43156 -253 253 253 253 253 253 226 226 226 10 10 10
43157 - 2 2 6 6 6 6 30 30 30 2 2 6
43158 - 2 2 6 2 2 6 2 2 6 2 2 6
43159 - 2 2 6 66 66 66 58 58 58 22 22 22
43160 - 6 6 6 0 0 0 0 0 0 0 0 0
43161 - 0 0 0 0 0 0 0 0 0 0 0 0
43162 - 0 0 0 0 0 0 0 0 0 0 0 0
43163 - 0 0 0 0 0 0 0 0 0 0 0 0
43164 - 0 0 0 0 0 0 0 0 0 0 0 0
43165 - 0 0 0 0 0 0 0 0 0 0 0 0
43166 - 0 0 0 0 0 0 6 6 6 22 22 22
43167 - 58 58 58 62 62 62 2 2 6 2 2 6
43168 - 2 2 6 2 2 6 30 30 30 78 78 78
43169 -250 250 250 253 253 253 253 253 253 253 253 253
43170 -253 253 253 253 253 253 253 253 253 253 253 253
43171 -253 253 253 253 253 253 231 231 231 246 246 246
43172 -253 253 253 253 253 253 253 253 253 253 253 253
43173 -253 253 253 253 253 253 253 253 253 253 253 253
43174 -253 253 253 253 253 253 253 253 253 253 253 253
43175 -253 253 253 253 253 253 253 253 253 253 253 253
43176 -253 253 253 253 253 253 206 206 206 2 2 6
43177 - 22 22 22 34 34 34 18 14 6 22 22 22
43178 - 26 26 26 18 18 18 6 6 6 2 2 6
43179 - 2 2 6 82 82 82 54 54 54 18 18 18
43180 - 6 6 6 0 0 0 0 0 0 0 0 0
43181 - 0 0 0 0 0 0 0 0 0 0 0 0
43182 - 0 0 0 0 0 0 0 0 0 0 0 0
43183 - 0 0 0 0 0 0 0 0 0 0 0 0
43184 - 0 0 0 0 0 0 0 0 0 0 0 0
43185 - 0 0 0 0 0 0 0 0 0 0 0 0
43186 - 0 0 0 0 0 0 6 6 6 26 26 26
43187 - 62 62 62 106 106 106 74 54 14 185 133 11
43188 -210 162 10 121 92 8 6 6 6 62 62 62
43189 -238 238 238 253 253 253 253 253 253 253 253 253
43190 -253 253 253 253 253 253 253 253 253 253 253 253
43191 -253 253 253 253 253 253 231 231 231 246 246 246
43192 -253 253 253 253 253 253 253 253 253 253 253 253
43193 -253 253 253 253 253 253 253 253 253 253 253 253
43194 -253 253 253 253 253 253 253 253 253 253 253 253
43195 -253 253 253 253 253 253 253 253 253 253 253 253
43196 -253 253 253 253 253 253 158 158 158 18 18 18
43197 - 14 14 14 2 2 6 2 2 6 2 2 6
43198 - 6 6 6 18 18 18 66 66 66 38 38 38
43199 - 6 6 6 94 94 94 50 50 50 18 18 18
43200 - 6 6 6 0 0 0 0 0 0 0 0 0
43201 - 0 0 0 0 0 0 0 0 0 0 0 0
43202 - 0 0 0 0 0 0 0 0 0 0 0 0
43203 - 0 0 0 0 0 0 0 0 0 0 0 0
43204 - 0 0 0 0 0 0 0 0 0 0 0 0
43205 - 0 0 0 0 0 0 0 0 0 6 6 6
43206 - 10 10 10 10 10 10 18 18 18 38 38 38
43207 - 78 78 78 142 134 106 216 158 10 242 186 14
43208 -246 190 14 246 190 14 156 118 10 10 10 10
43209 - 90 90 90 238 238 238 253 253 253 253 253 253
43210 -253 253 253 253 253 253 253 253 253 253 253 253
43211 -253 253 253 253 253 253 231 231 231 250 250 250
43212 -253 253 253 253 253 253 253 253 253 253 253 253
43213 -253 253 253 253 253 253 253 253 253 253 253 253
43214 -253 253 253 253 253 253 253 253 253 253 253 253
43215 -253 253 253 253 253 253 253 253 253 246 230 190
43216 -238 204 91 238 204 91 181 142 44 37 26 9
43217 - 2 2 6 2 2 6 2 2 6 2 2 6
43218 - 2 2 6 2 2 6 38 38 38 46 46 46
43219 - 26 26 26 106 106 106 54 54 54 18 18 18
43220 - 6 6 6 0 0 0 0 0 0 0 0 0
43221 - 0 0 0 0 0 0 0 0 0 0 0 0
43222 - 0 0 0 0 0 0 0 0 0 0 0 0
43223 - 0 0 0 0 0 0 0 0 0 0 0 0
43224 - 0 0 0 0 0 0 0 0 0 0 0 0
43225 - 0 0 0 6 6 6 14 14 14 22 22 22
43226 - 30 30 30 38 38 38 50 50 50 70 70 70
43227 -106 106 106 190 142 34 226 170 11 242 186 14
43228 -246 190 14 246 190 14 246 190 14 154 114 10
43229 - 6 6 6 74 74 74 226 226 226 253 253 253
43230 -253 253 253 253 253 253 253 253 253 253 253 253
43231 -253 253 253 253 253 253 231 231 231 250 250 250
43232 -253 253 253 253 253 253 253 253 253 253 253 253
43233 -253 253 253 253 253 253 253 253 253 253 253 253
43234 -253 253 253 253 253 253 253 253 253 253 253 253
43235 -253 253 253 253 253 253 253 253 253 228 184 62
43236 -241 196 14 241 208 19 232 195 16 38 30 10
43237 - 2 2 6 2 2 6 2 2 6 2 2 6
43238 - 2 2 6 6 6 6 30 30 30 26 26 26
43239 -203 166 17 154 142 90 66 66 66 26 26 26
43240 - 6 6 6 0 0 0 0 0 0 0 0 0
43241 - 0 0 0 0 0 0 0 0 0 0 0 0
43242 - 0 0 0 0 0 0 0 0 0 0 0 0
43243 - 0 0 0 0 0 0 0 0 0 0 0 0
43244 - 0 0 0 0 0 0 0 0 0 0 0 0
43245 - 6 6 6 18 18 18 38 38 38 58 58 58
43246 - 78 78 78 86 86 86 101 101 101 123 123 123
43247 -175 146 61 210 150 10 234 174 13 246 186 14
43248 -246 190 14 246 190 14 246 190 14 238 190 10
43249 -102 78 10 2 2 6 46 46 46 198 198 198
43250 -253 253 253 253 253 253 253 253 253 253 253 253
43251 -253 253 253 253 253 253 234 234 234 242 242 242
43252 -253 253 253 253 253 253 253 253 253 253 253 253
43253 -253 253 253 253 253 253 253 253 253 253 253 253
43254 -253 253 253 253 253 253 253 253 253 253 253 253
43255 -253 253 253 253 253 253 253 253 253 224 178 62
43256 -242 186 14 241 196 14 210 166 10 22 18 6
43257 - 2 2 6 2 2 6 2 2 6 2 2 6
43258 - 2 2 6 2 2 6 6 6 6 121 92 8
43259 -238 202 15 232 195 16 82 82 82 34 34 34
43260 - 10 10 10 0 0 0 0 0 0 0 0 0
43261 - 0 0 0 0 0 0 0 0 0 0 0 0
43262 - 0 0 0 0 0 0 0 0 0 0 0 0
43263 - 0 0 0 0 0 0 0 0 0 0 0 0
43264 - 0 0 0 0 0 0 0 0 0 0 0 0
43265 - 14 14 14 38 38 38 70 70 70 154 122 46
43266 -190 142 34 200 144 11 197 138 11 197 138 11
43267 -213 154 11 226 170 11 242 186 14 246 190 14
43268 -246 190 14 246 190 14 246 190 14 246 190 14
43269 -225 175 15 46 32 6 2 2 6 22 22 22
43270 -158 158 158 250 250 250 253 253 253 253 253 253
43271 -253 253 253 253 253 253 253 253 253 253 253 253
43272 -253 253 253 253 253 253 253 253 253 253 253 253
43273 -253 253 253 253 253 253 253 253 253 253 253 253
43274 -253 253 253 253 253 253 253 253 253 253 253 253
43275 -253 253 253 250 250 250 242 242 242 224 178 62
43276 -239 182 13 236 186 11 213 154 11 46 32 6
43277 - 2 2 6 2 2 6 2 2 6 2 2 6
43278 - 2 2 6 2 2 6 61 42 6 225 175 15
43279 -238 190 10 236 186 11 112 100 78 42 42 42
43280 - 14 14 14 0 0 0 0 0 0 0 0 0
43281 - 0 0 0 0 0 0 0 0 0 0 0 0
43282 - 0 0 0 0 0 0 0 0 0 0 0 0
43283 - 0 0 0 0 0 0 0 0 0 0 0 0
43284 - 0 0 0 0 0 0 0 0 0 6 6 6
43285 - 22 22 22 54 54 54 154 122 46 213 154 11
43286 -226 170 11 230 174 11 226 170 11 226 170 11
43287 -236 178 12 242 186 14 246 190 14 246 190 14
43288 -246 190 14 246 190 14 246 190 14 246 190 14
43289 -241 196 14 184 144 12 10 10 10 2 2 6
43290 - 6 6 6 116 116 116 242 242 242 253 253 253
43291 -253 253 253 253 253 253 253 253 253 253 253 253
43292 -253 253 253 253 253 253 253 253 253 253 253 253
43293 -253 253 253 253 253 253 253 253 253 253 253 253
43294 -253 253 253 253 253 253 253 253 253 253 253 253
43295 -253 253 253 231 231 231 198 198 198 214 170 54
43296 -236 178 12 236 178 12 210 150 10 137 92 6
43297 - 18 14 6 2 2 6 2 2 6 2 2 6
43298 - 6 6 6 70 47 6 200 144 11 236 178 12
43299 -239 182 13 239 182 13 124 112 88 58 58 58
43300 - 22 22 22 6 6 6 0 0 0 0 0 0
43301 - 0 0 0 0 0 0 0 0 0 0 0 0
43302 - 0 0 0 0 0 0 0 0 0 0 0 0
43303 - 0 0 0 0 0 0 0 0 0 0 0 0
43304 - 0 0 0 0 0 0 0 0 0 10 10 10
43305 - 30 30 30 70 70 70 180 133 36 226 170 11
43306 -239 182 13 242 186 14 242 186 14 246 186 14
43307 -246 190 14 246 190 14 246 190 14 246 190 14
43308 -246 190 14 246 190 14 246 190 14 246 190 14
43309 -246 190 14 232 195 16 98 70 6 2 2 6
43310 - 2 2 6 2 2 6 66 66 66 221 221 221
43311 -253 253 253 253 253 253 253 253 253 253 253 253
43312 -253 253 253 253 253 253 253 253 253 253 253 253
43313 -253 253 253 253 253 253 253 253 253 253 253 253
43314 -253 253 253 253 253 253 253 253 253 253 253 253
43315 -253 253 253 206 206 206 198 198 198 214 166 58
43316 -230 174 11 230 174 11 216 158 10 192 133 9
43317 -163 110 8 116 81 8 102 78 10 116 81 8
43318 -167 114 7 197 138 11 226 170 11 239 182 13
43319 -242 186 14 242 186 14 162 146 94 78 78 78
43320 - 34 34 34 14 14 14 6 6 6 0 0 0
43321 - 0 0 0 0 0 0 0 0 0 0 0 0
43322 - 0 0 0 0 0 0 0 0 0 0 0 0
43323 - 0 0 0 0 0 0 0 0 0 0 0 0
43324 - 0 0 0 0 0 0 0 0 0 6 6 6
43325 - 30 30 30 78 78 78 190 142 34 226 170 11
43326 -239 182 13 246 190 14 246 190 14 246 190 14
43327 -246 190 14 246 190 14 246 190 14 246 190 14
43328 -246 190 14 246 190 14 246 190 14 246 190 14
43329 -246 190 14 241 196 14 203 166 17 22 18 6
43330 - 2 2 6 2 2 6 2 2 6 38 38 38
43331 -218 218 218 253 253 253 253 253 253 253 253 253
43332 -253 253 253 253 253 253 253 253 253 253 253 253
43333 -253 253 253 253 253 253 253 253 253 253 253 253
43334 -253 253 253 253 253 253 253 253 253 253 253 253
43335 -250 250 250 206 206 206 198 198 198 202 162 69
43336 -226 170 11 236 178 12 224 166 10 210 150 10
43337 -200 144 11 197 138 11 192 133 9 197 138 11
43338 -210 150 10 226 170 11 242 186 14 246 190 14
43339 -246 190 14 246 186 14 225 175 15 124 112 88
43340 - 62 62 62 30 30 30 14 14 14 6 6 6
43341 - 0 0 0 0 0 0 0 0 0 0 0 0
43342 - 0 0 0 0 0 0 0 0 0 0 0 0
43343 - 0 0 0 0 0 0 0 0 0 0 0 0
43344 - 0 0 0 0 0 0 0 0 0 10 10 10
43345 - 30 30 30 78 78 78 174 135 50 224 166 10
43346 -239 182 13 246 190 14 246 190 14 246 190 14
43347 -246 190 14 246 190 14 246 190 14 246 190 14
43348 -246 190 14 246 190 14 246 190 14 246 190 14
43349 -246 190 14 246 190 14 241 196 14 139 102 15
43350 - 2 2 6 2 2 6 2 2 6 2 2 6
43351 - 78 78 78 250 250 250 253 253 253 253 253 253
43352 -253 253 253 253 253 253 253 253 253 253 253 253
43353 -253 253 253 253 253 253 253 253 253 253 253 253
43354 -253 253 253 253 253 253 253 253 253 253 253 253
43355 -250 250 250 214 214 214 198 198 198 190 150 46
43356 -219 162 10 236 178 12 234 174 13 224 166 10
43357 -216 158 10 213 154 11 213 154 11 216 158 10
43358 -226 170 11 239 182 13 246 190 14 246 190 14
43359 -246 190 14 246 190 14 242 186 14 206 162 42
43360 -101 101 101 58 58 58 30 30 30 14 14 14
43361 - 6 6 6 0 0 0 0 0 0 0 0 0
43362 - 0 0 0 0 0 0 0 0 0 0 0 0
43363 - 0 0 0 0 0 0 0 0 0 0 0 0
43364 - 0 0 0 0 0 0 0 0 0 10 10 10
43365 - 30 30 30 74 74 74 174 135 50 216 158 10
43366 -236 178 12 246 190 14 246 190 14 246 190 14
43367 -246 190 14 246 190 14 246 190 14 246 190 14
43368 -246 190 14 246 190 14 246 190 14 246 190 14
43369 -246 190 14 246 190 14 241 196 14 226 184 13
43370 - 61 42 6 2 2 6 2 2 6 2 2 6
43371 - 22 22 22 238 238 238 253 253 253 253 253 253
43372 -253 253 253 253 253 253 253 253 253 253 253 253
43373 -253 253 253 253 253 253 253 253 253 253 253 253
43374 -253 253 253 253 253 253 253 253 253 253 253 253
43375 -253 253 253 226 226 226 187 187 187 180 133 36
43376 -216 158 10 236 178 12 239 182 13 236 178 12
43377 -230 174 11 226 170 11 226 170 11 230 174 11
43378 -236 178 12 242 186 14 246 190 14 246 190 14
43379 -246 190 14 246 190 14 246 186 14 239 182 13
43380 -206 162 42 106 106 106 66 66 66 34 34 34
43381 - 14 14 14 6 6 6 0 0 0 0 0 0
43382 - 0 0 0 0 0 0 0 0 0 0 0 0
43383 - 0 0 0 0 0 0 0 0 0 0 0 0
43384 - 0 0 0 0 0 0 0 0 0 6 6 6
43385 - 26 26 26 70 70 70 163 133 67 213 154 11
43386 -236 178 12 246 190 14 246 190 14 246 190 14
43387 -246 190 14 246 190 14 246 190 14 246 190 14
43388 -246 190 14 246 190 14 246 190 14 246 190 14
43389 -246 190 14 246 190 14 246 190 14 241 196 14
43390 -190 146 13 18 14 6 2 2 6 2 2 6
43391 - 46 46 46 246 246 246 253 253 253 253 253 253
43392 -253 253 253 253 253 253 253 253 253 253 253 253
43393 -253 253 253 253 253 253 253 253 253 253 253 253
43394 -253 253 253 253 253 253 253 253 253 253 253 253
43395 -253 253 253 221 221 221 86 86 86 156 107 11
43396 -216 158 10 236 178 12 242 186 14 246 186 14
43397 -242 186 14 239 182 13 239 182 13 242 186 14
43398 -242 186 14 246 186 14 246 190 14 246 190 14
43399 -246 190 14 246 190 14 246 190 14 246 190 14
43400 -242 186 14 225 175 15 142 122 72 66 66 66
43401 - 30 30 30 10 10 10 0 0 0 0 0 0
43402 - 0 0 0 0 0 0 0 0 0 0 0 0
43403 - 0 0 0 0 0 0 0 0 0 0 0 0
43404 - 0 0 0 0 0 0 0 0 0 6 6 6
43405 - 26 26 26 70 70 70 163 133 67 210 150 10
43406 -236 178 12 246 190 14 246 190 14 246 190 14
43407 -246 190 14 246 190 14 246 190 14 246 190 14
43408 -246 190 14 246 190 14 246 190 14 246 190 14
43409 -246 190 14 246 190 14 246 190 14 246 190 14
43410 -232 195 16 121 92 8 34 34 34 106 106 106
43411 -221 221 221 253 253 253 253 253 253 253 253 253
43412 -253 253 253 253 253 253 253 253 253 253 253 253
43413 -253 253 253 253 253 253 253 253 253 253 253 253
43414 -253 253 253 253 253 253 253 253 253 253 253 253
43415 -242 242 242 82 82 82 18 14 6 163 110 8
43416 -216 158 10 236 178 12 242 186 14 246 190 14
43417 -246 190 14 246 190 14 246 190 14 246 190 14
43418 -246 190 14 246 190 14 246 190 14 246 190 14
43419 -246 190 14 246 190 14 246 190 14 246 190 14
43420 -246 190 14 246 190 14 242 186 14 163 133 67
43421 - 46 46 46 18 18 18 6 6 6 0 0 0
43422 - 0 0 0 0 0 0 0 0 0 0 0 0
43423 - 0 0 0 0 0 0 0 0 0 0 0 0
43424 - 0 0 0 0 0 0 0 0 0 10 10 10
43425 - 30 30 30 78 78 78 163 133 67 210 150 10
43426 -236 178 12 246 186 14 246 190 14 246 190 14
43427 -246 190 14 246 190 14 246 190 14 246 190 14
43428 -246 190 14 246 190 14 246 190 14 246 190 14
43429 -246 190 14 246 190 14 246 190 14 246 190 14
43430 -241 196 14 215 174 15 190 178 144 253 253 253
43431 -253 253 253 253 253 253 253 253 253 253 253 253
43432 -253 253 253 253 253 253 253 253 253 253 253 253
43433 -253 253 253 253 253 253 253 253 253 253 253 253
43434 -253 253 253 253 253 253 253 253 253 218 218 218
43435 - 58 58 58 2 2 6 22 18 6 167 114 7
43436 -216 158 10 236 178 12 246 186 14 246 190 14
43437 -246 190 14 246 190 14 246 190 14 246 190 14
43438 -246 190 14 246 190 14 246 190 14 246 190 14
43439 -246 190 14 246 190 14 246 190 14 246 190 14
43440 -246 190 14 246 186 14 242 186 14 190 150 46
43441 - 54 54 54 22 22 22 6 6 6 0 0 0
43442 - 0 0 0 0 0 0 0 0 0 0 0 0
43443 - 0 0 0 0 0 0 0 0 0 0 0 0
43444 - 0 0 0 0 0 0 0 0 0 14 14 14
43445 - 38 38 38 86 86 86 180 133 36 213 154 11
43446 -236 178 12 246 186 14 246 190 14 246 190 14
43447 -246 190 14 246 190 14 246 190 14 246 190 14
43448 -246 190 14 246 190 14 246 190 14 246 190 14
43449 -246 190 14 246 190 14 246 190 14 246 190 14
43450 -246 190 14 232 195 16 190 146 13 214 214 214
43451 -253 253 253 253 253 253 253 253 253 253 253 253
43452 -253 253 253 253 253 253 253 253 253 253 253 253
43453 -253 253 253 253 253 253 253 253 253 253 253 253
43454 -253 253 253 250 250 250 170 170 170 26 26 26
43455 - 2 2 6 2 2 6 37 26 9 163 110 8
43456 -219 162 10 239 182 13 246 186 14 246 190 14
43457 -246 190 14 246 190 14 246 190 14 246 190 14
43458 -246 190 14 246 190 14 246 190 14 246 190 14
43459 -246 190 14 246 190 14 246 190 14 246 190 14
43460 -246 186 14 236 178 12 224 166 10 142 122 72
43461 - 46 46 46 18 18 18 6 6 6 0 0 0
43462 - 0 0 0 0 0 0 0 0 0 0 0 0
43463 - 0 0 0 0 0 0 0 0 0 0 0 0
43464 - 0 0 0 0 0 0 6 6 6 18 18 18
43465 - 50 50 50 109 106 95 192 133 9 224 166 10
43466 -242 186 14 246 190 14 246 190 14 246 190 14
43467 -246 190 14 246 190 14 246 190 14 246 190 14
43468 -246 190 14 246 190 14 246 190 14 246 190 14
43469 -246 190 14 246 190 14 246 190 14 246 190 14
43470 -242 186 14 226 184 13 210 162 10 142 110 46
43471 -226 226 226 253 253 253 253 253 253 253 253 253
43472 -253 253 253 253 253 253 253 253 253 253 253 253
43473 -253 253 253 253 253 253 253 253 253 253 253 253
43474 -198 198 198 66 66 66 2 2 6 2 2 6
43475 - 2 2 6 2 2 6 50 34 6 156 107 11
43476 -219 162 10 239 182 13 246 186 14 246 190 14
43477 -246 190 14 246 190 14 246 190 14 246 190 14
43478 -246 190 14 246 190 14 246 190 14 246 190 14
43479 -246 190 14 246 190 14 246 190 14 242 186 14
43480 -234 174 13 213 154 11 154 122 46 66 66 66
43481 - 30 30 30 10 10 10 0 0 0 0 0 0
43482 - 0 0 0 0 0 0 0 0 0 0 0 0
43483 - 0 0 0 0 0 0 0 0 0 0 0 0
43484 - 0 0 0 0 0 0 6 6 6 22 22 22
43485 - 58 58 58 154 121 60 206 145 10 234 174 13
43486 -242 186 14 246 186 14 246 190 14 246 190 14
43487 -246 190 14 246 190 14 246 190 14 246 190 14
43488 -246 190 14 246 190 14 246 190 14 246 190 14
43489 -246 190 14 246 190 14 246 190 14 246 190 14
43490 -246 186 14 236 178 12 210 162 10 163 110 8
43491 - 61 42 6 138 138 138 218 218 218 250 250 250
43492 -253 253 253 253 253 253 253 253 253 250 250 250
43493 -242 242 242 210 210 210 144 144 144 66 66 66
43494 - 6 6 6 2 2 6 2 2 6 2 2 6
43495 - 2 2 6 2 2 6 61 42 6 163 110 8
43496 -216 158 10 236 178 12 246 190 14 246 190 14
43497 -246 190 14 246 190 14 246 190 14 246 190 14
43498 -246 190 14 246 190 14 246 190 14 246 190 14
43499 -246 190 14 239 182 13 230 174 11 216 158 10
43500 -190 142 34 124 112 88 70 70 70 38 38 38
43501 - 18 18 18 6 6 6 0 0 0 0 0 0
43502 - 0 0 0 0 0 0 0 0 0 0 0 0
43503 - 0 0 0 0 0 0 0 0 0 0 0 0
43504 - 0 0 0 0 0 0 6 6 6 22 22 22
43505 - 62 62 62 168 124 44 206 145 10 224 166 10
43506 -236 178 12 239 182 13 242 186 14 242 186 14
43507 -246 186 14 246 190 14 246 190 14 246 190 14
43508 -246 190 14 246 190 14 246 190 14 246 190 14
43509 -246 190 14 246 190 14 246 190 14 246 190 14
43510 -246 190 14 236 178 12 216 158 10 175 118 6
43511 - 80 54 7 2 2 6 6 6 6 30 30 30
43512 - 54 54 54 62 62 62 50 50 50 38 38 38
43513 - 14 14 14 2 2 6 2 2 6 2 2 6
43514 - 2 2 6 2 2 6 2 2 6 2 2 6
43515 - 2 2 6 6 6 6 80 54 7 167 114 7
43516 -213 154 11 236 178 12 246 190 14 246 190 14
43517 -246 190 14 246 190 14 246 190 14 246 190 14
43518 -246 190 14 242 186 14 239 182 13 239 182 13
43519 -230 174 11 210 150 10 174 135 50 124 112 88
43520 - 82 82 82 54 54 54 34 34 34 18 18 18
43521 - 6 6 6 0 0 0 0 0 0 0 0 0
43522 - 0 0 0 0 0 0 0 0 0 0 0 0
43523 - 0 0 0 0 0 0 0 0 0 0 0 0
43524 - 0 0 0 0 0 0 6 6 6 18 18 18
43525 - 50 50 50 158 118 36 192 133 9 200 144 11
43526 -216 158 10 219 162 10 224 166 10 226 170 11
43527 -230 174 11 236 178 12 239 182 13 239 182 13
43528 -242 186 14 246 186 14 246 190 14 246 190 14
43529 -246 190 14 246 190 14 246 190 14 246 190 14
43530 -246 186 14 230 174 11 210 150 10 163 110 8
43531 -104 69 6 10 10 10 2 2 6 2 2 6
43532 - 2 2 6 2 2 6 2 2 6 2 2 6
43533 - 2 2 6 2 2 6 2 2 6 2 2 6
43534 - 2 2 6 2 2 6 2 2 6 2 2 6
43535 - 2 2 6 6 6 6 91 60 6 167 114 7
43536 -206 145 10 230 174 11 242 186 14 246 190 14
43537 -246 190 14 246 190 14 246 186 14 242 186 14
43538 -239 182 13 230 174 11 224 166 10 213 154 11
43539 -180 133 36 124 112 88 86 86 86 58 58 58
43540 - 38 38 38 22 22 22 10 10 10 6 6 6
43541 - 0 0 0 0 0 0 0 0 0 0 0 0
43542 - 0 0 0 0 0 0 0 0 0 0 0 0
43543 - 0 0 0 0 0 0 0 0 0 0 0 0
43544 - 0 0 0 0 0 0 0 0 0 14 14 14
43545 - 34 34 34 70 70 70 138 110 50 158 118 36
43546 -167 114 7 180 123 7 192 133 9 197 138 11
43547 -200 144 11 206 145 10 213 154 11 219 162 10
43548 -224 166 10 230 174 11 239 182 13 242 186 14
43549 -246 186 14 246 186 14 246 186 14 246 186 14
43550 -239 182 13 216 158 10 185 133 11 152 99 6
43551 -104 69 6 18 14 6 2 2 6 2 2 6
43552 - 2 2 6 2 2 6 2 2 6 2 2 6
43553 - 2 2 6 2 2 6 2 2 6 2 2 6
43554 - 2 2 6 2 2 6 2 2 6 2 2 6
43555 - 2 2 6 6 6 6 80 54 7 152 99 6
43556 -192 133 9 219 162 10 236 178 12 239 182 13
43557 -246 186 14 242 186 14 239 182 13 236 178 12
43558 -224 166 10 206 145 10 192 133 9 154 121 60
43559 - 94 94 94 62 62 62 42 42 42 22 22 22
43560 - 14 14 14 6 6 6 0 0 0 0 0 0
43561 - 0 0 0 0 0 0 0 0 0 0 0 0
43562 - 0 0 0 0 0 0 0 0 0 0 0 0
43563 - 0 0 0 0 0 0 0 0 0 0 0 0
43564 - 0 0 0 0 0 0 0 0 0 6 6 6
43565 - 18 18 18 34 34 34 58 58 58 78 78 78
43566 -101 98 89 124 112 88 142 110 46 156 107 11
43567 -163 110 8 167 114 7 175 118 6 180 123 7
43568 -185 133 11 197 138 11 210 150 10 219 162 10
43569 -226 170 11 236 178 12 236 178 12 234 174 13
43570 -219 162 10 197 138 11 163 110 8 130 83 6
43571 - 91 60 6 10 10 10 2 2 6 2 2 6
43572 - 18 18 18 38 38 38 38 38 38 38 38 38
43573 - 38 38 38 38 38 38 38 38 38 38 38 38
43574 - 38 38 38 38 38 38 26 26 26 2 2 6
43575 - 2 2 6 6 6 6 70 47 6 137 92 6
43576 -175 118 6 200 144 11 219 162 10 230 174 11
43577 -234 174 13 230 174 11 219 162 10 210 150 10
43578 -192 133 9 163 110 8 124 112 88 82 82 82
43579 - 50 50 50 30 30 30 14 14 14 6 6 6
43580 - 0 0 0 0 0 0 0 0 0 0 0 0
43581 - 0 0 0 0 0 0 0 0 0 0 0 0
43582 - 0 0 0 0 0 0 0 0 0 0 0 0
43583 - 0 0 0 0 0 0 0 0 0 0 0 0
43584 - 0 0 0 0 0 0 0 0 0 0 0 0
43585 - 6 6 6 14 14 14 22 22 22 34 34 34
43586 - 42 42 42 58 58 58 74 74 74 86 86 86
43587 -101 98 89 122 102 70 130 98 46 121 87 25
43588 -137 92 6 152 99 6 163 110 8 180 123 7
43589 -185 133 11 197 138 11 206 145 10 200 144 11
43590 -180 123 7 156 107 11 130 83 6 104 69 6
43591 - 50 34 6 54 54 54 110 110 110 101 98 89
43592 - 86 86 86 82 82 82 78 78 78 78 78 78
43593 - 78 78 78 78 78 78 78 78 78 78 78 78
43594 - 78 78 78 82 82 82 86 86 86 94 94 94
43595 -106 106 106 101 101 101 86 66 34 124 80 6
43596 -156 107 11 180 123 7 192 133 9 200 144 11
43597 -206 145 10 200 144 11 192 133 9 175 118 6
43598 -139 102 15 109 106 95 70 70 70 42 42 42
43599 - 22 22 22 10 10 10 0 0 0 0 0 0
43600 - 0 0 0 0 0 0 0 0 0 0 0 0
43601 - 0 0 0 0 0 0 0 0 0 0 0 0
43602 - 0 0 0 0 0 0 0 0 0 0 0 0
43603 - 0 0 0 0 0 0 0 0 0 0 0 0
43604 - 0 0 0 0 0 0 0 0 0 0 0 0
43605 - 0 0 0 0 0 0 6 6 6 10 10 10
43606 - 14 14 14 22 22 22 30 30 30 38 38 38
43607 - 50 50 50 62 62 62 74 74 74 90 90 90
43608 -101 98 89 112 100 78 121 87 25 124 80 6
43609 -137 92 6 152 99 6 152 99 6 152 99 6
43610 -138 86 6 124 80 6 98 70 6 86 66 30
43611 -101 98 89 82 82 82 58 58 58 46 46 46
43612 - 38 38 38 34 34 34 34 34 34 34 34 34
43613 - 34 34 34 34 34 34 34 34 34 34 34 34
43614 - 34 34 34 34 34 34 38 38 38 42 42 42
43615 - 54 54 54 82 82 82 94 86 76 91 60 6
43616 -134 86 6 156 107 11 167 114 7 175 118 6
43617 -175 118 6 167 114 7 152 99 6 121 87 25
43618 -101 98 89 62 62 62 34 34 34 18 18 18
43619 - 6 6 6 0 0 0 0 0 0 0 0 0
43620 - 0 0 0 0 0 0 0 0 0 0 0 0
43621 - 0 0 0 0 0 0 0 0 0 0 0 0
43622 - 0 0 0 0 0 0 0 0 0 0 0 0
43623 - 0 0 0 0 0 0 0 0 0 0 0 0
43624 - 0 0 0 0 0 0 0 0 0 0 0 0
43625 - 0 0 0 0 0 0 0 0 0 0 0 0
43626 - 0 0 0 6 6 6 6 6 6 10 10 10
43627 - 18 18 18 22 22 22 30 30 30 42 42 42
43628 - 50 50 50 66 66 66 86 86 86 101 98 89
43629 -106 86 58 98 70 6 104 69 6 104 69 6
43630 -104 69 6 91 60 6 82 62 34 90 90 90
43631 - 62 62 62 38 38 38 22 22 22 14 14 14
43632 - 10 10 10 10 10 10 10 10 10 10 10 10
43633 - 10 10 10 10 10 10 6 6 6 10 10 10
43634 - 10 10 10 10 10 10 10 10 10 14 14 14
43635 - 22 22 22 42 42 42 70 70 70 89 81 66
43636 - 80 54 7 104 69 6 124 80 6 137 92 6
43637 -134 86 6 116 81 8 100 82 52 86 86 86
43638 - 58 58 58 30 30 30 14 14 14 6 6 6
43639 - 0 0 0 0 0 0 0 0 0 0 0 0
43640 - 0 0 0 0 0 0 0 0 0 0 0 0
43641 - 0 0 0 0 0 0 0 0 0 0 0 0
43642 - 0 0 0 0 0 0 0 0 0 0 0 0
43643 - 0 0 0 0 0 0 0 0 0 0 0 0
43644 - 0 0 0 0 0 0 0 0 0 0 0 0
43645 - 0 0 0 0 0 0 0 0 0 0 0 0
43646 - 0 0 0 0 0 0 0 0 0 0 0 0
43647 - 0 0 0 6 6 6 10 10 10 14 14 14
43648 - 18 18 18 26 26 26 38 38 38 54 54 54
43649 - 70 70 70 86 86 86 94 86 76 89 81 66
43650 - 89 81 66 86 86 86 74 74 74 50 50 50
43651 - 30 30 30 14 14 14 6 6 6 0 0 0
43652 - 0 0 0 0 0 0 0 0 0 0 0 0
43653 - 0 0 0 0 0 0 0 0 0 0 0 0
43654 - 0 0 0 0 0 0 0 0 0 0 0 0
43655 - 6 6 6 18 18 18 34 34 34 58 58 58
43656 - 82 82 82 89 81 66 89 81 66 89 81 66
43657 - 94 86 66 94 86 76 74 74 74 50 50 50
43658 - 26 26 26 14 14 14 6 6 6 0 0 0
43659 - 0 0 0 0 0 0 0 0 0 0 0 0
43660 - 0 0 0 0 0 0 0 0 0 0 0 0
43661 - 0 0 0 0 0 0 0 0 0 0 0 0
43662 - 0 0 0 0 0 0 0 0 0 0 0 0
43663 - 0 0 0 0 0 0 0 0 0 0 0 0
43664 - 0 0 0 0 0 0 0 0 0 0 0 0
43665 - 0 0 0 0 0 0 0 0 0 0 0 0
43666 - 0 0 0 0 0 0 0 0 0 0 0 0
43667 - 0 0 0 0 0 0 0 0 0 0 0 0
43668 - 6 6 6 6 6 6 14 14 14 18 18 18
43669 - 30 30 30 38 38 38 46 46 46 54 54 54
43670 - 50 50 50 42 42 42 30 30 30 18 18 18
43671 - 10 10 10 0 0 0 0 0 0 0 0 0
43672 - 0 0 0 0 0 0 0 0 0 0 0 0
43673 - 0 0 0 0 0 0 0 0 0 0 0 0
43674 - 0 0 0 0 0 0 0 0 0 0 0 0
43675 - 0 0 0 6 6 6 14 14 14 26 26 26
43676 - 38 38 38 50 50 50 58 58 58 58 58 58
43677 - 54 54 54 42 42 42 30 30 30 18 18 18
43678 - 10 10 10 0 0 0 0 0 0 0 0 0
43679 - 0 0 0 0 0 0 0 0 0 0 0 0
43680 - 0 0 0 0 0 0 0 0 0 0 0 0
43681 - 0 0 0 0 0 0 0 0 0 0 0 0
43682 - 0 0 0 0 0 0 0 0 0 0 0 0
43683 - 0 0 0 0 0 0 0 0 0 0 0 0
43684 - 0 0 0 0 0 0 0 0 0 0 0 0
43685 - 0 0 0 0 0 0 0 0 0 0 0 0
43686 - 0 0 0 0 0 0 0 0 0 0 0 0
43687 - 0 0 0 0 0 0 0 0 0 0 0 0
43688 - 0 0 0 0 0 0 0 0 0 6 6 6
43689 - 6 6 6 10 10 10 14 14 14 18 18 18
43690 - 18 18 18 14 14 14 10 10 10 6 6 6
43691 - 0 0 0 0 0 0 0 0 0 0 0 0
43692 - 0 0 0 0 0 0 0 0 0 0 0 0
43693 - 0 0 0 0 0 0 0 0 0 0 0 0
43694 - 0 0 0 0 0 0 0 0 0 0 0 0
43695 - 0 0 0 0 0 0 0 0 0 6 6 6
43696 - 14 14 14 18 18 18 22 22 22 22 22 22
43697 - 18 18 18 14 14 14 10 10 10 6 6 6
43698 - 0 0 0 0 0 0 0 0 0 0 0 0
43699 - 0 0 0 0 0 0 0 0 0 0 0 0
43700 - 0 0 0 0 0 0 0 0 0 0 0 0
43701 - 0 0 0 0 0 0 0 0 0 0 0 0
43702 - 0 0 0 0 0 0 0 0 0 0 0 0
43703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43708 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43714 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43716 +4 4 4 4 4 4
43717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43722 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43727 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43728 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43729 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43730 +4 4 4 4 4 4
43731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43736 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43741 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43742 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43744 +4 4 4 4 4 4
43745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43747 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43748 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43750 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43755 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43756 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43758 +4 4 4 4 4 4
43759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43761 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43764 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43769 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43770 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43772 +4 4 4 4 4 4
43773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43775 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43778 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43783 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43784 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43786 +4 4 4 4 4 4
43787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43789 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43791 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
43792 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
43793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43796 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
43797 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
43798 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
43799 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43800 +4 4 4 4 4 4
43801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43802 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43803 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43805 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
43806 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
43807 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43810 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
43811 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
43812 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
43813 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43814 +4 4 4 4 4 4
43815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43816 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43817 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43819 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
43820 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
43821 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
43822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43824 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
43825 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
43826 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
43827 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
43828 +4 4 4 4 4 4
43829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43832 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
43833 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
43834 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
43835 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
43836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43837 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
43838 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
43839 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
43840 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
43841 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
43842 +4 4 4 4 4 4
43843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43846 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
43847 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
43848 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
43849 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
43850 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
43851 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
43852 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
43853 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
43854 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
43855 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
43856 +4 4 4 4 4 4
43857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
43860 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
43861 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
43862 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
43863 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
43864 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
43865 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
43866 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
43867 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
43868 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
43869 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
43870 +4 4 4 4 4 4
43871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43873 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
43874 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
43875 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
43876 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
43877 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
43878 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
43879 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
43880 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
43881 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
43882 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
43883 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
43884 +4 4 4 4 4 4
43885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43887 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
43888 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
43889 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
43890 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
43891 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
43892 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
43893 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
43894 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
43895 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
43896 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
43897 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
43898 +4 4 4 4 4 4
43899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43901 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
43902 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
43903 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
43904 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
43905 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
43906 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
43907 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
43908 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
43909 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
43910 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
43911 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
43912 +4 4 4 4 4 4
43913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43915 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
43916 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
43917 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
43918 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
43919 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
43920 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
43921 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
43922 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
43923 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
43924 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
43925 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
43926 +4 4 4 4 4 4
43927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43928 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
43929 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
43930 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
43931 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
43932 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
43933 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
43934 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
43935 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
43936 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
43937 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
43938 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
43939 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
43940 +4 4 4 4 4 4
43941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43942 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
43943 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
43944 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
43945 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
43946 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
43947 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
43948 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
43949 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
43950 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
43951 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
43952 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
43953 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
43954 +0 0 0 4 4 4
43955 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
43956 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
43957 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
43958 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
43959 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
43960 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
43961 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
43962 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
43963 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
43964 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
43965 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
43966 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
43967 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
43968 +2 0 0 0 0 0
43969 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
43970 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
43971 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
43972 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
43973 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
43974 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
43975 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
43976 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
43977 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
43978 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
43979 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
43980 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
43981 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
43982 +37 38 37 0 0 0
43983 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
43984 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
43985 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
43986 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
43987 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
43988 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
43989 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
43990 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
43991 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
43992 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
43993 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
43994 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
43995 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
43996 +85 115 134 4 0 0
43997 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
43998 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
43999 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
44000 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
44001 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
44002 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
44003 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
44004 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
44005 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
44006 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
44007 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
44008 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
44009 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
44010 +60 73 81 4 0 0
44011 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
44012 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
44013 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
44014 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
44015 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
44016 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
44017 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
44018 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
44019 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
44020 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
44021 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
44022 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
44023 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
44024 +16 19 21 4 0 0
44025 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
44026 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
44027 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
44028 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
44029 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
44030 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
44031 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
44032 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
44033 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
44034 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
44035 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
44036 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
44037 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
44038 +4 0 0 4 3 3
44039 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
44040 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
44041 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
44042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
44043 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
44044 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
44045 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
44046 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
44047 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
44048 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
44049 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
44050 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
44051 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
44052 +3 2 2 4 4 4
44053 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
44054 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
44055 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
44056 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44057 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
44058 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
44059 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
44060 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
44061 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
44062 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
44063 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
44064 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
44065 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
44066 +4 4 4 4 4 4
44067 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
44068 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
44069 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
44070 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
44071 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
44072 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
44073 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
44074 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
44075 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
44076 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
44077 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
44078 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
44079 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
44080 +4 4 4 4 4 4
44081 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
44082 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
44083 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
44084 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
44085 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
44086 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
44087 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
44088 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
44089 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
44090 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
44091 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
44092 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
44093 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
44094 +5 5 5 5 5 5
44095 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
44096 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
44097 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
44098 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
44099 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
44100 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44101 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
44102 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
44103 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
44104 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
44105 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
44106 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
44107 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
44108 +5 5 5 4 4 4
44109 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
44110 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
44111 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
44112 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
44113 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
44114 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
44115 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
44116 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
44117 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
44118 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
44119 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
44120 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
44121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44122 +4 4 4 4 4 4
44123 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
44124 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
44125 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
44126 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
44127 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
44128 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44129 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44130 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
44131 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
44132 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
44133 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
44134 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
44135 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44136 +4 4 4 4 4 4
44137 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
44138 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
44139 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
44140 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
44141 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
44142 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
44143 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
44144 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
44145 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
44146 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
44147 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
44148 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44149 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44150 +4 4 4 4 4 4
44151 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
44152 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
44153 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
44154 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
44155 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
44156 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44157 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44158 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
44159 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
44160 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
44161 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
44162 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44164 +4 4 4 4 4 4
44165 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
44166 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
44167 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
44168 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
44169 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
44170 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
44171 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
44172 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
44173 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
44174 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
44175 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44176 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44178 +4 4 4 4 4 4
44179 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
44180 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
44181 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
44182 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
44183 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
44184 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
44185 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
44186 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
44187 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
44188 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
44189 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
44190 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44192 +4 4 4 4 4 4
44193 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
44194 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
44195 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
44196 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
44197 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
44198 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
44199 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
44200 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
44201 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
44202 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
44203 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
44204 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44205 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44206 +4 4 4 4 4 4
44207 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
44208 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
44209 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
44210 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
44211 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
44212 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
44213 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
44214 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
44215 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
44216 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
44217 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44218 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44219 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44220 +4 4 4 4 4 4
44221 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
44222 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
44223 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
44224 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
44225 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
44226 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
44227 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
44228 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
44229 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
44230 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
44231 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44234 +4 4 4 4 4 4
44235 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
44236 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
44237 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
44238 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
44239 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
44240 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
44241 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
44242 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
44243 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
44244 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
44245 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44246 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44248 +4 4 4 4 4 4
44249 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
44250 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
44251 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
44252 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
44253 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
44254 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
44255 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
44256 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
44257 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
44258 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44259 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44262 +4 4 4 4 4 4
44263 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
44264 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
44265 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
44266 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
44267 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
44268 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
44269 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
44270 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
44271 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
44272 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44273 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44276 +4 4 4 4 4 4
44277 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
44278 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
44279 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
44280 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
44281 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
44282 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
44283 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
44284 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
44285 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
44286 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44287 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44290 +4 4 4 4 4 4
44291 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
44292 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
44293 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
44294 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
44295 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
44296 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
44297 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
44298 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
44299 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
44300 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44301 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44304 +4 4 4 4 4 4
44305 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
44306 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
44307 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
44308 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
44309 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
44310 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
44311 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
44312 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
44313 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
44314 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44315 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44318 +4 4 4 4 4 4
44319 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
44320 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
44321 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
44322 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
44323 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
44324 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
44325 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
44326 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
44327 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
44328 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44329 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44332 +4 4 4 4 4 4
44333 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
44334 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
44335 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
44336 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
44337 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
44338 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
44339 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
44340 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
44341 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
44342 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44343 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44346 +4 4 4 4 4 4
44347 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
44348 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
44349 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
44350 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
44351 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
44352 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
44353 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
44354 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
44355 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
44356 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44357 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44358 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44360 +4 4 4 4 4 4
44361 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
44362 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
44363 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
44364 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
44365 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
44366 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
44367 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
44368 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
44369 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
44370 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44371 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44374 +4 4 4 4 4 4
44375 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
44376 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
44377 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
44378 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
44379 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
44380 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
44381 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
44382 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
44383 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
44384 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44385 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44386 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44388 +4 4 4 4 4 4
44389 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
44390 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
44391 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
44392 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
44393 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
44394 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
44395 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
44396 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
44397 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
44398 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44399 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44400 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44402 +4 4 4 4 4 4
44403 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
44404 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
44405 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
44406 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
44407 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
44408 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
44409 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
44410 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
44411 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
44412 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44413 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44414 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44416 +4 4 4 4 4 4
44417 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
44418 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
44419 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
44420 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
44421 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
44422 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
44423 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
44424 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
44425 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
44426 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44427 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44430 +4 4 4 4 4 4
44431 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
44432 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
44433 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
44434 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
44435 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
44436 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
44437 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
44438 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
44439 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
44440 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44441 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44444 +4 4 4 4 4 4
44445 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
44446 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
44447 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
44448 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
44449 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
44450 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
44451 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
44452 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
44453 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
44454 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
44455 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44458 +4 4 4 4 4 4
44459 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
44460 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
44461 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
44462 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
44463 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
44464 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
44465 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
44466 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
44467 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
44468 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
44469 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44472 +4 4 4 4 4 4
44473 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
44474 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
44475 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
44476 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
44477 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
44478 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
44479 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
44480 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
44481 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
44482 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
44483 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44486 +4 4 4 4 4 4
44487 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
44488 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
44489 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
44490 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
44491 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
44492 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
44493 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44494 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
44495 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
44496 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
44497 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44500 +4 4 4 4 4 4
44501 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
44502 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
44503 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
44504 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
44505 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
44506 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
44507 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
44508 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
44509 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
44510 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
44511 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44514 +4 4 4 4 4 4
44515 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
44516 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
44517 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
44518 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
44519 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
44520 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
44521 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
44522 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
44523 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
44524 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
44525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44528 +4 4 4 4 4 4
44529 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
44530 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
44531 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
44532 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
44533 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
44534 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
44535 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
44536 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
44537 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
44538 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
44539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44541 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44542 +4 4 4 4 4 4
44543 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
44544 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
44545 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
44546 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
44547 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
44548 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
44549 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
44550 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
44551 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
44552 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
44553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44556 +4 4 4 4 4 4
44557 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
44558 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
44559 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
44560 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
44561 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
44562 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
44563 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
44564 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
44565 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
44566 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44569 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44570 +4 4 4 4 4 4
44571 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
44572 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
44573 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
44574 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
44575 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
44576 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
44577 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
44578 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
44579 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
44580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44584 +4 4 4 4 4 4
44585 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
44586 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
44587 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
44588 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
44589 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
44590 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
44591 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
44592 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
44593 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
44594 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44596 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44598 +4 4 4 4 4 4
44599 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
44600 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
44601 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
44602 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
44603 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
44604 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
44605 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
44606 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
44607 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44610 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44612 +4 4 4 4 4 4
44613 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
44614 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
44615 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
44616 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
44617 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
44618 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
44619 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
44620 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
44621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44624 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44626 +4 4 4 4 4 4
44627 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
44628 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
44629 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
44630 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
44631 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
44632 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
44633 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
44634 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
44635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44636 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44637 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44638 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44640 +4 4 4 4 4 4
44641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
44642 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
44643 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
44644 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
44645 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
44646 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
44647 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
44648 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
44649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44651 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44652 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44653 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44654 +4 4 4 4 4 4
44655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44656 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
44657 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
44658 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
44659 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
44660 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
44661 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
44662 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
44663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44664 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44665 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44666 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44667 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44668 +4 4 4 4 4 4
44669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44670 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
44671 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
44672 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
44673 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
44674 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
44675 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
44676 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44678 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44679 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44680 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44681 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44682 +4 4 4 4 4 4
44683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44685 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
44686 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
44687 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
44688 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
44689 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
44690 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44692 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44693 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44694 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44695 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44696 +4 4 4 4 4 4
44697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44699 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
44700 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
44701 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
44702 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
44703 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
44704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44708 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44710 +4 4 4 4 4 4
44711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44714 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
44715 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
44716 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
44717 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
44718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44722 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44724 +4 4 4 4 4 4
44725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44727 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44728 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
44729 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
44730 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
44731 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
44732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44736 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44738 +4 4 4 4 4 4
44739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44741 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44742 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
44743 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
44744 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
44745 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
44746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44747 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44748 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44750 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44752 +4 4 4 4 4 4
44753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44755 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44756 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
44757 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
44758 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
44759 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44761 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44764 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44766 +4 4 4 4 4 4
44767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44769 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44770 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44771 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
44772 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
44773 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
44774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44775 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44778 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44780 +4 4 4 4 4 4
44781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44783 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44784 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44785 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
44786 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
44787 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44789 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44792 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44794 +4 4 4 4 4 4
44795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44796 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44797 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44798 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44799 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
44800 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
44801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44802 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44803 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44806 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44808 +4 4 4 4 4 4
44809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44810 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44811 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44812 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44813 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
44814 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
44815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44816 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44817 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44820 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44822 +4 4 4 4 4 4
44823 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
44824 index 86d449e..af6a7f7 100644
44825 --- a/drivers/video/udlfb.c
44826 +++ b/drivers/video/udlfb.c
44827 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
44828 dlfb_urb_completion(urb);
44829
44830 error:
44831 - atomic_add(bytes_sent, &dev->bytes_sent);
44832 - atomic_add(bytes_identical, &dev->bytes_identical);
44833 - atomic_add(width*height*2, &dev->bytes_rendered);
44834 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
44835 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
44836 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
44837 end_cycles = get_cycles();
44838 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
44839 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
44840 >> 10)), /* Kcycles */
44841 &dev->cpu_kcycles_used);
44842
44843 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
44844 dlfb_urb_completion(urb);
44845
44846 error:
44847 - atomic_add(bytes_sent, &dev->bytes_sent);
44848 - atomic_add(bytes_identical, &dev->bytes_identical);
44849 - atomic_add(bytes_rendered, &dev->bytes_rendered);
44850 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
44851 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
44852 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
44853 end_cycles = get_cycles();
44854 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
44855 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
44856 >> 10)), /* Kcycles */
44857 &dev->cpu_kcycles_used);
44858 }
44859 @@ -1372,7 +1372,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
44860 struct fb_info *fb_info = dev_get_drvdata(fbdev);
44861 struct dlfb_data *dev = fb_info->par;
44862 return snprintf(buf, PAGE_SIZE, "%u\n",
44863 - atomic_read(&dev->bytes_rendered));
44864 + atomic_read_unchecked(&dev->bytes_rendered));
44865 }
44866
44867 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
44868 @@ -1380,7 +1380,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
44869 struct fb_info *fb_info = dev_get_drvdata(fbdev);
44870 struct dlfb_data *dev = fb_info->par;
44871 return snprintf(buf, PAGE_SIZE, "%u\n",
44872 - atomic_read(&dev->bytes_identical));
44873 + atomic_read_unchecked(&dev->bytes_identical));
44874 }
44875
44876 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
44877 @@ -1388,7 +1388,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
44878 struct fb_info *fb_info = dev_get_drvdata(fbdev);
44879 struct dlfb_data *dev = fb_info->par;
44880 return snprintf(buf, PAGE_SIZE, "%u\n",
44881 - atomic_read(&dev->bytes_sent));
44882 + atomic_read_unchecked(&dev->bytes_sent));
44883 }
44884
44885 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
44886 @@ -1396,7 +1396,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
44887 struct fb_info *fb_info = dev_get_drvdata(fbdev);
44888 struct dlfb_data *dev = fb_info->par;
44889 return snprintf(buf, PAGE_SIZE, "%u\n",
44890 - atomic_read(&dev->cpu_kcycles_used));
44891 + atomic_read_unchecked(&dev->cpu_kcycles_used));
44892 }
44893
44894 static ssize_t edid_show(
44895 @@ -1456,10 +1456,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
44896 struct fb_info *fb_info = dev_get_drvdata(fbdev);
44897 struct dlfb_data *dev = fb_info->par;
44898
44899 - atomic_set(&dev->bytes_rendered, 0);
44900 - atomic_set(&dev->bytes_identical, 0);
44901 - atomic_set(&dev->bytes_sent, 0);
44902 - atomic_set(&dev->cpu_kcycles_used, 0);
44903 + atomic_set_unchecked(&dev->bytes_rendered, 0);
44904 + atomic_set_unchecked(&dev->bytes_identical, 0);
44905 + atomic_set_unchecked(&dev->bytes_sent, 0);
44906 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
44907
44908 return count;
44909 }
44910 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
44911 index b75db01..5631c6d 100644
44912 --- a/drivers/video/uvesafb.c
44913 +++ b/drivers/video/uvesafb.c
44914 @@ -19,6 +19,7 @@
44915 #include <linux/io.h>
44916 #include <linux/mutex.h>
44917 #include <linux/slab.h>
44918 +#include <linux/moduleloader.h>
44919 #include <video/edid.h>
44920 #include <video/uvesafb.h>
44921 #ifdef CONFIG_X86
44922 @@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
44923 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
44924 par->pmi_setpal = par->ypan = 0;
44925 } else {
44926 +
44927 +#ifdef CONFIG_PAX_KERNEXEC
44928 +#ifdef CONFIG_MODULES
44929 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
44930 +#endif
44931 + if (!par->pmi_code) {
44932 + par->pmi_setpal = par->ypan = 0;
44933 + return 0;
44934 + }
44935 +#endif
44936 +
44937 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
44938 + task->t.regs.edi);
44939 +
44940 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44941 + pax_open_kernel();
44942 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
44943 + pax_close_kernel();
44944 +
44945 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
44946 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
44947 +#else
44948 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
44949 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
44950 +#endif
44951 +
44952 printk(KERN_INFO "uvesafb: protected mode interface info at "
44953 "%04x:%04x\n",
44954 (u16)task->t.regs.es, (u16)task->t.regs.edi);
44955 @@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
44956 par->ypan = ypan;
44957
44958 if (par->pmi_setpal || par->ypan) {
44959 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
44960 if (__supported_pte_mask & _PAGE_NX) {
44961 par->pmi_setpal = par->ypan = 0;
44962 printk(KERN_WARNING "uvesafb: NX protection is actively."
44963 "We have better not to use the PMI.\n");
44964 - } else {
44965 + } else
44966 +#endif
44967 uvesafb_vbe_getpmi(task, par);
44968 - }
44969 }
44970 #else
44971 /* The protected mode interface is not available on non-x86. */
44972 @@ -1836,6 +1860,11 @@ out:
44973 if (par->vbe_modes)
44974 kfree(par->vbe_modes);
44975
44976 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44977 + if (par->pmi_code)
44978 + module_free_exec(NULL, par->pmi_code);
44979 +#endif
44980 +
44981 framebuffer_release(info);
44982 return err;
44983 }
44984 @@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
44985 kfree(par->vbe_state_orig);
44986 if (par->vbe_state_saved)
44987 kfree(par->vbe_state_saved);
44988 +
44989 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44990 + if (par->pmi_code)
44991 + module_free_exec(NULL, par->pmi_code);
44992 +#endif
44993 +
44994 }
44995
44996 framebuffer_release(info);
44997 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
44998 index 501b340..86bd4cf 100644
44999 --- a/drivers/video/vesafb.c
45000 +++ b/drivers/video/vesafb.c
45001 @@ -9,6 +9,7 @@
45002 */
45003
45004 #include <linux/module.h>
45005 +#include <linux/moduleloader.h>
45006 #include <linux/kernel.h>
45007 #include <linux/errno.h>
45008 #include <linux/string.h>
45009 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
45010 static int vram_total __initdata; /* Set total amount of memory */
45011 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
45012 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
45013 -static void (*pmi_start)(void) __read_mostly;
45014 -static void (*pmi_pal) (void) __read_mostly;
45015 +static void (*pmi_start)(void) __read_only;
45016 +static void (*pmi_pal) (void) __read_only;
45017 static int depth __read_mostly;
45018 static int vga_compat __read_mostly;
45019 /* --------------------------------------------------------------------- */
45020 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
45021 unsigned int size_vmode;
45022 unsigned int size_remap;
45023 unsigned int size_total;
45024 + void *pmi_code = NULL;
45025
45026 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
45027 return -ENODEV;
45028 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
45029 size_remap = size_total;
45030 vesafb_fix.smem_len = size_remap;
45031
45032 -#ifndef __i386__
45033 - screen_info.vesapm_seg = 0;
45034 -#endif
45035 -
45036 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
45037 printk(KERN_WARNING
45038 "vesafb: cannot reserve video memory at 0x%lx\n",
45039 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
45040 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
45041 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
45042
45043 +#ifdef __i386__
45044 +
45045 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45046 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
45047 + if (!pmi_code)
45048 +#elif !defined(CONFIG_PAX_KERNEXEC)
45049 + if (0)
45050 +#endif
45051 +
45052 +#endif
45053 + screen_info.vesapm_seg = 0;
45054 +
45055 if (screen_info.vesapm_seg) {
45056 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
45057 - screen_info.vesapm_seg,screen_info.vesapm_off);
45058 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
45059 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
45060 }
45061
45062 if (screen_info.vesapm_seg < 0xc000)
45063 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
45064
45065 if (ypan || pmi_setpal) {
45066 unsigned short *pmi_base;
45067 +
45068 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
45069 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
45070 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
45071 +
45072 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45073 + pax_open_kernel();
45074 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
45075 +#else
45076 + pmi_code = pmi_base;
45077 +#endif
45078 +
45079 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
45080 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
45081 +
45082 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45083 + pmi_start = ktva_ktla(pmi_start);
45084 + pmi_pal = ktva_ktla(pmi_pal);
45085 + pax_close_kernel();
45086 +#endif
45087 +
45088 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
45089 if (pmi_base[3]) {
45090 printk(KERN_INFO "vesafb: pmi: ports = ");
45091 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
45092 info->node, info->fix.id);
45093 return 0;
45094 err:
45095 +
45096 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45097 + module_free_exec(NULL, pmi_code);
45098 +#endif
45099 +
45100 if (info->screen_base)
45101 iounmap(info->screen_base);
45102 framebuffer_release(info);
45103 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
45104 index 88714ae..16c2e11 100644
45105 --- a/drivers/video/via/via_clock.h
45106 +++ b/drivers/video/via/via_clock.h
45107 @@ -56,7 +56,7 @@ struct via_clock {
45108
45109 void (*set_engine_pll_state)(u8 state);
45110 void (*set_engine_pll)(struct via_pll_config config);
45111 -};
45112 +} __no_const;
45113
45114
45115 static inline u32 get_pll_internal_frequency(u32 ref_freq,
45116 diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
45117 index fef20db..d28b1ab 100644
45118 --- a/drivers/xen/xenfs/xenstored.c
45119 +++ b/drivers/xen/xenfs/xenstored.c
45120 @@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
45121 static int xsd_kva_open(struct inode *inode, struct file *file)
45122 {
45123 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
45124 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45125 + NULL);
45126 +#else
45127 xen_store_interface);
45128 +#endif
45129 +
45130 if (!file->private_data)
45131 return -ENOMEM;
45132 return 0;
45133 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
45134 index 890bed5..17ae73e 100644
45135 --- a/fs/9p/vfs_inode.c
45136 +++ b/fs/9p/vfs_inode.c
45137 @@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
45138 void
45139 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45140 {
45141 - char *s = nd_get_link(nd);
45142 + const char *s = nd_get_link(nd);
45143
45144 p9_debug(P9_DEBUG_VFS, " %s %s\n",
45145 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
45146 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
45147 index 0efd152..b5802ad 100644
45148 --- a/fs/Kconfig.binfmt
45149 +++ b/fs/Kconfig.binfmt
45150 @@ -89,7 +89,7 @@ config HAVE_AOUT
45151
45152 config BINFMT_AOUT
45153 tristate "Kernel support for a.out and ECOFF binaries"
45154 - depends on HAVE_AOUT
45155 + depends on HAVE_AOUT && BROKEN
45156 ---help---
45157 A.out (Assembler.OUTput) is a set of formats for libraries and
45158 executables used in the earliest versions of UNIX. Linux used
45159 diff --git a/fs/aio.c b/fs/aio.c
45160 index 71f613c..9d01f1f 100644
45161 --- a/fs/aio.c
45162 +++ b/fs/aio.c
45163 @@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
45164 size += sizeof(struct io_event) * nr_events;
45165 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
45166
45167 - if (nr_pages < 0)
45168 + if (nr_pages <= 0)
45169 return -EINVAL;
45170
45171 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
45172 @@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
45173 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
45174 {
45175 ssize_t ret;
45176 + struct iovec iovstack;
45177
45178 #ifdef CONFIG_COMPAT
45179 if (compat)
45180 ret = compat_rw_copy_check_uvector(type,
45181 (struct compat_iovec __user *)kiocb->ki_buf,
45182 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
45183 + kiocb->ki_nbytes, 1, &iovstack,
45184 &kiocb->ki_iovec);
45185 else
45186 #endif
45187 ret = rw_copy_check_uvector(type,
45188 (struct iovec __user *)kiocb->ki_buf,
45189 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
45190 + kiocb->ki_nbytes, 1, &iovstack,
45191 &kiocb->ki_iovec);
45192 if (ret < 0)
45193 goto out;
45194 @@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
45195 if (ret < 0)
45196 goto out;
45197
45198 + if (kiocb->ki_iovec == &iovstack) {
45199 + kiocb->ki_inline_vec = iovstack;
45200 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
45201 + }
45202 kiocb->ki_nr_segs = kiocb->ki_nbytes;
45203 kiocb->ki_cur_seg = 0;
45204 /* ki_nbytes/left now reflect bytes instead of segs */
45205 diff --git a/fs/attr.c b/fs/attr.c
45206 index 1449adb..a2038c2 100644
45207 --- a/fs/attr.c
45208 +++ b/fs/attr.c
45209 @@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
45210 unsigned long limit;
45211
45212 limit = rlimit(RLIMIT_FSIZE);
45213 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
45214 if (limit != RLIM_INFINITY && offset > limit)
45215 goto out_sig;
45216 if (offset > inode->i_sb->s_maxbytes)
45217 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
45218 index 03bc1d3..6205356 100644
45219 --- a/fs/autofs4/waitq.c
45220 +++ b/fs/autofs4/waitq.c
45221 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
45222 {
45223 unsigned long sigpipe, flags;
45224 mm_segment_t fs;
45225 - const char *data = (const char *)addr;
45226 + const char __user *data = (const char __force_user *)addr;
45227 ssize_t wr = 0;
45228
45229 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
45230 @@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
45231 return 1;
45232 }
45233
45234 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45235 +static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
45236 +#endif
45237 +
45238 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
45239 enum autofs_notify notify)
45240 {
45241 @@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
45242
45243 /* If this is a direct mount request create a dummy name */
45244 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
45245 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45246 + /* this name does get written to userland via autofs4_write() */
45247 + qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
45248 +#else
45249 qstr.len = sprintf(name, "%p", dentry);
45250 +#endif
45251 else {
45252 qstr.len = autofs4_getpath(sbi, dentry, &name);
45253 if (!qstr.len) {
45254 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
45255 index 2b3bda8..6a2d4be 100644
45256 --- a/fs/befs/linuxvfs.c
45257 +++ b/fs/befs/linuxvfs.c
45258 @@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45259 {
45260 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
45261 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
45262 - char *link = nd_get_link(nd);
45263 + const char *link = nd_get_link(nd);
45264 if (!IS_ERR(link))
45265 kfree(link);
45266 }
45267 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
45268 index 6043567..16a9239 100644
45269 --- a/fs/binfmt_aout.c
45270 +++ b/fs/binfmt_aout.c
45271 @@ -16,6 +16,7 @@
45272 #include <linux/string.h>
45273 #include <linux/fs.h>
45274 #include <linux/file.h>
45275 +#include <linux/security.h>
45276 #include <linux/stat.h>
45277 #include <linux/fcntl.h>
45278 #include <linux/ptrace.h>
45279 @@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
45280 #endif
45281 # define START_STACK(u) ((void __user *)u.start_stack)
45282
45283 + memset(&dump, 0, sizeof(dump));
45284 +
45285 fs = get_fs();
45286 set_fs(KERNEL_DS);
45287 has_dumped = 1;
45288 @@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
45289
45290 /* If the size of the dump file exceeds the rlimit, then see what would happen
45291 if we wrote the stack, but not the data area. */
45292 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
45293 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
45294 dump.u_dsize = 0;
45295
45296 /* Make sure we have enough room to write the stack and data areas. */
45297 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
45298 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
45299 dump.u_ssize = 0;
45300
45301 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
45302 rlim = rlimit(RLIMIT_DATA);
45303 if (rlim >= RLIM_INFINITY)
45304 rlim = ~0;
45305 +
45306 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
45307 if (ex.a_data + ex.a_bss > rlim)
45308 return -ENOMEM;
45309
45310 @@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
45311
45312 install_exec_creds(bprm);
45313
45314 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45315 + current->mm->pax_flags = 0UL;
45316 +#endif
45317 +
45318 +#ifdef CONFIG_PAX_PAGEEXEC
45319 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
45320 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
45321 +
45322 +#ifdef CONFIG_PAX_EMUTRAMP
45323 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
45324 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
45325 +#endif
45326 +
45327 +#ifdef CONFIG_PAX_MPROTECT
45328 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
45329 + current->mm->pax_flags |= MF_PAX_MPROTECT;
45330 +#endif
45331 +
45332 + }
45333 +#endif
45334 +
45335 if (N_MAGIC(ex) == OMAGIC) {
45336 unsigned long text_addr, map_size;
45337 loff_t pos;
45338 @@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
45339 }
45340
45341 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
45342 - PROT_READ | PROT_WRITE | PROT_EXEC,
45343 + PROT_READ | PROT_WRITE,
45344 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
45345 fd_offset + ex.a_text);
45346 if (error != N_DATADDR(ex)) {
45347 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
45348 index 0c42cdb..f4be023 100644
45349 --- a/fs/binfmt_elf.c
45350 +++ b/fs/binfmt_elf.c
45351 @@ -33,6 +33,7 @@
45352 #include <linux/elf.h>
45353 #include <linux/utsname.h>
45354 #include <linux/coredump.h>
45355 +#include <linux/xattr.h>
45356 #include <asm/uaccess.h>
45357 #include <asm/param.h>
45358 #include <asm/page.h>
45359 @@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
45360 #define elf_core_dump NULL
45361 #endif
45362
45363 +#ifdef CONFIG_PAX_MPROTECT
45364 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
45365 +#endif
45366 +
45367 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
45368 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
45369 #else
45370 @@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
45371 .load_binary = load_elf_binary,
45372 .load_shlib = load_elf_library,
45373 .core_dump = elf_core_dump,
45374 +
45375 +#ifdef CONFIG_PAX_MPROTECT
45376 + .handle_mprotect= elf_handle_mprotect,
45377 +#endif
45378 +
45379 .min_coredump = ELF_EXEC_PAGESIZE,
45380 };
45381
45382 @@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
45383
45384 static int set_brk(unsigned long start, unsigned long end)
45385 {
45386 + unsigned long e = end;
45387 +
45388 start = ELF_PAGEALIGN(start);
45389 end = ELF_PAGEALIGN(end);
45390 if (end > start) {
45391 @@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
45392 if (BAD_ADDR(addr))
45393 return addr;
45394 }
45395 - current->mm->start_brk = current->mm->brk = end;
45396 + current->mm->start_brk = current->mm->brk = e;
45397 return 0;
45398 }
45399
45400 @@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
45401 elf_addr_t __user *u_rand_bytes;
45402 const char *k_platform = ELF_PLATFORM;
45403 const char *k_base_platform = ELF_BASE_PLATFORM;
45404 - unsigned char k_rand_bytes[16];
45405 + u32 k_rand_bytes[4];
45406 int items;
45407 elf_addr_t *elf_info;
45408 int ei_index = 0;
45409 const struct cred *cred = current_cred();
45410 struct vm_area_struct *vma;
45411 + unsigned long saved_auxv[AT_VECTOR_SIZE];
45412
45413 /*
45414 * In some cases (e.g. Hyper-Threading), we want to avoid L1
45415 @@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
45416 * Generate 16 random bytes for userspace PRNG seeding.
45417 */
45418 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
45419 - u_rand_bytes = (elf_addr_t __user *)
45420 - STACK_ALLOC(p, sizeof(k_rand_bytes));
45421 + srandom32(k_rand_bytes[0] ^ random32());
45422 + srandom32(k_rand_bytes[1] ^ random32());
45423 + srandom32(k_rand_bytes[2] ^ random32());
45424 + srandom32(k_rand_bytes[3] ^ random32());
45425 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
45426 + u_rand_bytes = (elf_addr_t __user *) p;
45427 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
45428 return -EFAULT;
45429
45430 @@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
45431 return -EFAULT;
45432 current->mm->env_end = p;
45433
45434 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
45435 +
45436 /* Put the elf_info on the stack in the right place. */
45437 sp = (elf_addr_t __user *)envp + 1;
45438 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
45439 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
45440 return -EFAULT;
45441 return 0;
45442 }
45443 @@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
45444 an ELF header */
45445
45446 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
45447 - struct file *interpreter, unsigned long *interp_map_addr,
45448 - unsigned long no_base)
45449 + struct file *interpreter, unsigned long no_base)
45450 {
45451 struct elf_phdr *elf_phdata;
45452 struct elf_phdr *eppnt;
45453 - unsigned long load_addr = 0;
45454 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
45455 int load_addr_set = 0;
45456 unsigned long last_bss = 0, elf_bss = 0;
45457 - unsigned long error = ~0UL;
45458 + unsigned long error = -EINVAL;
45459 unsigned long total_size;
45460 int retval, i, size;
45461
45462 @@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
45463 goto out_close;
45464 }
45465
45466 +#ifdef CONFIG_PAX_SEGMEXEC
45467 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
45468 + pax_task_size = SEGMEXEC_TASK_SIZE;
45469 +#endif
45470 +
45471 eppnt = elf_phdata;
45472 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
45473 if (eppnt->p_type == PT_LOAD) {
45474 @@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
45475 map_addr = elf_map(interpreter, load_addr + vaddr,
45476 eppnt, elf_prot, elf_type, total_size);
45477 total_size = 0;
45478 - if (!*interp_map_addr)
45479 - *interp_map_addr = map_addr;
45480 error = map_addr;
45481 if (BAD_ADDR(map_addr))
45482 goto out_close;
45483 @@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
45484 k = load_addr + eppnt->p_vaddr;
45485 if (BAD_ADDR(k) ||
45486 eppnt->p_filesz > eppnt->p_memsz ||
45487 - eppnt->p_memsz > TASK_SIZE ||
45488 - TASK_SIZE - eppnt->p_memsz < k) {
45489 + eppnt->p_memsz > pax_task_size ||
45490 + pax_task_size - eppnt->p_memsz < k) {
45491 error = -ENOMEM;
45492 goto out_close;
45493 }
45494 @@ -530,6 +551,315 @@ out:
45495 return error;
45496 }
45497
45498 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
45499 +#ifdef CONFIG_PAX_SOFTMODE
45500 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
45501 +{
45502 + unsigned long pax_flags = 0UL;
45503 +
45504 +#ifdef CONFIG_PAX_PAGEEXEC
45505 + if (elf_phdata->p_flags & PF_PAGEEXEC)
45506 + pax_flags |= MF_PAX_PAGEEXEC;
45507 +#endif
45508 +
45509 +#ifdef CONFIG_PAX_SEGMEXEC
45510 + if (elf_phdata->p_flags & PF_SEGMEXEC)
45511 + pax_flags |= MF_PAX_SEGMEXEC;
45512 +#endif
45513 +
45514 +#ifdef CONFIG_PAX_EMUTRAMP
45515 + if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
45516 + pax_flags |= MF_PAX_EMUTRAMP;
45517 +#endif
45518 +
45519 +#ifdef CONFIG_PAX_MPROTECT
45520 + if (elf_phdata->p_flags & PF_MPROTECT)
45521 + pax_flags |= MF_PAX_MPROTECT;
45522 +#endif
45523 +
45524 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
45525 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
45526 + pax_flags |= MF_PAX_RANDMMAP;
45527 +#endif
45528 +
45529 + return pax_flags;
45530 +}
45531 +#endif
45532 +
45533 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
45534 +{
45535 + unsigned long pax_flags = 0UL;
45536 +
45537 +#ifdef CONFIG_PAX_PAGEEXEC
45538 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
45539 + pax_flags |= MF_PAX_PAGEEXEC;
45540 +#endif
45541 +
45542 +#ifdef CONFIG_PAX_SEGMEXEC
45543 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
45544 + pax_flags |= MF_PAX_SEGMEXEC;
45545 +#endif
45546 +
45547 +#ifdef CONFIG_PAX_EMUTRAMP
45548 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
45549 + pax_flags |= MF_PAX_EMUTRAMP;
45550 +#endif
45551 +
45552 +#ifdef CONFIG_PAX_MPROTECT
45553 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
45554 + pax_flags |= MF_PAX_MPROTECT;
45555 +#endif
45556 +
45557 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
45558 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
45559 + pax_flags |= MF_PAX_RANDMMAP;
45560 +#endif
45561 +
45562 + return pax_flags;
45563 +}
45564 +#endif
45565 +
45566 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
45567 +#ifdef CONFIG_PAX_SOFTMODE
45568 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
45569 +{
45570 + unsigned long pax_flags = 0UL;
45571 +
45572 +#ifdef CONFIG_PAX_PAGEEXEC
45573 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
45574 + pax_flags |= MF_PAX_PAGEEXEC;
45575 +#endif
45576 +
45577 +#ifdef CONFIG_PAX_SEGMEXEC
45578 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
45579 + pax_flags |= MF_PAX_SEGMEXEC;
45580 +#endif
45581 +
45582 +#ifdef CONFIG_PAX_EMUTRAMP
45583 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
45584 + pax_flags |= MF_PAX_EMUTRAMP;
45585 +#endif
45586 +
45587 +#ifdef CONFIG_PAX_MPROTECT
45588 + if (pax_flags_softmode & MF_PAX_MPROTECT)
45589 + pax_flags |= MF_PAX_MPROTECT;
45590 +#endif
45591 +
45592 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
45593 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
45594 + pax_flags |= MF_PAX_RANDMMAP;
45595 +#endif
45596 +
45597 + return pax_flags;
45598 +}
45599 +#endif
45600 +
45601 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
45602 +{
45603 + unsigned long pax_flags = 0UL;
45604 +
45605 +#ifdef CONFIG_PAX_PAGEEXEC
45606 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
45607 + pax_flags |= MF_PAX_PAGEEXEC;
45608 +#endif
45609 +
45610 +#ifdef CONFIG_PAX_SEGMEXEC
45611 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
45612 + pax_flags |= MF_PAX_SEGMEXEC;
45613 +#endif
45614 +
45615 +#ifdef CONFIG_PAX_EMUTRAMP
45616 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
45617 + pax_flags |= MF_PAX_EMUTRAMP;
45618 +#endif
45619 +
45620 +#ifdef CONFIG_PAX_MPROTECT
45621 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
45622 + pax_flags |= MF_PAX_MPROTECT;
45623 +#endif
45624 +
45625 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
45626 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
45627 + pax_flags |= MF_PAX_RANDMMAP;
45628 +#endif
45629 +
45630 + return pax_flags;
45631 +}
45632 +#endif
45633 +
45634 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45635 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
45636 +{
45637 + unsigned long pax_flags = 0UL;
45638 +
45639 +#ifdef CONFIG_PAX_EI_PAX
45640 +
45641 +#ifdef CONFIG_PAX_PAGEEXEC
45642 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
45643 + pax_flags |= MF_PAX_PAGEEXEC;
45644 +#endif
45645 +
45646 +#ifdef CONFIG_PAX_SEGMEXEC
45647 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
45648 + pax_flags |= MF_PAX_SEGMEXEC;
45649 +#endif
45650 +
45651 +#ifdef CONFIG_PAX_EMUTRAMP
45652 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
45653 + pax_flags |= MF_PAX_EMUTRAMP;
45654 +#endif
45655 +
45656 +#ifdef CONFIG_PAX_MPROTECT
45657 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
45658 + pax_flags |= MF_PAX_MPROTECT;
45659 +#endif
45660 +
45661 +#ifdef CONFIG_PAX_ASLR
45662 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
45663 + pax_flags |= MF_PAX_RANDMMAP;
45664 +#endif
45665 +
45666 +#else
45667 +
45668 +#ifdef CONFIG_PAX_PAGEEXEC
45669 + pax_flags |= MF_PAX_PAGEEXEC;
45670 +#endif
45671 +
45672 +#ifdef CONFIG_PAX_SEGMEXEC
45673 + pax_flags |= MF_PAX_SEGMEXEC;
45674 +#endif
45675 +
45676 +#ifdef CONFIG_PAX_MPROTECT
45677 + pax_flags |= MF_PAX_MPROTECT;
45678 +#endif
45679 +
45680 +#ifdef CONFIG_PAX_RANDMMAP
45681 + if (randomize_va_space)
45682 + pax_flags |= MF_PAX_RANDMMAP;
45683 +#endif
45684 +
45685 +#endif
45686 +
45687 + return pax_flags;
45688 +}
45689 +
45690 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
45691 +{
45692 +
45693 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
45694 + unsigned long i;
45695 +
45696 + for (i = 0UL; i < elf_ex->e_phnum; i++)
45697 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
45698 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
45699 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
45700 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
45701 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
45702 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
45703 + return ~0UL;
45704 +
45705 +#ifdef CONFIG_PAX_SOFTMODE
45706 + if (pax_softmode)
45707 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
45708 + else
45709 +#endif
45710 +
45711 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
45712 + break;
45713 + }
45714 +#endif
45715 +
45716 + return ~0UL;
45717 +}
45718 +
45719 +static unsigned long pax_parse_xattr_pax(struct file * const file)
45720 +{
45721 +
45722 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
45723 + ssize_t xattr_size, i;
45724 + unsigned char xattr_value[5];
45725 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
45726 +
45727 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
45728 + if (xattr_size <= 0 || xattr_size > 5)
45729 + return ~0UL;
45730 +
45731 + for (i = 0; i < xattr_size; i++)
45732 + switch (xattr_value[i]) {
45733 + default:
45734 + return ~0UL;
45735 +
45736 +#define parse_flag(option1, option2, flag) \
45737 + case option1: \
45738 + if (pax_flags_hardmode & MF_PAX_##flag) \
45739 + return ~0UL; \
45740 + pax_flags_hardmode |= MF_PAX_##flag; \
45741 + break; \
45742 + case option2: \
45743 + if (pax_flags_softmode & MF_PAX_##flag) \
45744 + return ~0UL; \
45745 + pax_flags_softmode |= MF_PAX_##flag; \
45746 + break;
45747 +
45748 + parse_flag('p', 'P', PAGEEXEC);
45749 + parse_flag('e', 'E', EMUTRAMP);
45750 + parse_flag('m', 'M', MPROTECT);
45751 + parse_flag('r', 'R', RANDMMAP);
45752 + parse_flag('s', 'S', SEGMEXEC);
45753 +
45754 +#undef parse_flag
45755 + }
45756 +
45757 + if (pax_flags_hardmode & pax_flags_softmode)
45758 + return ~0UL;
45759 +
45760 +#ifdef CONFIG_PAX_SOFTMODE
45761 + if (pax_softmode)
45762 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
45763 + else
45764 +#endif
45765 +
45766 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
45767 +#else
45768 + return ~0UL;
45769 +#endif
45770 +
45771 +}
45772 +
45773 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
45774 +{
45775 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
45776 +
45777 + pax_flags = pax_parse_ei_pax(elf_ex);
45778 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
45779 + xattr_pax_flags = pax_parse_xattr_pax(file);
45780 +
45781 + if (pt_pax_flags == ~0UL)
45782 + pt_pax_flags = xattr_pax_flags;
45783 + else if (xattr_pax_flags == ~0UL)
45784 + xattr_pax_flags = pt_pax_flags;
45785 + if (pt_pax_flags != xattr_pax_flags)
45786 + return -EINVAL;
45787 + if (pt_pax_flags != ~0UL)
45788 + pax_flags = pt_pax_flags;
45789 +
45790 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
45791 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
45792 + if ((__supported_pte_mask & _PAGE_NX))
45793 + pax_flags &= ~MF_PAX_SEGMEXEC;
45794 + else
45795 + pax_flags &= ~MF_PAX_PAGEEXEC;
45796 + }
45797 +#endif
45798 +
45799 + if (0 > pax_check_flags(&pax_flags))
45800 + return -EINVAL;
45801 +
45802 + current->mm->pax_flags = pax_flags;
45803 + return 0;
45804 +}
45805 +#endif
45806 +
45807 /*
45808 * These are the functions used to load ELF style executables and shared
45809 * libraries. There is no binary dependent code anywhere else.
45810 @@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
45811 {
45812 unsigned int random_variable = 0;
45813
45814 +#ifdef CONFIG_PAX_RANDUSTACK
45815 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
45816 + return stack_top - current->mm->delta_stack;
45817 +#endif
45818 +
45819 if ((current->flags & PF_RANDOMIZE) &&
45820 !(current->personality & ADDR_NO_RANDOMIZE)) {
45821 random_variable = get_random_int() & STACK_RND_MASK;
45822 @@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
45823 unsigned long load_addr = 0, load_bias = 0;
45824 int load_addr_set = 0;
45825 char * elf_interpreter = NULL;
45826 - unsigned long error;
45827 + unsigned long error = 0;
45828 struct elf_phdr *elf_ppnt, *elf_phdata;
45829 unsigned long elf_bss, elf_brk;
45830 int retval, i;
45831 @@ -574,12 +909,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
45832 unsigned long start_code, end_code, start_data, end_data;
45833 unsigned long reloc_func_desc __maybe_unused = 0;
45834 int executable_stack = EXSTACK_DEFAULT;
45835 - unsigned long def_flags = 0;
45836 struct pt_regs *regs = current_pt_regs();
45837 struct {
45838 struct elfhdr elf_ex;
45839 struct elfhdr interp_elf_ex;
45840 } *loc;
45841 + unsigned long pax_task_size = TASK_SIZE;
45842
45843 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
45844 if (!loc) {
45845 @@ -715,11 +1050,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
45846 goto out_free_dentry;
45847
45848 /* OK, This is the point of no return */
45849 - current->mm->def_flags = def_flags;
45850 +
45851 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45852 + current->mm->pax_flags = 0UL;
45853 +#endif
45854 +
45855 +#ifdef CONFIG_PAX_DLRESOLVE
45856 + current->mm->call_dl_resolve = 0UL;
45857 +#endif
45858 +
45859 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
45860 + current->mm->call_syscall = 0UL;
45861 +#endif
45862 +
45863 +#ifdef CONFIG_PAX_ASLR
45864 + current->mm->delta_mmap = 0UL;
45865 + current->mm->delta_stack = 0UL;
45866 +#endif
45867 +
45868 + current->mm->def_flags = 0;
45869 +
45870 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45871 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
45872 + send_sig(SIGKILL, current, 0);
45873 + goto out_free_dentry;
45874 + }
45875 +#endif
45876 +
45877 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
45878 + pax_set_initial_flags(bprm);
45879 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
45880 + if (pax_set_initial_flags_func)
45881 + (pax_set_initial_flags_func)(bprm);
45882 +#endif
45883 +
45884 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45885 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
45886 + current->mm->context.user_cs_limit = PAGE_SIZE;
45887 + current->mm->def_flags |= VM_PAGEEXEC;
45888 + }
45889 +#endif
45890 +
45891 +#ifdef CONFIG_PAX_SEGMEXEC
45892 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
45893 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
45894 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
45895 + pax_task_size = SEGMEXEC_TASK_SIZE;
45896 + current->mm->def_flags |= VM_NOHUGEPAGE;
45897 + }
45898 +#endif
45899 +
45900 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
45901 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
45902 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
45903 + put_cpu();
45904 + }
45905 +#endif
45906
45907 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
45908 may depend on the personality. */
45909 SET_PERSONALITY(loc->elf_ex);
45910 +
45911 +#ifdef CONFIG_PAX_ASLR
45912 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
45913 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
45914 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
45915 + }
45916 +#endif
45917 +
45918 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
45919 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
45920 + executable_stack = EXSTACK_DISABLE_X;
45921 + current->personality &= ~READ_IMPLIES_EXEC;
45922 + } else
45923 +#endif
45924 +
45925 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
45926 current->personality |= READ_IMPLIES_EXEC;
45927
45928 @@ -810,6 +1215,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
45929 #else
45930 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
45931 #endif
45932 +
45933 +#ifdef CONFIG_PAX_RANDMMAP
45934 + /* PaX: randomize base address at the default exe base if requested */
45935 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
45936 +#ifdef CONFIG_SPARC64
45937 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
45938 +#else
45939 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
45940 +#endif
45941 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
45942 + elf_flags |= MAP_FIXED;
45943 + }
45944 +#endif
45945 +
45946 }
45947
45948 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
45949 @@ -842,9 +1261,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
45950 * allowed task size. Note that p_filesz must always be
45951 * <= p_memsz so it is only necessary to check p_memsz.
45952 */
45953 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
45954 - elf_ppnt->p_memsz > TASK_SIZE ||
45955 - TASK_SIZE - elf_ppnt->p_memsz < k) {
45956 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
45957 + elf_ppnt->p_memsz > pax_task_size ||
45958 + pax_task_size - elf_ppnt->p_memsz < k) {
45959 /* set_brk can never work. Avoid overflows. */
45960 send_sig(SIGKILL, current, 0);
45961 retval = -EINVAL;
45962 @@ -883,17 +1302,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
45963 goto out_free_dentry;
45964 }
45965 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
45966 - send_sig(SIGSEGV, current, 0);
45967 - retval = -EFAULT; /* Nobody gets to see this, but.. */
45968 - goto out_free_dentry;
45969 + /*
45970 + * This bss-zeroing can fail if the ELF
45971 + * file specifies odd protections. So
45972 + * we don't check the return value
45973 + */
45974 }
45975
45976 +#ifdef CONFIG_PAX_RANDMMAP
45977 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
45978 + unsigned long start, size;
45979 +
45980 + start = ELF_PAGEALIGN(elf_brk);
45981 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
45982 + down_read(&current->mm->mmap_sem);
45983 + retval = -ENOMEM;
45984 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
45985 + unsigned long prot = PROT_NONE;
45986 +
45987 + up_read(&current->mm->mmap_sem);
45988 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
45989 +// if (current->personality & ADDR_NO_RANDOMIZE)
45990 +// prot = PROT_READ;
45991 + start = vm_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
45992 + retval = IS_ERR_VALUE(start) ? start : 0;
45993 + } else
45994 + up_read(&current->mm->mmap_sem);
45995 + if (retval == 0)
45996 + retval = set_brk(start + size, start + size + PAGE_SIZE);
45997 + if (retval < 0) {
45998 + send_sig(SIGKILL, current, 0);
45999 + goto out_free_dentry;
46000 + }
46001 + }
46002 +#endif
46003 +
46004 if (elf_interpreter) {
46005 - unsigned long interp_map_addr = 0;
46006 -
46007 elf_entry = load_elf_interp(&loc->interp_elf_ex,
46008 interpreter,
46009 - &interp_map_addr,
46010 load_bias);
46011 if (!IS_ERR((void *)elf_entry)) {
46012 /*
46013 @@ -1115,7 +1561,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
46014 * Decide what to dump of a segment, part, all or none.
46015 */
46016 static unsigned long vma_dump_size(struct vm_area_struct *vma,
46017 - unsigned long mm_flags)
46018 + unsigned long mm_flags, long signr)
46019 {
46020 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
46021
46022 @@ -1152,7 +1598,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
46023 if (vma->vm_file == NULL)
46024 return 0;
46025
46026 - if (FILTER(MAPPED_PRIVATE))
46027 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
46028 goto whole;
46029
46030 /*
46031 @@ -1374,9 +1820,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
46032 {
46033 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
46034 int i = 0;
46035 - do
46036 + do {
46037 i += 2;
46038 - while (auxv[i - 2] != AT_NULL);
46039 + } while (auxv[i - 2] != AT_NULL);
46040 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
46041 }
46042
46043 @@ -2006,14 +2452,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
46044 }
46045
46046 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
46047 - unsigned long mm_flags)
46048 + struct coredump_params *cprm)
46049 {
46050 struct vm_area_struct *vma;
46051 size_t size = 0;
46052
46053 for (vma = first_vma(current, gate_vma); vma != NULL;
46054 vma = next_vma(vma, gate_vma))
46055 - size += vma_dump_size(vma, mm_flags);
46056 + size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
46057 return size;
46058 }
46059
46060 @@ -2107,7 +2553,7 @@ static int elf_core_dump(struct coredump_params *cprm)
46061
46062 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
46063
46064 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
46065 + offset += elf_core_vma_data_size(gate_vma, cprm);
46066 offset += elf_core_extra_data_size();
46067 e_shoff = offset;
46068
46069 @@ -2121,10 +2567,12 @@ static int elf_core_dump(struct coredump_params *cprm)
46070 offset = dataoff;
46071
46072 size += sizeof(*elf);
46073 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
46074 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
46075 goto end_coredump;
46076
46077 size += sizeof(*phdr4note);
46078 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
46079 if (size > cprm->limit
46080 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
46081 goto end_coredump;
46082 @@ -2138,7 +2586,7 @@ static int elf_core_dump(struct coredump_params *cprm)
46083 phdr.p_offset = offset;
46084 phdr.p_vaddr = vma->vm_start;
46085 phdr.p_paddr = 0;
46086 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
46087 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
46088 phdr.p_memsz = vma->vm_end - vma->vm_start;
46089 offset += phdr.p_filesz;
46090 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
46091 @@ -2149,6 +2597,7 @@ static int elf_core_dump(struct coredump_params *cprm)
46092 phdr.p_align = ELF_EXEC_PAGESIZE;
46093
46094 size += sizeof(phdr);
46095 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
46096 if (size > cprm->limit
46097 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
46098 goto end_coredump;
46099 @@ -2173,7 +2622,7 @@ static int elf_core_dump(struct coredump_params *cprm)
46100 unsigned long addr;
46101 unsigned long end;
46102
46103 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
46104 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
46105
46106 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
46107 struct page *page;
46108 @@ -2182,6 +2631,7 @@ static int elf_core_dump(struct coredump_params *cprm)
46109 page = get_dump_page(addr);
46110 if (page) {
46111 void *kaddr = kmap(page);
46112 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
46113 stop = ((size += PAGE_SIZE) > cprm->limit) ||
46114 !dump_write(cprm->file, kaddr,
46115 PAGE_SIZE);
46116 @@ -2199,6 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
46117
46118 if (e_phnum == PN_XNUM) {
46119 size += sizeof(*shdr4extnum);
46120 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
46121 if (size > cprm->limit
46122 || !dump_write(cprm->file, shdr4extnum,
46123 sizeof(*shdr4extnum)))
46124 @@ -2219,6 +2670,97 @@ out:
46125
46126 #endif /* CONFIG_ELF_CORE */
46127
46128 +#ifdef CONFIG_PAX_MPROTECT
46129 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
46130 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
46131 + * we'll remove VM_MAYWRITE for good on RELRO segments.
46132 + *
46133 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
46134 + * basis because we want to allow the common case and not the special ones.
46135 + */
46136 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
46137 +{
46138 + struct elfhdr elf_h;
46139 + struct elf_phdr elf_p;
46140 + unsigned long i;
46141 + unsigned long oldflags;
46142 + bool is_textrel_rw, is_textrel_rx, is_relro;
46143 +
46144 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
46145 + return;
46146 +
46147 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
46148 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
46149 +
46150 +#ifdef CONFIG_PAX_ELFRELOCS
46151 + /* possible TEXTREL */
46152 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
46153 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
46154 +#else
46155 + is_textrel_rw = false;
46156 + is_textrel_rx = false;
46157 +#endif
46158 +
46159 + /* possible RELRO */
46160 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
46161 +
46162 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
46163 + return;
46164 +
46165 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
46166 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
46167 +
46168 +#ifdef CONFIG_PAX_ETEXECRELOCS
46169 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
46170 +#else
46171 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
46172 +#endif
46173 +
46174 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
46175 + !elf_check_arch(&elf_h) ||
46176 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
46177 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
46178 + return;
46179 +
46180 + for (i = 0UL; i < elf_h.e_phnum; i++) {
46181 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
46182 + return;
46183 + switch (elf_p.p_type) {
46184 + case PT_DYNAMIC:
46185 + if (!is_textrel_rw && !is_textrel_rx)
46186 + continue;
46187 + i = 0UL;
46188 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
46189 + elf_dyn dyn;
46190 +
46191 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
46192 + return;
46193 + if (dyn.d_tag == DT_NULL)
46194 + return;
46195 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
46196 + gr_log_textrel(vma);
46197 + if (is_textrel_rw)
46198 + vma->vm_flags |= VM_MAYWRITE;
46199 + else
46200 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
46201 + vma->vm_flags &= ~VM_MAYWRITE;
46202 + return;
46203 + }
46204 + i++;
46205 + }
46206 + return;
46207 +
46208 + case PT_GNU_RELRO:
46209 + if (!is_relro)
46210 + continue;
46211 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
46212 + vma->vm_flags &= ~VM_MAYWRITE;
46213 + return;
46214 + }
46215 + }
46216 +}
46217 +#endif
46218 +
46219 static int __init init_elf_binfmt(void)
46220 {
46221 register_binfmt(&elf_format);
46222 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
46223 index b563719..3868998 100644
46224 --- a/fs/binfmt_flat.c
46225 +++ b/fs/binfmt_flat.c
46226 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
46227 realdatastart = (unsigned long) -ENOMEM;
46228 printk("Unable to allocate RAM for process data, errno %d\n",
46229 (int)-realdatastart);
46230 + down_write(&current->mm->mmap_sem);
46231 vm_munmap(textpos, text_len);
46232 + up_write(&current->mm->mmap_sem);
46233 ret = realdatastart;
46234 goto err;
46235 }
46236 @@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
46237 }
46238 if (IS_ERR_VALUE(result)) {
46239 printk("Unable to read data+bss, errno %d\n", (int)-result);
46240 + down_write(&current->mm->mmap_sem);
46241 vm_munmap(textpos, text_len);
46242 vm_munmap(realdatastart, len);
46243 + up_write(&current->mm->mmap_sem);
46244 ret = result;
46245 goto err;
46246 }
46247 @@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
46248 }
46249 if (IS_ERR_VALUE(result)) {
46250 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
46251 + down_write(&current->mm->mmap_sem);
46252 vm_munmap(textpos, text_len + data_len + extra +
46253 MAX_SHARED_LIBS * sizeof(unsigned long));
46254 + up_write(&current->mm->mmap_sem);
46255 ret = result;
46256 goto err;
46257 }
46258 diff --git a/fs/bio.c b/fs/bio.c
46259 index b96fc6c..431d628 100644
46260 --- a/fs/bio.c
46261 +++ b/fs/bio.c
46262 @@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
46263 /*
46264 * Overflow, abort
46265 */
46266 - if (end < start)
46267 + if (end < start || end - start > INT_MAX - nr_pages)
46268 return ERR_PTR(-EINVAL);
46269
46270 nr_pages += end - start;
46271 @@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
46272 /*
46273 * Overflow, abort
46274 */
46275 - if (end < start)
46276 + if (end < start || end - start > INT_MAX - nr_pages)
46277 return ERR_PTR(-EINVAL);
46278
46279 nr_pages += end - start;
46280 @@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
46281 const int read = bio_data_dir(bio) == READ;
46282 struct bio_map_data *bmd = bio->bi_private;
46283 int i;
46284 - char *p = bmd->sgvecs[0].iov_base;
46285 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
46286
46287 __bio_for_each_segment(bvec, bio, i, 0) {
46288 char *addr = page_address(bvec->bv_page);
46289 diff --git a/fs/block_dev.c b/fs/block_dev.c
46290 index 78333a3..23dcb4d 100644
46291 --- a/fs/block_dev.c
46292 +++ b/fs/block_dev.c
46293 @@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
46294 else if (bdev->bd_contains == bdev)
46295 return true; /* is a whole device which isn't held */
46296
46297 - else if (whole->bd_holder == bd_may_claim)
46298 + else if (whole->bd_holder == (void *)bd_may_claim)
46299 return true; /* is a partition of a device that is being partitioned */
46300 else if (whole->bd_holder != NULL)
46301 return false; /* is a partition of a held device */
46302 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
46303 index eea5da7..88fead70 100644
46304 --- a/fs/btrfs/ctree.c
46305 +++ b/fs/btrfs/ctree.c
46306 @@ -1033,9 +1033,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
46307 free_extent_buffer(buf);
46308 add_root_to_dirty_list(root);
46309 } else {
46310 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
46311 - parent_start = parent->start;
46312 - else
46313 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
46314 + if (parent)
46315 + parent_start = parent->start;
46316 + else
46317 + parent_start = 0;
46318 + } else
46319 parent_start = 0;
46320
46321 WARN_ON(trans->transid != btrfs_header_generation(parent));
46322 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
46323 index cc93b23..f3c42bf 100644
46324 --- a/fs/btrfs/inode.c
46325 +++ b/fs/btrfs/inode.c
46326 @@ -7296,7 +7296,7 @@ fail:
46327 return -ENOMEM;
46328 }
46329
46330 -static int btrfs_getattr(struct vfsmount *mnt,
46331 +int btrfs_getattr(struct vfsmount *mnt,
46332 struct dentry *dentry, struct kstat *stat)
46333 {
46334 struct inode *inode = dentry->d_inode;
46335 @@ -7310,6 +7310,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
46336 return 0;
46337 }
46338
46339 +EXPORT_SYMBOL(btrfs_getattr);
46340 +
46341 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
46342 +{
46343 + return BTRFS_I(inode)->root->anon_dev;
46344 +}
46345 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
46346 +
46347 /*
46348 * If a file is moved, it will inherit the cow and compression flags of the new
46349 * directory.
46350 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
46351 index 338f259..b657640 100644
46352 --- a/fs/btrfs/ioctl.c
46353 +++ b/fs/btrfs/ioctl.c
46354 @@ -3033,9 +3033,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
46355 for (i = 0; i < num_types; i++) {
46356 struct btrfs_space_info *tmp;
46357
46358 + /* Don't copy in more than we allocated */
46359 if (!slot_count)
46360 break;
46361
46362 + slot_count--;
46363 +
46364 info = NULL;
46365 rcu_read_lock();
46366 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
46367 @@ -3057,10 +3060,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
46368 memcpy(dest, &space, sizeof(space));
46369 dest++;
46370 space_args.total_spaces++;
46371 - slot_count--;
46372 }
46373 - if (!slot_count)
46374 - break;
46375 }
46376 up_read(&info->groups_sem);
46377 }
46378 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
46379 index 300e09a..9fe4539 100644
46380 --- a/fs/btrfs/relocation.c
46381 +++ b/fs/btrfs/relocation.c
46382 @@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
46383 }
46384 spin_unlock(&rc->reloc_root_tree.lock);
46385
46386 - BUG_ON((struct btrfs_root *)node->data != root);
46387 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
46388
46389 if (!del) {
46390 spin_lock(&rc->reloc_root_tree.lock);
46391 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
46392 index d8982e9..29a85fa 100644
46393 --- a/fs/btrfs/super.c
46394 +++ b/fs/btrfs/super.c
46395 @@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
46396 function, line, errstr);
46397 return;
46398 }
46399 - ACCESS_ONCE(trans->transaction->aborted) = errno;
46400 + ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
46401 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
46402 }
46403 /*
46404 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
46405 index 622f469..e8d2d55 100644
46406 --- a/fs/cachefiles/bind.c
46407 +++ b/fs/cachefiles/bind.c
46408 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
46409 args);
46410
46411 /* start by checking things over */
46412 - ASSERT(cache->fstop_percent >= 0 &&
46413 - cache->fstop_percent < cache->fcull_percent &&
46414 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
46415 cache->fcull_percent < cache->frun_percent &&
46416 cache->frun_percent < 100);
46417
46418 - ASSERT(cache->bstop_percent >= 0 &&
46419 - cache->bstop_percent < cache->bcull_percent &&
46420 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
46421 cache->bcull_percent < cache->brun_percent &&
46422 cache->brun_percent < 100);
46423
46424 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
46425 index 0a1467b..6a53245 100644
46426 --- a/fs/cachefiles/daemon.c
46427 +++ b/fs/cachefiles/daemon.c
46428 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
46429 if (n > buflen)
46430 return -EMSGSIZE;
46431
46432 - if (copy_to_user(_buffer, buffer, n) != 0)
46433 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
46434 return -EFAULT;
46435
46436 return n;
46437 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
46438 if (test_bit(CACHEFILES_DEAD, &cache->flags))
46439 return -EIO;
46440
46441 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
46442 + if (datalen > PAGE_SIZE - 1)
46443 return -EOPNOTSUPP;
46444
46445 /* drag the command string into the kernel so we can parse it */
46446 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
46447 if (args[0] != '%' || args[1] != '\0')
46448 return -EINVAL;
46449
46450 - if (fstop < 0 || fstop >= cache->fcull_percent)
46451 + if (fstop >= cache->fcull_percent)
46452 return cachefiles_daemon_range_error(cache, args);
46453
46454 cache->fstop_percent = fstop;
46455 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
46456 if (args[0] != '%' || args[1] != '\0')
46457 return -EINVAL;
46458
46459 - if (bstop < 0 || bstop >= cache->bcull_percent)
46460 + if (bstop >= cache->bcull_percent)
46461 return cachefiles_daemon_range_error(cache, args);
46462
46463 cache->bstop_percent = bstop;
46464 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
46465 index 4938251..7e01445 100644
46466 --- a/fs/cachefiles/internal.h
46467 +++ b/fs/cachefiles/internal.h
46468 @@ -59,7 +59,7 @@ struct cachefiles_cache {
46469 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
46470 struct rb_root active_nodes; /* active nodes (can't be culled) */
46471 rwlock_t active_lock; /* lock for active_nodes */
46472 - atomic_t gravecounter; /* graveyard uniquifier */
46473 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
46474 unsigned frun_percent; /* when to stop culling (% files) */
46475 unsigned fcull_percent; /* when to start culling (% files) */
46476 unsigned fstop_percent; /* when to stop allocating (% files) */
46477 @@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
46478 * proc.c
46479 */
46480 #ifdef CONFIG_CACHEFILES_HISTOGRAM
46481 -extern atomic_t cachefiles_lookup_histogram[HZ];
46482 -extern atomic_t cachefiles_mkdir_histogram[HZ];
46483 -extern atomic_t cachefiles_create_histogram[HZ];
46484 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
46485 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
46486 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
46487
46488 extern int __init cachefiles_proc_init(void);
46489 extern void cachefiles_proc_cleanup(void);
46490 static inline
46491 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
46492 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
46493 {
46494 unsigned long jif = jiffies - start_jif;
46495 if (jif >= HZ)
46496 jif = HZ - 1;
46497 - atomic_inc(&histogram[jif]);
46498 + atomic_inc_unchecked(&histogram[jif]);
46499 }
46500
46501 #else
46502 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
46503 index 8c01c5fc..15f982e 100644
46504 --- a/fs/cachefiles/namei.c
46505 +++ b/fs/cachefiles/namei.c
46506 @@ -317,7 +317,7 @@ try_again:
46507 /* first step is to make up a grave dentry in the graveyard */
46508 sprintf(nbuffer, "%08x%08x",
46509 (uint32_t) get_seconds(),
46510 - (uint32_t) atomic_inc_return(&cache->gravecounter));
46511 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
46512
46513 /* do the multiway lock magic */
46514 trap = lock_rename(cache->graveyard, dir);
46515 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
46516 index eccd339..4c1d995 100644
46517 --- a/fs/cachefiles/proc.c
46518 +++ b/fs/cachefiles/proc.c
46519 @@ -14,9 +14,9 @@
46520 #include <linux/seq_file.h>
46521 #include "internal.h"
46522
46523 -atomic_t cachefiles_lookup_histogram[HZ];
46524 -atomic_t cachefiles_mkdir_histogram[HZ];
46525 -atomic_t cachefiles_create_histogram[HZ];
46526 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
46527 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
46528 +atomic_unchecked_t cachefiles_create_histogram[HZ];
46529
46530 /*
46531 * display the latency histogram
46532 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
46533 return 0;
46534 default:
46535 index = (unsigned long) v - 3;
46536 - x = atomic_read(&cachefiles_lookup_histogram[index]);
46537 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
46538 - z = atomic_read(&cachefiles_create_histogram[index]);
46539 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
46540 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
46541 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
46542 if (x == 0 && y == 0 && z == 0)
46543 return 0;
46544
46545 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
46546 index 4809922..aab2c39 100644
46547 --- a/fs/cachefiles/rdwr.c
46548 +++ b/fs/cachefiles/rdwr.c
46549 @@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
46550 old_fs = get_fs();
46551 set_fs(KERNEL_DS);
46552 ret = file->f_op->write(
46553 - file, (const void __user *) data, len, &pos);
46554 + file, (const void __force_user *) data, len, &pos);
46555 set_fs(old_fs);
46556 kunmap(page);
46557 if (ret != len)
46558 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
46559 index 8c1aabe..bbf856a 100644
46560 --- a/fs/ceph/dir.c
46561 +++ b/fs/ceph/dir.c
46562 @@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
46563 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
46564 struct ceph_mds_client *mdsc = fsc->mdsc;
46565 unsigned frag = fpos_frag(filp->f_pos);
46566 - int off = fpos_off(filp->f_pos);
46567 + unsigned int off = fpos_off(filp->f_pos);
46568 int err;
46569 u32 ftype;
46570 struct ceph_mds_reply_info_parsed *rinfo;
46571 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
46572 index d9ea6ed..1e6c8ac 100644
46573 --- a/fs/cifs/cifs_debug.c
46574 +++ b/fs/cifs/cifs_debug.c
46575 @@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
46576
46577 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
46578 #ifdef CONFIG_CIFS_STATS2
46579 - atomic_set(&totBufAllocCount, 0);
46580 - atomic_set(&totSmBufAllocCount, 0);
46581 + atomic_set_unchecked(&totBufAllocCount, 0);
46582 + atomic_set_unchecked(&totSmBufAllocCount, 0);
46583 #endif /* CONFIG_CIFS_STATS2 */
46584 spin_lock(&cifs_tcp_ses_lock);
46585 list_for_each(tmp1, &cifs_tcp_ses_list) {
46586 @@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
46587 tcon = list_entry(tmp3,
46588 struct cifs_tcon,
46589 tcon_list);
46590 - atomic_set(&tcon->num_smbs_sent, 0);
46591 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
46592 if (server->ops->clear_stats)
46593 server->ops->clear_stats(tcon);
46594 }
46595 @@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
46596 smBufAllocCount.counter, cifs_min_small);
46597 #ifdef CONFIG_CIFS_STATS2
46598 seq_printf(m, "Total Large %d Small %d Allocations\n",
46599 - atomic_read(&totBufAllocCount),
46600 - atomic_read(&totSmBufAllocCount));
46601 + atomic_read_unchecked(&totBufAllocCount),
46602 + atomic_read_unchecked(&totSmBufAllocCount));
46603 #endif /* CONFIG_CIFS_STATS2 */
46604
46605 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
46606 @@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
46607 if (tcon->need_reconnect)
46608 seq_puts(m, "\tDISCONNECTED ");
46609 seq_printf(m, "\nSMBs: %d",
46610 - atomic_read(&tcon->num_smbs_sent));
46611 + atomic_read_unchecked(&tcon->num_smbs_sent));
46612 if (server->ops->print_stats)
46613 server->ops->print_stats(m, tcon);
46614 }
46615 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
46616 index de7f916..6cb22a9 100644
46617 --- a/fs/cifs/cifsfs.c
46618 +++ b/fs/cifs/cifsfs.c
46619 @@ -997,7 +997,7 @@ cifs_init_request_bufs(void)
46620 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
46621 cifs_req_cachep = kmem_cache_create("cifs_request",
46622 CIFSMaxBufSize + max_hdr_size, 0,
46623 - SLAB_HWCACHE_ALIGN, NULL);
46624 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
46625 if (cifs_req_cachep == NULL)
46626 return -ENOMEM;
46627
46628 @@ -1024,7 +1024,7 @@ cifs_init_request_bufs(void)
46629 efficient to alloc 1 per page off the slab compared to 17K (5page)
46630 alloc of large cifs buffers even when page debugging is on */
46631 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
46632 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
46633 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
46634 NULL);
46635 if (cifs_sm_req_cachep == NULL) {
46636 mempool_destroy(cifs_req_poolp);
46637 @@ -1109,8 +1109,8 @@ init_cifs(void)
46638 atomic_set(&bufAllocCount, 0);
46639 atomic_set(&smBufAllocCount, 0);
46640 #ifdef CONFIG_CIFS_STATS2
46641 - atomic_set(&totBufAllocCount, 0);
46642 - atomic_set(&totSmBufAllocCount, 0);
46643 + atomic_set_unchecked(&totBufAllocCount, 0);
46644 + atomic_set_unchecked(&totSmBufAllocCount, 0);
46645 #endif /* CONFIG_CIFS_STATS2 */
46646
46647 atomic_set(&midCount, 0);
46648 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
46649 index e6899ce..d6b2920 100644
46650 --- a/fs/cifs/cifsglob.h
46651 +++ b/fs/cifs/cifsglob.h
46652 @@ -751,35 +751,35 @@ struct cifs_tcon {
46653 __u16 Flags; /* optional support bits */
46654 enum statusEnum tidStatus;
46655 #ifdef CONFIG_CIFS_STATS
46656 - atomic_t num_smbs_sent;
46657 + atomic_unchecked_t num_smbs_sent;
46658 union {
46659 struct {
46660 - atomic_t num_writes;
46661 - atomic_t num_reads;
46662 - atomic_t num_flushes;
46663 - atomic_t num_oplock_brks;
46664 - atomic_t num_opens;
46665 - atomic_t num_closes;
46666 - atomic_t num_deletes;
46667 - atomic_t num_mkdirs;
46668 - atomic_t num_posixopens;
46669 - atomic_t num_posixmkdirs;
46670 - atomic_t num_rmdirs;
46671 - atomic_t num_renames;
46672 - atomic_t num_t2renames;
46673 - atomic_t num_ffirst;
46674 - atomic_t num_fnext;
46675 - atomic_t num_fclose;
46676 - atomic_t num_hardlinks;
46677 - atomic_t num_symlinks;
46678 - atomic_t num_locks;
46679 - atomic_t num_acl_get;
46680 - atomic_t num_acl_set;
46681 + atomic_unchecked_t num_writes;
46682 + atomic_unchecked_t num_reads;
46683 + atomic_unchecked_t num_flushes;
46684 + atomic_unchecked_t num_oplock_brks;
46685 + atomic_unchecked_t num_opens;
46686 + atomic_unchecked_t num_closes;
46687 + atomic_unchecked_t num_deletes;
46688 + atomic_unchecked_t num_mkdirs;
46689 + atomic_unchecked_t num_posixopens;
46690 + atomic_unchecked_t num_posixmkdirs;
46691 + atomic_unchecked_t num_rmdirs;
46692 + atomic_unchecked_t num_renames;
46693 + atomic_unchecked_t num_t2renames;
46694 + atomic_unchecked_t num_ffirst;
46695 + atomic_unchecked_t num_fnext;
46696 + atomic_unchecked_t num_fclose;
46697 + atomic_unchecked_t num_hardlinks;
46698 + atomic_unchecked_t num_symlinks;
46699 + atomic_unchecked_t num_locks;
46700 + atomic_unchecked_t num_acl_get;
46701 + atomic_unchecked_t num_acl_set;
46702 } cifs_stats;
46703 #ifdef CONFIG_CIFS_SMB2
46704 struct {
46705 - atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
46706 - atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
46707 + atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
46708 + atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
46709 } smb2_stats;
46710 #endif /* CONFIG_CIFS_SMB2 */
46711 } stats;
46712 @@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
46713 }
46714
46715 #ifdef CONFIG_CIFS_STATS
46716 -#define cifs_stats_inc atomic_inc
46717 +#define cifs_stats_inc atomic_inc_unchecked
46718
46719 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
46720 unsigned int bytes)
46721 @@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
46722 /* Various Debug counters */
46723 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
46724 #ifdef CONFIG_CIFS_STATS2
46725 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
46726 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
46727 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
46728 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
46729 #endif
46730 GLOBAL_EXTERN atomic_t smBufAllocCount;
46731 GLOBAL_EXTERN atomic_t midCount;
46732 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
46733 index 51dc2fb..1e12a33 100644
46734 --- a/fs/cifs/link.c
46735 +++ b/fs/cifs/link.c
46736 @@ -616,7 +616,7 @@ symlink_exit:
46737
46738 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
46739 {
46740 - char *p = nd_get_link(nd);
46741 + const char *p = nd_get_link(nd);
46742 if (!IS_ERR(p))
46743 kfree(p);
46744 }
46745 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
46746 index 3a00c0d..42d901c 100644
46747 --- a/fs/cifs/misc.c
46748 +++ b/fs/cifs/misc.c
46749 @@ -169,7 +169,7 @@ cifs_buf_get(void)
46750 memset(ret_buf, 0, buf_size + 3);
46751 atomic_inc(&bufAllocCount);
46752 #ifdef CONFIG_CIFS_STATS2
46753 - atomic_inc(&totBufAllocCount);
46754 + atomic_inc_unchecked(&totBufAllocCount);
46755 #endif /* CONFIG_CIFS_STATS2 */
46756 }
46757
46758 @@ -204,7 +204,7 @@ cifs_small_buf_get(void)
46759 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
46760 atomic_inc(&smBufAllocCount);
46761 #ifdef CONFIG_CIFS_STATS2
46762 - atomic_inc(&totSmBufAllocCount);
46763 + atomic_inc_unchecked(&totSmBufAllocCount);
46764 #endif /* CONFIG_CIFS_STATS2 */
46765
46766 }
46767 diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
46768 index 47bc5a8..10decbe 100644
46769 --- a/fs/cifs/smb1ops.c
46770 +++ b/fs/cifs/smb1ops.c
46771 @@ -586,27 +586,27 @@ static void
46772 cifs_clear_stats(struct cifs_tcon *tcon)
46773 {
46774 #ifdef CONFIG_CIFS_STATS
46775 - atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
46776 - atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
46777 - atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
46778 - atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
46779 - atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
46780 - atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
46781 - atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
46782 - atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
46783 - atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
46784 - atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
46785 - atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
46786 - atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
46787 - atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
46788 - atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
46789 - atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
46790 - atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
46791 - atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
46792 - atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
46793 - atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
46794 - atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
46795 - atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
46796 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
46797 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
46798 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
46799 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
46800 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
46801 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
46802 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
46803 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
46804 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
46805 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
46806 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
46807 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
46808 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
46809 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
46810 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
46811 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
46812 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
46813 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
46814 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
46815 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
46816 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
46817 #endif
46818 }
46819
46820 @@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
46821 {
46822 #ifdef CONFIG_CIFS_STATS
46823 seq_printf(m, " Oplocks breaks: %d",
46824 - atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
46825 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
46826 seq_printf(m, "\nReads: %d Bytes: %llu",
46827 - atomic_read(&tcon->stats.cifs_stats.num_reads),
46828 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
46829 (long long)(tcon->bytes_read));
46830 seq_printf(m, "\nWrites: %d Bytes: %llu",
46831 - atomic_read(&tcon->stats.cifs_stats.num_writes),
46832 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
46833 (long long)(tcon->bytes_written));
46834 seq_printf(m, "\nFlushes: %d",
46835 - atomic_read(&tcon->stats.cifs_stats.num_flushes));
46836 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
46837 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
46838 - atomic_read(&tcon->stats.cifs_stats.num_locks),
46839 - atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
46840 - atomic_read(&tcon->stats.cifs_stats.num_symlinks));
46841 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
46842 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
46843 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
46844 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
46845 - atomic_read(&tcon->stats.cifs_stats.num_opens),
46846 - atomic_read(&tcon->stats.cifs_stats.num_closes),
46847 - atomic_read(&tcon->stats.cifs_stats.num_deletes));
46848 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
46849 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
46850 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
46851 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
46852 - atomic_read(&tcon->stats.cifs_stats.num_posixopens),
46853 - atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
46854 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
46855 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
46856 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
46857 - atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
46858 - atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
46859 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
46860 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
46861 seq_printf(m, "\nRenames: %d T2 Renames %d",
46862 - atomic_read(&tcon->stats.cifs_stats.num_renames),
46863 - atomic_read(&tcon->stats.cifs_stats.num_t2renames));
46864 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
46865 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
46866 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
46867 - atomic_read(&tcon->stats.cifs_stats.num_ffirst),
46868 - atomic_read(&tcon->stats.cifs_stats.num_fnext),
46869 - atomic_read(&tcon->stats.cifs_stats.num_fclose));
46870 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
46871 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
46872 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
46873 #endif
46874 }
46875
46876 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
46877 index c9c7aa7..065056a 100644
46878 --- a/fs/cifs/smb2ops.c
46879 +++ b/fs/cifs/smb2ops.c
46880 @@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
46881 #ifdef CONFIG_CIFS_STATS
46882 int i;
46883 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
46884 - atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
46885 - atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
46886 + atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
46887 + atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
46888 }
46889 #endif
46890 }
46891 @@ -284,66 +284,66 @@ static void
46892 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
46893 {
46894 #ifdef CONFIG_CIFS_STATS
46895 - atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
46896 - atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
46897 + atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
46898 + atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
46899 seq_printf(m, "\nNegotiates: %d sent %d failed",
46900 - atomic_read(&sent[SMB2_NEGOTIATE_HE]),
46901 - atomic_read(&failed[SMB2_NEGOTIATE_HE]));
46902 + atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
46903 + atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
46904 seq_printf(m, "\nSessionSetups: %d sent %d failed",
46905 - atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
46906 - atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
46907 + atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
46908 + atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
46909 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
46910 seq_printf(m, "\nLogoffs: %d sent %d failed",
46911 - atomic_read(&sent[SMB2_LOGOFF_HE]),
46912 - atomic_read(&failed[SMB2_LOGOFF_HE]));
46913 + atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
46914 + atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
46915 seq_printf(m, "\nTreeConnects: %d sent %d failed",
46916 - atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
46917 - atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
46918 + atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
46919 + atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
46920 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
46921 - atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
46922 - atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
46923 + atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
46924 + atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
46925 seq_printf(m, "\nCreates: %d sent %d failed",
46926 - atomic_read(&sent[SMB2_CREATE_HE]),
46927 - atomic_read(&failed[SMB2_CREATE_HE]));
46928 + atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
46929 + atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
46930 seq_printf(m, "\nCloses: %d sent %d failed",
46931 - atomic_read(&sent[SMB2_CLOSE_HE]),
46932 - atomic_read(&failed[SMB2_CLOSE_HE]));
46933 + atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
46934 + atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
46935 seq_printf(m, "\nFlushes: %d sent %d failed",
46936 - atomic_read(&sent[SMB2_FLUSH_HE]),
46937 - atomic_read(&failed[SMB2_FLUSH_HE]));
46938 + atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
46939 + atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
46940 seq_printf(m, "\nReads: %d sent %d failed",
46941 - atomic_read(&sent[SMB2_READ_HE]),
46942 - atomic_read(&failed[SMB2_READ_HE]));
46943 + atomic_read_unchecked(&sent[SMB2_READ_HE]),
46944 + atomic_read_unchecked(&failed[SMB2_READ_HE]));
46945 seq_printf(m, "\nWrites: %d sent %d failed",
46946 - atomic_read(&sent[SMB2_WRITE_HE]),
46947 - atomic_read(&failed[SMB2_WRITE_HE]));
46948 + atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
46949 + atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
46950 seq_printf(m, "\nLocks: %d sent %d failed",
46951 - atomic_read(&sent[SMB2_LOCK_HE]),
46952 - atomic_read(&failed[SMB2_LOCK_HE]));
46953 + atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
46954 + atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
46955 seq_printf(m, "\nIOCTLs: %d sent %d failed",
46956 - atomic_read(&sent[SMB2_IOCTL_HE]),
46957 - atomic_read(&failed[SMB2_IOCTL_HE]));
46958 + atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
46959 + atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
46960 seq_printf(m, "\nCancels: %d sent %d failed",
46961 - atomic_read(&sent[SMB2_CANCEL_HE]),
46962 - atomic_read(&failed[SMB2_CANCEL_HE]));
46963 + atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
46964 + atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
46965 seq_printf(m, "\nEchos: %d sent %d failed",
46966 - atomic_read(&sent[SMB2_ECHO_HE]),
46967 - atomic_read(&failed[SMB2_ECHO_HE]));
46968 + atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
46969 + atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
46970 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
46971 - atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
46972 - atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
46973 + atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
46974 + atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
46975 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
46976 - atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
46977 - atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
46978 + atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
46979 + atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
46980 seq_printf(m, "\nQueryInfos: %d sent %d failed",
46981 - atomic_read(&sent[SMB2_QUERY_INFO_HE]),
46982 - atomic_read(&failed[SMB2_QUERY_INFO_HE]));
46983 + atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
46984 + atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
46985 seq_printf(m, "\nSetInfos: %d sent %d failed",
46986 - atomic_read(&sent[SMB2_SET_INFO_HE]),
46987 - atomic_read(&failed[SMB2_SET_INFO_HE]));
46988 + atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
46989 + atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
46990 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
46991 - atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
46992 - atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
46993 + atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
46994 + atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
46995 #endif
46996 }
46997
46998 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
46999 index 41d9d07..dbb4772 100644
47000 --- a/fs/cifs/smb2pdu.c
47001 +++ b/fs/cifs/smb2pdu.c
47002 @@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
47003 default:
47004 cERROR(1, "info level %u isn't supported",
47005 srch_inf->info_level);
47006 - rc = -EINVAL;
47007 - goto qdir_exit;
47008 + return -EINVAL;
47009 }
47010
47011 req->FileIndex = cpu_to_le32(index);
47012 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
47013 index 958ae0e..505c9d0 100644
47014 --- a/fs/coda/cache.c
47015 +++ b/fs/coda/cache.c
47016 @@ -24,7 +24,7 @@
47017 #include "coda_linux.h"
47018 #include "coda_cache.h"
47019
47020 -static atomic_t permission_epoch = ATOMIC_INIT(0);
47021 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
47022
47023 /* replace or extend an acl cache hit */
47024 void coda_cache_enter(struct inode *inode, int mask)
47025 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
47026 struct coda_inode_info *cii = ITOC(inode);
47027
47028 spin_lock(&cii->c_lock);
47029 - cii->c_cached_epoch = atomic_read(&permission_epoch);
47030 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
47031 if (cii->c_uid != current_fsuid()) {
47032 cii->c_uid = current_fsuid();
47033 cii->c_cached_perm = mask;
47034 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
47035 {
47036 struct coda_inode_info *cii = ITOC(inode);
47037 spin_lock(&cii->c_lock);
47038 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
47039 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
47040 spin_unlock(&cii->c_lock);
47041 }
47042
47043 /* remove all acl caches */
47044 void coda_cache_clear_all(struct super_block *sb)
47045 {
47046 - atomic_inc(&permission_epoch);
47047 + atomic_inc_unchecked(&permission_epoch);
47048 }
47049
47050
47051 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
47052 spin_lock(&cii->c_lock);
47053 hit = (mask & cii->c_cached_perm) == mask &&
47054 cii->c_uid == current_fsuid() &&
47055 - cii->c_cached_epoch == atomic_read(&permission_epoch);
47056 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
47057 spin_unlock(&cii->c_lock);
47058
47059 return hit;
47060 diff --git a/fs/compat.c b/fs/compat.c
47061 index 015e1e1..5ce8e54 100644
47062 --- a/fs/compat.c
47063 +++ b/fs/compat.c
47064 @@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
47065
47066 set_fs(KERNEL_DS);
47067 /* The __user pointer cast is valid because of the set_fs() */
47068 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
47069 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
47070 set_fs(oldfs);
47071 /* truncating is ok because it's a user address */
47072 if (!ret)
47073 @@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
47074 goto out;
47075
47076 ret = -EINVAL;
47077 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
47078 + if (nr_segs > UIO_MAXIOV)
47079 goto out;
47080 if (nr_segs > fast_segs) {
47081 ret = -ENOMEM;
47082 @@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
47083
47084 struct compat_readdir_callback {
47085 struct compat_old_linux_dirent __user *dirent;
47086 + struct file * file;
47087 int result;
47088 };
47089
47090 @@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
47091 buf->result = -EOVERFLOW;
47092 return -EOVERFLOW;
47093 }
47094 +
47095 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47096 + return 0;
47097 +
47098 buf->result++;
47099 dirent = buf->dirent;
47100 if (!access_ok(VERIFY_WRITE, dirent,
47101 @@ -878,6 +883,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
47102
47103 buf.result = 0;
47104 buf.dirent = dirent;
47105 + buf.file = f.file;
47106
47107 error = vfs_readdir(f.file, compat_fillonedir, &buf);
47108 if (buf.result)
47109 @@ -897,6 +903,7 @@ struct compat_linux_dirent {
47110 struct compat_getdents_callback {
47111 struct compat_linux_dirent __user *current_dir;
47112 struct compat_linux_dirent __user *previous;
47113 + struct file * file;
47114 int count;
47115 int error;
47116 };
47117 @@ -918,6 +925,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
47118 buf->error = -EOVERFLOW;
47119 return -EOVERFLOW;
47120 }
47121 +
47122 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47123 + return 0;
47124 +
47125 dirent = buf->previous;
47126 if (dirent) {
47127 if (__put_user(offset, &dirent->d_off))
47128 @@ -963,6 +974,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
47129 buf.previous = NULL;
47130 buf.count = count;
47131 buf.error = 0;
47132 + buf.file = f.file;
47133
47134 error = vfs_readdir(f.file, compat_filldir, &buf);
47135 if (error >= 0)
47136 @@ -983,6 +995,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
47137 struct compat_getdents_callback64 {
47138 struct linux_dirent64 __user *current_dir;
47139 struct linux_dirent64 __user *previous;
47140 + struct file * file;
47141 int count;
47142 int error;
47143 };
47144 @@ -999,6 +1012,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
47145 buf->error = -EINVAL; /* only used if we fail.. */
47146 if (reclen > buf->count)
47147 return -EINVAL;
47148 +
47149 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47150 + return 0;
47151 +
47152 dirent = buf->previous;
47153
47154 if (dirent) {
47155 @@ -1048,13 +1065,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
47156 buf.previous = NULL;
47157 buf.count = count;
47158 buf.error = 0;
47159 + buf.file = f.file;
47160
47161 error = vfs_readdir(f.file, compat_filldir64, &buf);
47162 if (error >= 0)
47163 error = buf.error;
47164 lastdirent = buf.previous;
47165 if (lastdirent) {
47166 - typeof(lastdirent->d_off) d_off = f.file->f_pos;
47167 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
47168 if (__put_user_unaligned(d_off, &lastdirent->d_off))
47169 error = -EFAULT;
47170 else
47171 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
47172 index a81147e..20bf2b5 100644
47173 --- a/fs/compat_binfmt_elf.c
47174 +++ b/fs/compat_binfmt_elf.c
47175 @@ -30,11 +30,13 @@
47176 #undef elf_phdr
47177 #undef elf_shdr
47178 #undef elf_note
47179 +#undef elf_dyn
47180 #undef elf_addr_t
47181 #define elfhdr elf32_hdr
47182 #define elf_phdr elf32_phdr
47183 #define elf_shdr elf32_shdr
47184 #define elf_note elf32_note
47185 +#define elf_dyn Elf32_Dyn
47186 #define elf_addr_t Elf32_Addr
47187
47188 /*
47189 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
47190 index e2f57a0..3c78771 100644
47191 --- a/fs/compat_ioctl.c
47192 +++ b/fs/compat_ioctl.c
47193 @@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
47194 return -EFAULT;
47195 if (__get_user(udata, &ss32->iomem_base))
47196 return -EFAULT;
47197 - ss.iomem_base = compat_ptr(udata);
47198 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
47199 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
47200 __get_user(ss.port_high, &ss32->port_high))
47201 return -EFAULT;
47202 @@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
47203 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
47204 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
47205 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
47206 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
47207 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
47208 return -EFAULT;
47209
47210 return ioctl_preallocate(file, p);
47211 @@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
47212 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
47213 {
47214 unsigned int a, b;
47215 - a = *(unsigned int *)p;
47216 - b = *(unsigned int *)q;
47217 + a = *(const unsigned int *)p;
47218 + b = *(const unsigned int *)q;
47219 if (a > b)
47220 return 1;
47221 if (a < b)
47222 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
47223 index 712b10f..c33c4ca 100644
47224 --- a/fs/configfs/dir.c
47225 +++ b/fs/configfs/dir.c
47226 @@ -1037,10 +1037,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
47227 static int configfs_depend_prep(struct dentry *origin,
47228 struct config_item *target)
47229 {
47230 - struct configfs_dirent *child_sd, *sd = origin->d_fsdata;
47231 + struct configfs_dirent *child_sd, *sd;
47232 int ret = 0;
47233
47234 - BUG_ON(!origin || !sd);
47235 + BUG_ON(!origin || !origin->d_fsdata);
47236 + sd = origin->d_fsdata;
47237
47238 if (sd->s_element == target) /* Boo-yah */
47239 goto out;
47240 @@ -1564,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
47241 }
47242 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
47243 struct configfs_dirent *next;
47244 - const char * name;
47245 + const unsigned char * name;
47246 + char d_name[sizeof(next->s_dentry->d_iname)];
47247 int len;
47248 struct inode *inode = NULL;
47249
47250 @@ -1574,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
47251 continue;
47252
47253 name = configfs_get_name(next);
47254 - len = strlen(name);
47255 + if (next->s_dentry && name == next->s_dentry->d_iname) {
47256 + len = next->s_dentry->d_name.len;
47257 + memcpy(d_name, name, len);
47258 + name = d_name;
47259 + } else
47260 + len = strlen(name);
47261
47262 /*
47263 * We'll have a dentry and an inode for
47264 diff --git a/fs/coredump.c b/fs/coredump.c
47265 index 1774932..5812106 100644
47266 --- a/fs/coredump.c
47267 +++ b/fs/coredump.c
47268 @@ -52,7 +52,7 @@ struct core_name {
47269 char *corename;
47270 int used, size;
47271 };
47272 -static atomic_t call_count = ATOMIC_INIT(1);
47273 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
47274
47275 /* The maximal length of core_pattern is also specified in sysctl.c */
47276
47277 @@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
47278 {
47279 char *old_corename = cn->corename;
47280
47281 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
47282 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
47283 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
47284
47285 if (!cn->corename) {
47286 @@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
47287 int pid_in_pattern = 0;
47288 int err = 0;
47289
47290 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
47291 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
47292 cn->corename = kmalloc(cn->size, GFP_KERNEL);
47293 cn->used = 0;
47294
47295 @@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
47296 pipe = file->f_path.dentry->d_inode->i_pipe;
47297
47298 pipe_lock(pipe);
47299 - pipe->readers++;
47300 - pipe->writers--;
47301 + atomic_inc(&pipe->readers);
47302 + atomic_dec(&pipe->writers);
47303
47304 - while ((pipe->readers > 1) && (!signal_pending(current))) {
47305 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
47306 wake_up_interruptible_sync(&pipe->wait);
47307 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47308 pipe_wait(pipe);
47309 }
47310
47311 - pipe->readers--;
47312 - pipe->writers++;
47313 + atomic_dec(&pipe->readers);
47314 + atomic_inc(&pipe->writers);
47315 pipe_unlock(pipe);
47316
47317 }
47318 @@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
47319 int ispipe;
47320 struct files_struct *displaced;
47321 bool need_nonrelative = false;
47322 - static atomic_t core_dump_count = ATOMIC_INIT(0);
47323 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
47324 + long signr = siginfo->si_signo;
47325 struct coredump_params cprm = {
47326 .siginfo = siginfo,
47327 .regs = signal_pt_regs(),
47328 @@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
47329 .mm_flags = mm->flags,
47330 };
47331
47332 - audit_core_dumps(siginfo->si_signo);
47333 + audit_core_dumps(signr);
47334 +
47335 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
47336 + gr_handle_brute_attach(cprm.mm_flags);
47337
47338 binfmt = mm->binfmt;
47339 if (!binfmt || !binfmt->core_dump)
47340 @@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
47341 need_nonrelative = true;
47342 }
47343
47344 - retval = coredump_wait(siginfo->si_signo, &core_state);
47345 + retval = coredump_wait(signr, &core_state);
47346 if (retval < 0)
47347 goto fail_creds;
47348
47349 @@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
47350 }
47351 cprm.limit = RLIM_INFINITY;
47352
47353 - dump_count = atomic_inc_return(&core_dump_count);
47354 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
47355 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
47356 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
47357 task_tgid_vnr(current), current->comm);
47358 @@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
47359 } else {
47360 struct inode *inode;
47361
47362 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
47363 +
47364 if (cprm.limit < binfmt->min_coredump)
47365 goto fail_unlock;
47366
47367 @@ -640,7 +646,7 @@ close_fail:
47368 filp_close(cprm.file, NULL);
47369 fail_dropcount:
47370 if (ispipe)
47371 - atomic_dec(&core_dump_count);
47372 + atomic_dec_unchecked(&core_dump_count);
47373 fail_unlock:
47374 kfree(cn.corename);
47375 fail_corename:
47376 @@ -659,7 +665,7 @@ fail:
47377 */
47378 int dump_write(struct file *file, const void *addr, int nr)
47379 {
47380 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
47381 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
47382 }
47383 EXPORT_SYMBOL(dump_write);
47384
47385 diff --git a/fs/dcache.c b/fs/dcache.c
47386 index 19153a0..428c2f5 100644
47387 --- a/fs/dcache.c
47388 +++ b/fs/dcache.c
47389 @@ -3133,7 +3133,7 @@ void __init vfs_caches_init(unsigned long mempages)
47390 mempages -= reserve;
47391
47392 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
47393 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
47394 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
47395
47396 dcache_init();
47397 inode_init();
47398 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
47399 index a5f12b7..4ee8a6f 100644
47400 --- a/fs/debugfs/inode.c
47401 +++ b/fs/debugfs/inode.c
47402 @@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
47403 */
47404 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
47405 {
47406 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47407 + return __create_file(name, S_IFDIR | S_IRWXU,
47408 +#else
47409 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
47410 +#endif
47411 parent, NULL, NULL);
47412 }
47413 EXPORT_SYMBOL_GPL(debugfs_create_dir);
47414 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
47415 index cc7709e..7e7211f 100644
47416 --- a/fs/ecryptfs/inode.c
47417 +++ b/fs/ecryptfs/inode.c
47418 @@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
47419 old_fs = get_fs();
47420 set_fs(get_ds());
47421 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
47422 - (char __user *)lower_buf,
47423 + (char __force_user *)lower_buf,
47424 PATH_MAX);
47425 set_fs(old_fs);
47426 if (rc < 0)
47427 @@ -706,7 +706,7 @@ out:
47428 static void
47429 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
47430 {
47431 - char *buf = nd_get_link(nd);
47432 + const char *buf = nd_get_link(nd);
47433 if (!IS_ERR(buf)) {
47434 /* Free the char* */
47435 kfree(buf);
47436 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
47437 index 412e6ed..4292d22 100644
47438 --- a/fs/ecryptfs/miscdev.c
47439 +++ b/fs/ecryptfs/miscdev.c
47440 @@ -315,7 +315,7 @@ check_list:
47441 goto out_unlock_msg_ctx;
47442 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
47443 if (msg_ctx->msg) {
47444 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
47445 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
47446 goto out_unlock_msg_ctx;
47447 i += packet_length_size;
47448 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
47449 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
47450 index b2a34a1..162fa69 100644
47451 --- a/fs/ecryptfs/read_write.c
47452 +++ b/fs/ecryptfs/read_write.c
47453 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
47454 return -EIO;
47455 fs_save = get_fs();
47456 set_fs(get_ds());
47457 - rc = vfs_write(lower_file, data, size, &offset);
47458 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
47459 set_fs(fs_save);
47460 mark_inode_dirty_sync(ecryptfs_inode);
47461 return rc;
47462 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
47463 return -EIO;
47464 fs_save = get_fs();
47465 set_fs(get_ds());
47466 - rc = vfs_read(lower_file, data, size, &offset);
47467 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
47468 set_fs(fs_save);
47469 return rc;
47470 }
47471 diff --git a/fs/exec.c b/fs/exec.c
47472 index 20df02c..5af5d91 100644
47473 --- a/fs/exec.c
47474 +++ b/fs/exec.c
47475 @@ -55,6 +55,17 @@
47476 #include <linux/pipe_fs_i.h>
47477 #include <linux/oom.h>
47478 #include <linux/compat.h>
47479 +#include <linux/random.h>
47480 +#include <linux/seq_file.h>
47481 +#include <linux/coredump.h>
47482 +#include <linux/mman.h>
47483 +
47484 +#ifdef CONFIG_PAX_REFCOUNT
47485 +#include <linux/kallsyms.h>
47486 +#include <linux/kdebug.h>
47487 +#endif
47488 +
47489 +#include <trace/events/fs.h>
47490
47491 #include <asm/uaccess.h>
47492 #include <asm/mmu_context.h>
47493 @@ -66,6 +77,18 @@
47494
47495 #include <trace/events/sched.h>
47496
47497 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47498 +void __weak pax_set_initial_flags(struct linux_binprm *bprm)
47499 +{
47500 + pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
47501 +}
47502 +#endif
47503 +
47504 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
47505 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
47506 +EXPORT_SYMBOL(pax_set_initial_flags_func);
47507 +#endif
47508 +
47509 int suid_dumpable = 0;
47510
47511 static LIST_HEAD(formats);
47512 @@ -180,18 +203,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
47513 int write)
47514 {
47515 struct page *page;
47516 - int ret;
47517
47518 -#ifdef CONFIG_STACK_GROWSUP
47519 - if (write) {
47520 - ret = expand_downwards(bprm->vma, pos);
47521 - if (ret < 0)
47522 - return NULL;
47523 - }
47524 -#endif
47525 - ret = get_user_pages(current, bprm->mm, pos,
47526 - 1, write, 1, &page, NULL);
47527 - if (ret <= 0)
47528 + if (0 > expand_downwards(bprm->vma, pos))
47529 + return NULL;
47530 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
47531 return NULL;
47532
47533 if (write) {
47534 @@ -207,6 +222,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
47535 if (size <= ARG_MAX)
47536 return page;
47537
47538 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47539 + // only allow 512KB for argv+env on suid/sgid binaries
47540 + // to prevent easy ASLR exhaustion
47541 + if (((!uid_eq(bprm->cred->euid, current_euid())) ||
47542 + (!gid_eq(bprm->cred->egid, current_egid()))) &&
47543 + (size > (512 * 1024))) {
47544 + put_page(page);
47545 + return NULL;
47546 + }
47547 +#endif
47548 +
47549 /*
47550 * Limit to 1/4-th the stack size for the argv+env strings.
47551 * This ensures that:
47552 @@ -266,6 +292,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
47553 vma->vm_end = STACK_TOP_MAX;
47554 vma->vm_start = vma->vm_end - PAGE_SIZE;
47555 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
47556 +
47557 +#ifdef CONFIG_PAX_SEGMEXEC
47558 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
47559 +#endif
47560 +
47561 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
47562 INIT_LIST_HEAD(&vma->anon_vma_chain);
47563
47564 @@ -276,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
47565 mm->stack_vm = mm->total_vm = 1;
47566 up_write(&mm->mmap_sem);
47567 bprm->p = vma->vm_end - sizeof(void *);
47568 +
47569 +#ifdef CONFIG_PAX_RANDUSTACK
47570 + if (randomize_va_space)
47571 + bprm->p ^= random32() & ~PAGE_MASK;
47572 +#endif
47573 +
47574 return 0;
47575 err:
47576 up_write(&mm->mmap_sem);
47577 @@ -384,19 +421,7 @@ err:
47578 return err;
47579 }
47580
47581 -struct user_arg_ptr {
47582 -#ifdef CONFIG_COMPAT
47583 - bool is_compat;
47584 -#endif
47585 - union {
47586 - const char __user *const __user *native;
47587 -#ifdef CONFIG_COMPAT
47588 - const compat_uptr_t __user *compat;
47589 -#endif
47590 - } ptr;
47591 -};
47592 -
47593 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
47594 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
47595 {
47596 const char __user *native;
47597
47598 @@ -405,14 +430,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
47599 compat_uptr_t compat;
47600
47601 if (get_user(compat, argv.ptr.compat + nr))
47602 - return ERR_PTR(-EFAULT);
47603 + return (const char __force_user *)ERR_PTR(-EFAULT);
47604
47605 return compat_ptr(compat);
47606 }
47607 #endif
47608
47609 if (get_user(native, argv.ptr.native + nr))
47610 - return ERR_PTR(-EFAULT);
47611 + return (const char __force_user *)ERR_PTR(-EFAULT);
47612
47613 return native;
47614 }
47615 @@ -431,7 +456,7 @@ static int count(struct user_arg_ptr argv, int max)
47616 if (!p)
47617 break;
47618
47619 - if (IS_ERR(p))
47620 + if (IS_ERR((const char __force_kernel *)p))
47621 return -EFAULT;
47622
47623 if (i >= max)
47624 @@ -466,7 +491,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
47625
47626 ret = -EFAULT;
47627 str = get_user_arg_ptr(argv, argc);
47628 - if (IS_ERR(str))
47629 + if (IS_ERR((const char __force_kernel *)str))
47630 goto out;
47631
47632 len = strnlen_user(str, MAX_ARG_STRLEN);
47633 @@ -548,7 +573,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
47634 int r;
47635 mm_segment_t oldfs = get_fs();
47636 struct user_arg_ptr argv = {
47637 - .ptr.native = (const char __user *const __user *)__argv,
47638 + .ptr.native = (const char __force_user *const __force_user *)__argv,
47639 };
47640
47641 set_fs(KERNEL_DS);
47642 @@ -583,7 +608,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
47643 unsigned long new_end = old_end - shift;
47644 struct mmu_gather tlb;
47645
47646 - BUG_ON(new_start > new_end);
47647 + if (new_start >= new_end || new_start < mmap_min_addr)
47648 + return -ENOMEM;
47649
47650 /*
47651 * ensure there are no vmas between where we want to go
47652 @@ -592,6 +618,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
47653 if (vma != find_vma(mm, new_start))
47654 return -EFAULT;
47655
47656 +#ifdef CONFIG_PAX_SEGMEXEC
47657 + BUG_ON(pax_find_mirror_vma(vma));
47658 +#endif
47659 +
47660 /*
47661 * cover the whole range: [new_start, old_end)
47662 */
47663 @@ -672,10 +702,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
47664 stack_top = arch_align_stack(stack_top);
47665 stack_top = PAGE_ALIGN(stack_top);
47666
47667 - if (unlikely(stack_top < mmap_min_addr) ||
47668 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
47669 - return -ENOMEM;
47670 -
47671 stack_shift = vma->vm_end - stack_top;
47672
47673 bprm->p -= stack_shift;
47674 @@ -687,8 +713,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
47675 bprm->exec -= stack_shift;
47676
47677 down_write(&mm->mmap_sem);
47678 +
47679 + /* Move stack pages down in memory. */
47680 + if (stack_shift) {
47681 + ret = shift_arg_pages(vma, stack_shift);
47682 + if (ret)
47683 + goto out_unlock;
47684 + }
47685 +
47686 vm_flags = VM_STACK_FLAGS;
47687
47688 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47689 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47690 + vm_flags &= ~VM_EXEC;
47691 +
47692 +#ifdef CONFIG_PAX_MPROTECT
47693 + if (mm->pax_flags & MF_PAX_MPROTECT)
47694 + vm_flags &= ~VM_MAYEXEC;
47695 +#endif
47696 +
47697 + }
47698 +#endif
47699 +
47700 /*
47701 * Adjust stack execute permissions; explicitly enable for
47702 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
47703 @@ -707,13 +753,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
47704 goto out_unlock;
47705 BUG_ON(prev != vma);
47706
47707 - /* Move stack pages down in memory. */
47708 - if (stack_shift) {
47709 - ret = shift_arg_pages(vma, stack_shift);
47710 - if (ret)
47711 - goto out_unlock;
47712 - }
47713 -
47714 /* mprotect_fixup is overkill to remove the temporary stack flags */
47715 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
47716
47717 @@ -737,6 +776,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
47718 #endif
47719 current->mm->start_stack = bprm->p;
47720 ret = expand_stack(vma, stack_base);
47721 +
47722 +#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_ASLR)
47723 + if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
47724 + unsigned long size, flags, vm_flags;
47725 +
47726 + size = STACK_TOP - vma->vm_end;
47727 + flags = MAP_FIXED | MAP_PRIVATE;
47728 + vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
47729 +
47730 + ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
47731 +
47732 +#ifdef CONFIG_X86
47733 + if (!ret) {
47734 + size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
47735 + ret = 0 != mmap_region(NULL, 0, size, flags, vm_flags, 0);
47736 + }
47737 +#endif
47738 +
47739 + }
47740 +#endif
47741 +
47742 if (ret)
47743 ret = -EFAULT;
47744
47745 @@ -772,6 +832,8 @@ struct file *open_exec(const char *name)
47746
47747 fsnotify_open(file);
47748
47749 + trace_open_exec(name);
47750 +
47751 err = deny_write_access(file);
47752 if (err)
47753 goto exit;
47754 @@ -795,7 +857,7 @@ int kernel_read(struct file *file, loff_t offset,
47755 old_fs = get_fs();
47756 set_fs(get_ds());
47757 /* The cast to a user pointer is valid due to the set_fs() */
47758 - result = vfs_read(file, (void __user *)addr, count, &pos);
47759 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
47760 set_fs(old_fs);
47761 return result;
47762 }
47763 @@ -1247,7 +1309,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
47764 }
47765 rcu_read_unlock();
47766
47767 - if (p->fs->users > n_fs) {
47768 + if (atomic_read(&p->fs->users) > n_fs) {
47769 bprm->unsafe |= LSM_UNSAFE_SHARE;
47770 } else {
47771 res = -EAGAIN;
47772 @@ -1447,6 +1509,28 @@ int search_binary_handler(struct linux_binprm *bprm)
47773
47774 EXPORT_SYMBOL(search_binary_handler);
47775
47776 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47777 +static DEFINE_PER_CPU(u64, exec_counter);
47778 +static int __init init_exec_counters(void)
47779 +{
47780 + unsigned int cpu;
47781 +
47782 + for_each_possible_cpu(cpu) {
47783 + per_cpu(exec_counter, cpu) = (u64)cpu;
47784 + }
47785 +
47786 + return 0;
47787 +}
47788 +early_initcall(init_exec_counters);
47789 +static inline void increment_exec_counter(void)
47790 +{
47791 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
47792 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
47793 +}
47794 +#else
47795 +static inline void increment_exec_counter(void) {}
47796 +#endif
47797 +
47798 /*
47799 * sys_execve() executes a new program.
47800 */
47801 @@ -1454,6 +1538,11 @@ static int do_execve_common(const char *filename,
47802 struct user_arg_ptr argv,
47803 struct user_arg_ptr envp)
47804 {
47805 +#ifdef CONFIG_GRKERNSEC
47806 + struct file *old_exec_file;
47807 + struct acl_subject_label *old_acl;
47808 + struct rlimit old_rlim[RLIM_NLIMITS];
47809 +#endif
47810 struct linux_binprm *bprm;
47811 struct file *file;
47812 struct files_struct *displaced;
47813 @@ -1461,6 +1550,8 @@ static int do_execve_common(const char *filename,
47814 int retval;
47815 const struct cred *cred = current_cred();
47816
47817 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
47818 +
47819 /*
47820 * We move the actual failure in case of RLIMIT_NPROC excess from
47821 * set*uid() to execve() because too many poorly written programs
47822 @@ -1501,12 +1592,27 @@ static int do_execve_common(const char *filename,
47823 if (IS_ERR(file))
47824 goto out_unmark;
47825
47826 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
47827 + retval = -EPERM;
47828 + goto out_file;
47829 + }
47830 +
47831 sched_exec();
47832
47833 bprm->file = file;
47834 bprm->filename = filename;
47835 bprm->interp = filename;
47836
47837 + if (gr_process_user_ban()) {
47838 + retval = -EPERM;
47839 + goto out_file;
47840 + }
47841 +
47842 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
47843 + retval = -EACCES;
47844 + goto out_file;
47845 + }
47846 +
47847 retval = bprm_mm_init(bprm);
47848 if (retval)
47849 goto out_file;
47850 @@ -1523,24 +1629,65 @@ static int do_execve_common(const char *filename,
47851 if (retval < 0)
47852 goto out;
47853
47854 +#ifdef CONFIG_GRKERNSEC
47855 + old_acl = current->acl;
47856 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
47857 + old_exec_file = current->exec_file;
47858 + get_file(file);
47859 + current->exec_file = file;
47860 +#endif
47861 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47862 + /* limit suid stack to 8MB
47863 + * we saved the old limits above and will restore them if this exec fails
47864 + */
47865 + if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
47866 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
47867 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
47868 +#endif
47869 +
47870 + if (!gr_tpe_allow(file)) {
47871 + retval = -EACCES;
47872 + goto out_fail;
47873 + }
47874 +
47875 + if (gr_check_crash_exec(file)) {
47876 + retval = -EACCES;
47877 + goto out_fail;
47878 + }
47879 +
47880 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
47881 + bprm->unsafe);
47882 + if (retval < 0)
47883 + goto out_fail;
47884 +
47885 retval = copy_strings_kernel(1, &bprm->filename, bprm);
47886 if (retval < 0)
47887 - goto out;
47888 + goto out_fail;
47889
47890 bprm->exec = bprm->p;
47891 retval = copy_strings(bprm->envc, envp, bprm);
47892 if (retval < 0)
47893 - goto out;
47894 + goto out_fail;
47895
47896 retval = copy_strings(bprm->argc, argv, bprm);
47897 if (retval < 0)
47898 - goto out;
47899 + goto out_fail;
47900 +
47901 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
47902 +
47903 + gr_handle_exec_args(bprm, argv);
47904
47905 retval = search_binary_handler(bprm);
47906 if (retval < 0)
47907 - goto out;
47908 + goto out_fail;
47909 +#ifdef CONFIG_GRKERNSEC
47910 + if (old_exec_file)
47911 + fput(old_exec_file);
47912 +#endif
47913
47914 /* execve succeeded */
47915 +
47916 + increment_exec_counter();
47917 current->fs->in_exec = 0;
47918 current->in_execve = 0;
47919 acct_update_integrals(current);
47920 @@ -1549,6 +1696,14 @@ static int do_execve_common(const char *filename,
47921 put_files_struct(displaced);
47922 return retval;
47923
47924 +out_fail:
47925 +#ifdef CONFIG_GRKERNSEC
47926 + current->acl = old_acl;
47927 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
47928 + fput(current->exec_file);
47929 + current->exec_file = old_exec_file;
47930 +#endif
47931 +
47932 out:
47933 if (bprm->mm) {
47934 acct_arg_size(bprm, 0);
47935 @@ -1697,3 +1852,253 @@ asmlinkage long compat_sys_execve(const char __user * filename,
47936 return error;
47937 }
47938 #endif
47939 +
47940 +int pax_check_flags(unsigned long *flags)
47941 +{
47942 + int retval = 0;
47943 +
47944 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
47945 + if (*flags & MF_PAX_SEGMEXEC)
47946 + {
47947 + *flags &= ~MF_PAX_SEGMEXEC;
47948 + retval = -EINVAL;
47949 + }
47950 +#endif
47951 +
47952 + if ((*flags & MF_PAX_PAGEEXEC)
47953 +
47954 +#ifdef CONFIG_PAX_PAGEEXEC
47955 + && (*flags & MF_PAX_SEGMEXEC)
47956 +#endif
47957 +
47958 + )
47959 + {
47960 + *flags &= ~MF_PAX_PAGEEXEC;
47961 + retval = -EINVAL;
47962 + }
47963 +
47964 + if ((*flags & MF_PAX_MPROTECT)
47965 +
47966 +#ifdef CONFIG_PAX_MPROTECT
47967 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
47968 +#endif
47969 +
47970 + )
47971 + {
47972 + *flags &= ~MF_PAX_MPROTECT;
47973 + retval = -EINVAL;
47974 + }
47975 +
47976 + if ((*flags & MF_PAX_EMUTRAMP)
47977 +
47978 +#ifdef CONFIG_PAX_EMUTRAMP
47979 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
47980 +#endif
47981 +
47982 + )
47983 + {
47984 + *flags &= ~MF_PAX_EMUTRAMP;
47985 + retval = -EINVAL;
47986 + }
47987 +
47988 + return retval;
47989 +}
47990 +
47991 +EXPORT_SYMBOL(pax_check_flags);
47992 +
47993 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47994 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
47995 +{
47996 + struct task_struct *tsk = current;
47997 + struct mm_struct *mm = current->mm;
47998 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
47999 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
48000 + char *path_exec = NULL;
48001 + char *path_fault = NULL;
48002 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
48003 + siginfo_t info = { };
48004 +
48005 + if (buffer_exec && buffer_fault) {
48006 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
48007 +
48008 + down_read(&mm->mmap_sem);
48009 + vma = mm->mmap;
48010 + while (vma && (!vma_exec || !vma_fault)) {
48011 + if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
48012 + vma_exec = vma;
48013 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
48014 + vma_fault = vma;
48015 + vma = vma->vm_next;
48016 + }
48017 + if (vma_exec) {
48018 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
48019 + if (IS_ERR(path_exec))
48020 + path_exec = "<path too long>";
48021 + else {
48022 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
48023 + if (path_exec) {
48024 + *path_exec = 0;
48025 + path_exec = buffer_exec;
48026 + } else
48027 + path_exec = "<path too long>";
48028 + }
48029 + }
48030 + if (vma_fault) {
48031 + start = vma_fault->vm_start;
48032 + end = vma_fault->vm_end;
48033 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
48034 + if (vma_fault->vm_file) {
48035 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
48036 + if (IS_ERR(path_fault))
48037 + path_fault = "<path too long>";
48038 + else {
48039 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
48040 + if (path_fault) {
48041 + *path_fault = 0;
48042 + path_fault = buffer_fault;
48043 + } else
48044 + path_fault = "<path too long>";
48045 + }
48046 + } else
48047 + path_fault = "<anonymous mapping>";
48048 + }
48049 + up_read(&mm->mmap_sem);
48050 + }
48051 + if (tsk->signal->curr_ip)
48052 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
48053 + else
48054 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
48055 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
48056 + from_kuid(&init_user_ns, task_uid(tsk)), from_kuid(&init_user_ns, task_euid(tsk)), pc, sp);
48057 + free_page((unsigned long)buffer_exec);
48058 + free_page((unsigned long)buffer_fault);
48059 + pax_report_insns(regs, pc, sp);
48060 + info.si_signo = SIGKILL;
48061 + info.si_errno = 0;
48062 + info.si_code = SI_KERNEL;
48063 + info.si_pid = 0;
48064 + info.si_uid = 0;
48065 + do_coredump(&info);
48066 +}
48067 +#endif
48068 +
48069 +#ifdef CONFIG_PAX_REFCOUNT
48070 +void pax_report_refcount_overflow(struct pt_regs *regs)
48071 +{
48072 + if (current->signal->curr_ip)
48073 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48074 + &current->signal->curr_ip, current->comm, task_pid_nr(current),
48075 + from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
48076 + else
48077 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
48078 + from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
48079 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
48080 + show_regs(regs);
48081 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
48082 +}
48083 +#endif
48084 +
48085 +#ifdef CONFIG_PAX_USERCOPY
48086 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
48087 +static noinline int check_stack_object(const void *obj, unsigned long len)
48088 +{
48089 + const void * const stack = task_stack_page(current);
48090 + const void * const stackend = stack + THREAD_SIZE;
48091 +
48092 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48093 + const void *frame = NULL;
48094 + const void *oldframe;
48095 +#endif
48096 +
48097 + if (obj + len < obj)
48098 + return -1;
48099 +
48100 + if (obj + len <= stack || stackend <= obj)
48101 + return 0;
48102 +
48103 + if (obj < stack || stackend < obj + len)
48104 + return -1;
48105 +
48106 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48107 + oldframe = __builtin_frame_address(1);
48108 + if (oldframe)
48109 + frame = __builtin_frame_address(2);
48110 + /*
48111 + low ----------------------------------------------> high
48112 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
48113 + ^----------------^
48114 + allow copies only within here
48115 + */
48116 + while (stack <= frame && frame < stackend) {
48117 + /* if obj + len extends past the last frame, this
48118 + check won't pass and the next frame will be 0,
48119 + causing us to bail out and correctly report
48120 + the copy as invalid
48121 + */
48122 + if (obj + len <= frame)
48123 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
48124 + oldframe = frame;
48125 + frame = *(const void * const *)frame;
48126 + }
48127 + return -1;
48128 +#else
48129 + return 1;
48130 +#endif
48131 +}
48132 +
48133 +static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
48134 +{
48135 + if (current->signal->curr_ip)
48136 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48137 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48138 + else
48139 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48140 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48141 + dump_stack();
48142 + gr_handle_kernel_exploit();
48143 + do_group_exit(SIGKILL);
48144 +}
48145 +#endif
48146 +
48147 +void __check_object_size(const void *ptr, unsigned long n, bool to)
48148 +{
48149 +
48150 +#ifdef CONFIG_PAX_USERCOPY
48151 + const char *type;
48152 +
48153 + if (!n)
48154 + return;
48155 +
48156 + type = check_heap_object(ptr, n);
48157 + if (!type) {
48158 + if (check_stack_object(ptr, n) != -1)
48159 + return;
48160 + type = "<process stack>";
48161 + }
48162 +
48163 + pax_report_usercopy(ptr, n, to, type);
48164 +#endif
48165 +
48166 +}
48167 +EXPORT_SYMBOL(__check_object_size);
48168 +
48169 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
48170 +void pax_track_stack(void)
48171 +{
48172 + unsigned long sp = (unsigned long)&sp;
48173 + if (sp < current_thread_info()->lowest_stack &&
48174 + sp > (unsigned long)task_stack_page(current))
48175 + current_thread_info()->lowest_stack = sp;
48176 +}
48177 +EXPORT_SYMBOL(pax_track_stack);
48178 +#endif
48179 +
48180 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
48181 +void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
48182 +{
48183 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
48184 + dump_stack();
48185 + do_group_exit(SIGKILL);
48186 +}
48187 +EXPORT_SYMBOL(report_size_overflow);
48188 +#endif
48189 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
48190 index 2616d0e..2ffdec9 100644
48191 --- a/fs/ext2/balloc.c
48192 +++ b/fs/ext2/balloc.c
48193 @@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
48194
48195 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
48196 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
48197 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
48198 + if (free_blocks < root_blocks + 1 &&
48199 !uid_eq(sbi->s_resuid, current_fsuid()) &&
48200 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
48201 - !in_group_p (sbi->s_resgid))) {
48202 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
48203 return 0;
48204 }
48205 return 1;
48206 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
48207 index 22548f5..41521d8 100644
48208 --- a/fs/ext3/balloc.c
48209 +++ b/fs/ext3/balloc.c
48210 @@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
48211
48212 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
48213 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
48214 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
48215 + if (free_blocks < root_blocks + 1 &&
48216 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
48217 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
48218 - !in_group_p (sbi->s_resgid))) {
48219 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
48220 return 0;
48221 }
48222 return 1;
48223 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
48224 index 2f2e0da..89b113a 100644
48225 --- a/fs/ext4/balloc.c
48226 +++ b/fs/ext4/balloc.c
48227 @@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
48228 /* Hm, nope. Are (enough) root reserved clusters available? */
48229 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
48230 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
48231 - capable(CAP_SYS_RESOURCE) ||
48232 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
48233 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
48234 + capable_nolog(CAP_SYS_RESOURCE)) {
48235
48236 if (free_clusters >= (nclusters + dirty_clusters))
48237 return 1;
48238 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
48239 index 8462eb3..4a71af6 100644
48240 --- a/fs/ext4/ext4.h
48241 +++ b/fs/ext4/ext4.h
48242 @@ -1265,19 +1265,19 @@ struct ext4_sb_info {
48243 unsigned long s_mb_last_start;
48244
48245 /* stats for buddy allocator */
48246 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
48247 - atomic_t s_bal_success; /* we found long enough chunks */
48248 - atomic_t s_bal_allocated; /* in blocks */
48249 - atomic_t s_bal_ex_scanned; /* total extents scanned */
48250 - atomic_t s_bal_goals; /* goal hits */
48251 - atomic_t s_bal_breaks; /* too long searches */
48252 - atomic_t s_bal_2orders; /* 2^order hits */
48253 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
48254 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
48255 + atomic_unchecked_t s_bal_allocated; /* in blocks */
48256 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
48257 + atomic_unchecked_t s_bal_goals; /* goal hits */
48258 + atomic_unchecked_t s_bal_breaks; /* too long searches */
48259 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
48260 spinlock_t s_bal_lock;
48261 unsigned long s_mb_buddies_generated;
48262 unsigned long long s_mb_generation_time;
48263 - atomic_t s_mb_lost_chunks;
48264 - atomic_t s_mb_preallocated;
48265 - atomic_t s_mb_discarded;
48266 + atomic_unchecked_t s_mb_lost_chunks;
48267 + atomic_unchecked_t s_mb_preallocated;
48268 + atomic_unchecked_t s_mb_discarded;
48269 atomic_t s_lock_busy;
48270
48271 /* locality groups */
48272 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
48273 index 061727a..7622abf 100644
48274 --- a/fs/ext4/mballoc.c
48275 +++ b/fs/ext4/mballoc.c
48276 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
48277 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
48278
48279 if (EXT4_SB(sb)->s_mb_stats)
48280 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
48281 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
48282
48283 break;
48284 }
48285 @@ -2044,7 +2044,7 @@ repeat:
48286 ac->ac_status = AC_STATUS_CONTINUE;
48287 ac->ac_flags |= EXT4_MB_HINT_FIRST;
48288 cr = 3;
48289 - atomic_inc(&sbi->s_mb_lost_chunks);
48290 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
48291 goto repeat;
48292 }
48293 }
48294 @@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
48295 if (sbi->s_mb_stats) {
48296 ext4_msg(sb, KERN_INFO,
48297 "mballoc: %u blocks %u reqs (%u success)",
48298 - atomic_read(&sbi->s_bal_allocated),
48299 - atomic_read(&sbi->s_bal_reqs),
48300 - atomic_read(&sbi->s_bal_success));
48301 + atomic_read_unchecked(&sbi->s_bal_allocated),
48302 + atomic_read_unchecked(&sbi->s_bal_reqs),
48303 + atomic_read_unchecked(&sbi->s_bal_success));
48304 ext4_msg(sb, KERN_INFO,
48305 "mballoc: %u extents scanned, %u goal hits, "
48306 "%u 2^N hits, %u breaks, %u lost",
48307 - atomic_read(&sbi->s_bal_ex_scanned),
48308 - atomic_read(&sbi->s_bal_goals),
48309 - atomic_read(&sbi->s_bal_2orders),
48310 - atomic_read(&sbi->s_bal_breaks),
48311 - atomic_read(&sbi->s_mb_lost_chunks));
48312 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
48313 + atomic_read_unchecked(&sbi->s_bal_goals),
48314 + atomic_read_unchecked(&sbi->s_bal_2orders),
48315 + atomic_read_unchecked(&sbi->s_bal_breaks),
48316 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
48317 ext4_msg(sb, KERN_INFO,
48318 "mballoc: %lu generated and it took %Lu",
48319 sbi->s_mb_buddies_generated,
48320 sbi->s_mb_generation_time);
48321 ext4_msg(sb, KERN_INFO,
48322 "mballoc: %u preallocated, %u discarded",
48323 - atomic_read(&sbi->s_mb_preallocated),
48324 - atomic_read(&sbi->s_mb_discarded));
48325 + atomic_read_unchecked(&sbi->s_mb_preallocated),
48326 + atomic_read_unchecked(&sbi->s_mb_discarded));
48327 }
48328
48329 free_percpu(sbi->s_locality_groups);
48330 @@ -3060,16 +3060,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
48331 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
48332
48333 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
48334 - atomic_inc(&sbi->s_bal_reqs);
48335 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
48336 + atomic_inc_unchecked(&sbi->s_bal_reqs);
48337 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
48338 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
48339 - atomic_inc(&sbi->s_bal_success);
48340 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
48341 + atomic_inc_unchecked(&sbi->s_bal_success);
48342 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
48343 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
48344 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
48345 - atomic_inc(&sbi->s_bal_goals);
48346 + atomic_inc_unchecked(&sbi->s_bal_goals);
48347 if (ac->ac_found > sbi->s_mb_max_to_scan)
48348 - atomic_inc(&sbi->s_bal_breaks);
48349 + atomic_inc_unchecked(&sbi->s_bal_breaks);
48350 }
48351
48352 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
48353 @@ -3469,7 +3469,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
48354 trace_ext4_mb_new_inode_pa(ac, pa);
48355
48356 ext4_mb_use_inode_pa(ac, pa);
48357 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
48358 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
48359
48360 ei = EXT4_I(ac->ac_inode);
48361 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
48362 @@ -3529,7 +3529,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
48363 trace_ext4_mb_new_group_pa(ac, pa);
48364
48365 ext4_mb_use_group_pa(ac, pa);
48366 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48367 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48368
48369 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
48370 lg = ac->ac_lg;
48371 @@ -3618,7 +3618,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
48372 * from the bitmap and continue.
48373 */
48374 }
48375 - atomic_add(free, &sbi->s_mb_discarded);
48376 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
48377
48378 return err;
48379 }
48380 @@ -3636,7 +3636,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
48381 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
48382 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
48383 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
48384 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
48385 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
48386 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
48387
48388 return 0;
48389 diff --git a/fs/fcntl.c b/fs/fcntl.c
48390 index 71a600a..20d87b1 100644
48391 --- a/fs/fcntl.c
48392 +++ b/fs/fcntl.c
48393 @@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
48394 if (err)
48395 return err;
48396
48397 + if (gr_handle_chroot_fowner(pid, type))
48398 + return -ENOENT;
48399 + if (gr_check_protected_task_fowner(pid, type))
48400 + return -EACCES;
48401 +
48402 f_modown(filp, pid, type, force);
48403 return 0;
48404 }
48405 diff --git a/fs/fhandle.c b/fs/fhandle.c
48406 index 999ff5c..41f4109 100644
48407 --- a/fs/fhandle.c
48408 +++ b/fs/fhandle.c
48409 @@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
48410 } else
48411 retval = 0;
48412 /* copy the mount id */
48413 - if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
48414 - sizeof(*mnt_id)) ||
48415 + if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
48416 copy_to_user(ufh, handle,
48417 sizeof(struct file_handle) + handle_bytes))
48418 retval = -EFAULT;
48419 diff --git a/fs/fifo.c b/fs/fifo.c
48420 index cf6f434..3d7942c 100644
48421 --- a/fs/fifo.c
48422 +++ b/fs/fifo.c
48423 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
48424 */
48425 filp->f_op = &read_pipefifo_fops;
48426 pipe->r_counter++;
48427 - if (pipe->readers++ == 0)
48428 + if (atomic_inc_return(&pipe->readers) == 1)
48429 wake_up_partner(inode);
48430
48431 - if (!pipe->writers) {
48432 + if (!atomic_read(&pipe->writers)) {
48433 if ((filp->f_flags & O_NONBLOCK)) {
48434 /* suppress POLLHUP until we have
48435 * seen a writer */
48436 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
48437 * errno=ENXIO when there is no process reading the FIFO.
48438 */
48439 ret = -ENXIO;
48440 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
48441 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
48442 goto err;
48443
48444 filp->f_op = &write_pipefifo_fops;
48445 pipe->w_counter++;
48446 - if (!pipe->writers++)
48447 + if (atomic_inc_return(&pipe->writers) == 1)
48448 wake_up_partner(inode);
48449
48450 - if (!pipe->readers) {
48451 + if (!atomic_read(&pipe->readers)) {
48452 if (wait_for_partner(inode, &pipe->r_counter))
48453 goto err_wr;
48454 }
48455 @@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
48456 */
48457 filp->f_op = &rdwr_pipefifo_fops;
48458
48459 - pipe->readers++;
48460 - pipe->writers++;
48461 + atomic_inc(&pipe->readers);
48462 + atomic_inc(&pipe->writers);
48463 pipe->r_counter++;
48464 pipe->w_counter++;
48465 - if (pipe->readers == 1 || pipe->writers == 1)
48466 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
48467 wake_up_partner(inode);
48468 break;
48469
48470 @@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
48471 return 0;
48472
48473 err_rd:
48474 - if (!--pipe->readers)
48475 + if (atomic_dec_and_test(&pipe->readers))
48476 wake_up_interruptible(&pipe->wait);
48477 ret = -ERESTARTSYS;
48478 goto err;
48479
48480 err_wr:
48481 - if (!--pipe->writers)
48482 + if (atomic_dec_and_test(&pipe->writers))
48483 wake_up_interruptible(&pipe->wait);
48484 ret = -ERESTARTSYS;
48485 goto err;
48486
48487 err:
48488 - if (!pipe->readers && !pipe->writers)
48489 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
48490 free_pipe_info(inode);
48491
48492 err_nocleanup:
48493 diff --git a/fs/file.c b/fs/file.c
48494 index 2b3570b..c57924b 100644
48495 --- a/fs/file.c
48496 +++ b/fs/file.c
48497 @@ -16,6 +16,7 @@
48498 #include <linux/slab.h>
48499 #include <linux/vmalloc.h>
48500 #include <linux/file.h>
48501 +#include <linux/security.h>
48502 #include <linux/fdtable.h>
48503 #include <linux/bitops.h>
48504 #include <linux/interrupt.h>
48505 @@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
48506 if (!file)
48507 return __close_fd(files, fd);
48508
48509 + gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
48510 if (fd >= rlimit(RLIMIT_NOFILE))
48511 return -EBADF;
48512
48513 @@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
48514 if (unlikely(oldfd == newfd))
48515 return -EINVAL;
48516
48517 + gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
48518 if (newfd >= rlimit(RLIMIT_NOFILE))
48519 return -EBADF;
48520
48521 @@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
48522 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
48523 {
48524 int err;
48525 + gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
48526 if (from >= rlimit(RLIMIT_NOFILE))
48527 return -EINVAL;
48528 err = alloc_fd(from, flags);
48529 diff --git a/fs/filesystems.c b/fs/filesystems.c
48530 index da165f6..3671bdb 100644
48531 --- a/fs/filesystems.c
48532 +++ b/fs/filesystems.c
48533 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
48534 int len = dot ? dot - name : strlen(name);
48535
48536 fs = __get_fs_type(name, len);
48537 +
48538 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
48539 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
48540 +#else
48541 if (!fs && (request_module("%.*s", len, name) == 0))
48542 +#endif
48543 fs = __get_fs_type(name, len);
48544
48545 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
48546 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
48547 index fe6ca58..65318cf 100644
48548 --- a/fs/fs_struct.c
48549 +++ b/fs/fs_struct.c
48550 @@ -4,6 +4,7 @@
48551 #include <linux/path.h>
48552 #include <linux/slab.h>
48553 #include <linux/fs_struct.h>
48554 +#include <linux/grsecurity.h>
48555 #include "internal.h"
48556
48557 /*
48558 @@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
48559 write_seqcount_begin(&fs->seq);
48560 old_root = fs->root;
48561 fs->root = *path;
48562 + gr_set_chroot_entries(current, path);
48563 write_seqcount_end(&fs->seq);
48564 spin_unlock(&fs->lock);
48565 if (old_root.dentry)
48566 @@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
48567 return 1;
48568 }
48569
48570 +static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
48571 +{
48572 + if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
48573 + return 0;
48574 + *p = *new;
48575 +
48576 + /* This function is only called from pivot_root(). Leave our
48577 + gr_chroot_dentry and is_chrooted flags as-is, so that a
48578 + pivoted root isn't treated as a chroot
48579 + */
48580 + //gr_set_chroot_entries(task, new);
48581 +
48582 + return 1;
48583 +}
48584 +
48585 void chroot_fs_refs(struct path *old_root, struct path *new_root)
48586 {
48587 struct task_struct *g, *p;
48588 @@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
48589 int hits = 0;
48590 spin_lock(&fs->lock);
48591 write_seqcount_begin(&fs->seq);
48592 - hits += replace_path(&fs->root, old_root, new_root);
48593 + hits += replace_root_path(p, &fs->root, old_root, new_root);
48594 hits += replace_path(&fs->pwd, old_root, new_root);
48595 write_seqcount_end(&fs->seq);
48596 while (hits--) {
48597 @@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
48598 task_lock(tsk);
48599 spin_lock(&fs->lock);
48600 tsk->fs = NULL;
48601 - kill = !--fs->users;
48602 + gr_clear_chroot_entries(tsk);
48603 + kill = !atomic_dec_return(&fs->users);
48604 spin_unlock(&fs->lock);
48605 task_unlock(tsk);
48606 if (kill)
48607 @@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
48608 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
48609 /* We don't need to lock fs - think why ;-) */
48610 if (fs) {
48611 - fs->users = 1;
48612 + atomic_set(&fs->users, 1);
48613 fs->in_exec = 0;
48614 spin_lock_init(&fs->lock);
48615 seqcount_init(&fs->seq);
48616 @@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
48617 spin_lock(&old->lock);
48618 fs->root = old->root;
48619 path_get(&fs->root);
48620 + /* instead of calling gr_set_chroot_entries here,
48621 + we call it from every caller of this function
48622 + */
48623 fs->pwd = old->pwd;
48624 path_get(&fs->pwd);
48625 spin_unlock(&old->lock);
48626 @@ -139,8 +160,9 @@ int unshare_fs_struct(void)
48627
48628 task_lock(current);
48629 spin_lock(&fs->lock);
48630 - kill = !--fs->users;
48631 + kill = !atomic_dec_return(&fs->users);
48632 current->fs = new_fs;
48633 + gr_set_chroot_entries(current, &new_fs->root);
48634 spin_unlock(&fs->lock);
48635 task_unlock(current);
48636
48637 @@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
48638
48639 int current_umask(void)
48640 {
48641 - return current->fs->umask;
48642 + return current->fs->umask | gr_acl_umask();
48643 }
48644 EXPORT_SYMBOL(current_umask);
48645
48646 /* to be mentioned only in INIT_TASK */
48647 struct fs_struct init_fs = {
48648 - .users = 1,
48649 + .users = ATOMIC_INIT(1),
48650 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
48651 .seq = SEQCNT_ZERO,
48652 .umask = 0022,
48653 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
48654 index 8dcb114..b1072e2 100644
48655 --- a/fs/fscache/cookie.c
48656 +++ b/fs/fscache/cookie.c
48657 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
48658 parent ? (char *) parent->def->name : "<no-parent>",
48659 def->name, netfs_data);
48660
48661 - fscache_stat(&fscache_n_acquires);
48662 + fscache_stat_unchecked(&fscache_n_acquires);
48663
48664 /* if there's no parent cookie, then we don't create one here either */
48665 if (!parent) {
48666 - fscache_stat(&fscache_n_acquires_null);
48667 + fscache_stat_unchecked(&fscache_n_acquires_null);
48668 _leave(" [no parent]");
48669 return NULL;
48670 }
48671 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
48672 /* allocate and initialise a cookie */
48673 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
48674 if (!cookie) {
48675 - fscache_stat(&fscache_n_acquires_oom);
48676 + fscache_stat_unchecked(&fscache_n_acquires_oom);
48677 _leave(" [ENOMEM]");
48678 return NULL;
48679 }
48680 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
48681
48682 switch (cookie->def->type) {
48683 case FSCACHE_COOKIE_TYPE_INDEX:
48684 - fscache_stat(&fscache_n_cookie_index);
48685 + fscache_stat_unchecked(&fscache_n_cookie_index);
48686 break;
48687 case FSCACHE_COOKIE_TYPE_DATAFILE:
48688 - fscache_stat(&fscache_n_cookie_data);
48689 + fscache_stat_unchecked(&fscache_n_cookie_data);
48690 break;
48691 default:
48692 - fscache_stat(&fscache_n_cookie_special);
48693 + fscache_stat_unchecked(&fscache_n_cookie_special);
48694 break;
48695 }
48696
48697 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
48698 if (fscache_acquire_non_index_cookie(cookie) < 0) {
48699 atomic_dec(&parent->n_children);
48700 __fscache_cookie_put(cookie);
48701 - fscache_stat(&fscache_n_acquires_nobufs);
48702 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
48703 _leave(" = NULL");
48704 return NULL;
48705 }
48706 }
48707
48708 - fscache_stat(&fscache_n_acquires_ok);
48709 + fscache_stat_unchecked(&fscache_n_acquires_ok);
48710 _leave(" = %p", cookie);
48711 return cookie;
48712 }
48713 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
48714 cache = fscache_select_cache_for_object(cookie->parent);
48715 if (!cache) {
48716 up_read(&fscache_addremove_sem);
48717 - fscache_stat(&fscache_n_acquires_no_cache);
48718 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
48719 _leave(" = -ENOMEDIUM [no cache]");
48720 return -ENOMEDIUM;
48721 }
48722 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
48723 object = cache->ops->alloc_object(cache, cookie);
48724 fscache_stat_d(&fscache_n_cop_alloc_object);
48725 if (IS_ERR(object)) {
48726 - fscache_stat(&fscache_n_object_no_alloc);
48727 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
48728 ret = PTR_ERR(object);
48729 goto error;
48730 }
48731
48732 - fscache_stat(&fscache_n_object_alloc);
48733 + fscache_stat_unchecked(&fscache_n_object_alloc);
48734
48735 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
48736
48737 @@ -378,7 +378,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
48738
48739 _enter("{%s}", cookie->def->name);
48740
48741 - fscache_stat(&fscache_n_invalidates);
48742 + fscache_stat_unchecked(&fscache_n_invalidates);
48743
48744 /* Only permit invalidation of data files. Invalidating an index will
48745 * require the caller to release all its attachments to the tree rooted
48746 @@ -437,10 +437,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
48747 struct fscache_object *object;
48748 struct hlist_node *_p;
48749
48750 - fscache_stat(&fscache_n_updates);
48751 + fscache_stat_unchecked(&fscache_n_updates);
48752
48753 if (!cookie) {
48754 - fscache_stat(&fscache_n_updates_null);
48755 + fscache_stat_unchecked(&fscache_n_updates_null);
48756 _leave(" [no cookie]");
48757 return;
48758 }
48759 @@ -474,12 +474,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
48760 struct fscache_object *object;
48761 unsigned long event;
48762
48763 - fscache_stat(&fscache_n_relinquishes);
48764 + fscache_stat_unchecked(&fscache_n_relinquishes);
48765 if (retire)
48766 - fscache_stat(&fscache_n_relinquishes_retire);
48767 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
48768
48769 if (!cookie) {
48770 - fscache_stat(&fscache_n_relinquishes_null);
48771 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
48772 _leave(" [no cookie]");
48773 return;
48774 }
48775 @@ -495,7 +495,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
48776
48777 /* wait for the cookie to finish being instantiated (or to fail) */
48778 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
48779 - fscache_stat(&fscache_n_relinquishes_waitcrt);
48780 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
48781 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
48782 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
48783 }
48784 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
48785 index ee38fef..0a326d4 100644
48786 --- a/fs/fscache/internal.h
48787 +++ b/fs/fscache/internal.h
48788 @@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
48789 * stats.c
48790 */
48791 #ifdef CONFIG_FSCACHE_STATS
48792 -extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
48793 -extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
48794 +extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
48795 +extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
48796
48797 -extern atomic_t fscache_n_op_pend;
48798 -extern atomic_t fscache_n_op_run;
48799 -extern atomic_t fscache_n_op_enqueue;
48800 -extern atomic_t fscache_n_op_deferred_release;
48801 -extern atomic_t fscache_n_op_release;
48802 -extern atomic_t fscache_n_op_gc;
48803 -extern atomic_t fscache_n_op_cancelled;
48804 -extern atomic_t fscache_n_op_rejected;
48805 +extern atomic_unchecked_t fscache_n_op_pend;
48806 +extern atomic_unchecked_t fscache_n_op_run;
48807 +extern atomic_unchecked_t fscache_n_op_enqueue;
48808 +extern atomic_unchecked_t fscache_n_op_deferred_release;
48809 +extern atomic_unchecked_t fscache_n_op_release;
48810 +extern atomic_unchecked_t fscache_n_op_gc;
48811 +extern atomic_unchecked_t fscache_n_op_cancelled;
48812 +extern atomic_unchecked_t fscache_n_op_rejected;
48813
48814 -extern atomic_t fscache_n_attr_changed;
48815 -extern atomic_t fscache_n_attr_changed_ok;
48816 -extern atomic_t fscache_n_attr_changed_nobufs;
48817 -extern atomic_t fscache_n_attr_changed_nomem;
48818 -extern atomic_t fscache_n_attr_changed_calls;
48819 +extern atomic_unchecked_t fscache_n_attr_changed;
48820 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
48821 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
48822 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
48823 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
48824
48825 -extern atomic_t fscache_n_allocs;
48826 -extern atomic_t fscache_n_allocs_ok;
48827 -extern atomic_t fscache_n_allocs_wait;
48828 -extern atomic_t fscache_n_allocs_nobufs;
48829 -extern atomic_t fscache_n_allocs_intr;
48830 -extern atomic_t fscache_n_allocs_object_dead;
48831 -extern atomic_t fscache_n_alloc_ops;
48832 -extern atomic_t fscache_n_alloc_op_waits;
48833 +extern atomic_unchecked_t fscache_n_allocs;
48834 +extern atomic_unchecked_t fscache_n_allocs_ok;
48835 +extern atomic_unchecked_t fscache_n_allocs_wait;
48836 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
48837 +extern atomic_unchecked_t fscache_n_allocs_intr;
48838 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
48839 +extern atomic_unchecked_t fscache_n_alloc_ops;
48840 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
48841
48842 -extern atomic_t fscache_n_retrievals;
48843 -extern atomic_t fscache_n_retrievals_ok;
48844 -extern atomic_t fscache_n_retrievals_wait;
48845 -extern atomic_t fscache_n_retrievals_nodata;
48846 -extern atomic_t fscache_n_retrievals_nobufs;
48847 -extern atomic_t fscache_n_retrievals_intr;
48848 -extern atomic_t fscache_n_retrievals_nomem;
48849 -extern atomic_t fscache_n_retrievals_object_dead;
48850 -extern atomic_t fscache_n_retrieval_ops;
48851 -extern atomic_t fscache_n_retrieval_op_waits;
48852 +extern atomic_unchecked_t fscache_n_retrievals;
48853 +extern atomic_unchecked_t fscache_n_retrievals_ok;
48854 +extern atomic_unchecked_t fscache_n_retrievals_wait;
48855 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
48856 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
48857 +extern atomic_unchecked_t fscache_n_retrievals_intr;
48858 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
48859 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
48860 +extern atomic_unchecked_t fscache_n_retrieval_ops;
48861 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
48862
48863 -extern atomic_t fscache_n_stores;
48864 -extern atomic_t fscache_n_stores_ok;
48865 -extern atomic_t fscache_n_stores_again;
48866 -extern atomic_t fscache_n_stores_nobufs;
48867 -extern atomic_t fscache_n_stores_oom;
48868 -extern atomic_t fscache_n_store_ops;
48869 -extern atomic_t fscache_n_store_calls;
48870 -extern atomic_t fscache_n_store_pages;
48871 -extern atomic_t fscache_n_store_radix_deletes;
48872 -extern atomic_t fscache_n_store_pages_over_limit;
48873 +extern atomic_unchecked_t fscache_n_stores;
48874 +extern atomic_unchecked_t fscache_n_stores_ok;
48875 +extern atomic_unchecked_t fscache_n_stores_again;
48876 +extern atomic_unchecked_t fscache_n_stores_nobufs;
48877 +extern atomic_unchecked_t fscache_n_stores_oom;
48878 +extern atomic_unchecked_t fscache_n_store_ops;
48879 +extern atomic_unchecked_t fscache_n_store_calls;
48880 +extern atomic_unchecked_t fscache_n_store_pages;
48881 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
48882 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
48883
48884 -extern atomic_t fscache_n_store_vmscan_not_storing;
48885 -extern atomic_t fscache_n_store_vmscan_gone;
48886 -extern atomic_t fscache_n_store_vmscan_busy;
48887 -extern atomic_t fscache_n_store_vmscan_cancelled;
48888 -extern atomic_t fscache_n_store_vmscan_wait;
48889 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
48890 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
48891 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
48892 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
48893 +extern atomic_unchecked_t fscache_n_store_vmscan_wait;
48894
48895 -extern atomic_t fscache_n_marks;
48896 -extern atomic_t fscache_n_uncaches;
48897 +extern atomic_unchecked_t fscache_n_marks;
48898 +extern atomic_unchecked_t fscache_n_uncaches;
48899
48900 -extern atomic_t fscache_n_acquires;
48901 -extern atomic_t fscache_n_acquires_null;
48902 -extern atomic_t fscache_n_acquires_no_cache;
48903 -extern atomic_t fscache_n_acquires_ok;
48904 -extern atomic_t fscache_n_acquires_nobufs;
48905 -extern atomic_t fscache_n_acquires_oom;
48906 +extern atomic_unchecked_t fscache_n_acquires;
48907 +extern atomic_unchecked_t fscache_n_acquires_null;
48908 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
48909 +extern atomic_unchecked_t fscache_n_acquires_ok;
48910 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
48911 +extern atomic_unchecked_t fscache_n_acquires_oom;
48912
48913 -extern atomic_t fscache_n_invalidates;
48914 -extern atomic_t fscache_n_invalidates_run;
48915 +extern atomic_unchecked_t fscache_n_invalidates;
48916 +extern atomic_unchecked_t fscache_n_invalidates_run;
48917
48918 -extern atomic_t fscache_n_updates;
48919 -extern atomic_t fscache_n_updates_null;
48920 -extern atomic_t fscache_n_updates_run;
48921 +extern atomic_unchecked_t fscache_n_updates;
48922 +extern atomic_unchecked_t fscache_n_updates_null;
48923 +extern atomic_unchecked_t fscache_n_updates_run;
48924
48925 -extern atomic_t fscache_n_relinquishes;
48926 -extern atomic_t fscache_n_relinquishes_null;
48927 -extern atomic_t fscache_n_relinquishes_waitcrt;
48928 -extern atomic_t fscache_n_relinquishes_retire;
48929 +extern atomic_unchecked_t fscache_n_relinquishes;
48930 +extern atomic_unchecked_t fscache_n_relinquishes_null;
48931 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
48932 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
48933
48934 -extern atomic_t fscache_n_cookie_index;
48935 -extern atomic_t fscache_n_cookie_data;
48936 -extern atomic_t fscache_n_cookie_special;
48937 +extern atomic_unchecked_t fscache_n_cookie_index;
48938 +extern atomic_unchecked_t fscache_n_cookie_data;
48939 +extern atomic_unchecked_t fscache_n_cookie_special;
48940
48941 -extern atomic_t fscache_n_object_alloc;
48942 -extern atomic_t fscache_n_object_no_alloc;
48943 -extern atomic_t fscache_n_object_lookups;
48944 -extern atomic_t fscache_n_object_lookups_negative;
48945 -extern atomic_t fscache_n_object_lookups_positive;
48946 -extern atomic_t fscache_n_object_lookups_timed_out;
48947 -extern atomic_t fscache_n_object_created;
48948 -extern atomic_t fscache_n_object_avail;
48949 -extern atomic_t fscache_n_object_dead;
48950 +extern atomic_unchecked_t fscache_n_object_alloc;
48951 +extern atomic_unchecked_t fscache_n_object_no_alloc;
48952 +extern atomic_unchecked_t fscache_n_object_lookups;
48953 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
48954 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
48955 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
48956 +extern atomic_unchecked_t fscache_n_object_created;
48957 +extern atomic_unchecked_t fscache_n_object_avail;
48958 +extern atomic_unchecked_t fscache_n_object_dead;
48959
48960 -extern atomic_t fscache_n_checkaux_none;
48961 -extern atomic_t fscache_n_checkaux_okay;
48962 -extern atomic_t fscache_n_checkaux_update;
48963 -extern atomic_t fscache_n_checkaux_obsolete;
48964 +extern atomic_unchecked_t fscache_n_checkaux_none;
48965 +extern atomic_unchecked_t fscache_n_checkaux_okay;
48966 +extern atomic_unchecked_t fscache_n_checkaux_update;
48967 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
48968
48969 extern atomic_t fscache_n_cop_alloc_object;
48970 extern atomic_t fscache_n_cop_lookup_object;
48971 @@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
48972 atomic_inc(stat);
48973 }
48974
48975 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
48976 +{
48977 + atomic_inc_unchecked(stat);
48978 +}
48979 +
48980 static inline void fscache_stat_d(atomic_t *stat)
48981 {
48982 atomic_dec(stat);
48983 @@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
48984
48985 #define __fscache_stat(stat) (NULL)
48986 #define fscache_stat(stat) do {} while (0)
48987 +#define fscache_stat_unchecked(stat) do {} while (0)
48988 #define fscache_stat_d(stat) do {} while (0)
48989 #endif
48990
48991 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
48992 index 50d41c1..10ee117 100644
48993 --- a/fs/fscache/object.c
48994 +++ b/fs/fscache/object.c
48995 @@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
48996 /* Invalidate an object on disk */
48997 case FSCACHE_OBJECT_INVALIDATING:
48998 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
48999 - fscache_stat(&fscache_n_invalidates_run);
49000 + fscache_stat_unchecked(&fscache_n_invalidates_run);
49001 fscache_stat(&fscache_n_cop_invalidate_object);
49002 fscache_invalidate_object(object);
49003 fscache_stat_d(&fscache_n_cop_invalidate_object);
49004 @@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49005 /* update the object metadata on disk */
49006 case FSCACHE_OBJECT_UPDATING:
49007 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
49008 - fscache_stat(&fscache_n_updates_run);
49009 + fscache_stat_unchecked(&fscache_n_updates_run);
49010 fscache_stat(&fscache_n_cop_update_object);
49011 object->cache->ops->update_object(object);
49012 fscache_stat_d(&fscache_n_cop_update_object);
49013 @@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49014 spin_lock(&object->lock);
49015 object->state = FSCACHE_OBJECT_DEAD;
49016 spin_unlock(&object->lock);
49017 - fscache_stat(&fscache_n_object_dead);
49018 + fscache_stat_unchecked(&fscache_n_object_dead);
49019 goto terminal_transit;
49020
49021 /* handle the parent cache of this object being withdrawn from
49022 @@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49023 spin_lock(&object->lock);
49024 object->state = FSCACHE_OBJECT_DEAD;
49025 spin_unlock(&object->lock);
49026 - fscache_stat(&fscache_n_object_dead);
49027 + fscache_stat_unchecked(&fscache_n_object_dead);
49028 goto terminal_transit;
49029
49030 /* complain about the object being woken up once it is
49031 @@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49032 parent->cookie->def->name, cookie->def->name,
49033 object->cache->tag->name);
49034
49035 - fscache_stat(&fscache_n_object_lookups);
49036 + fscache_stat_unchecked(&fscache_n_object_lookups);
49037 fscache_stat(&fscache_n_cop_lookup_object);
49038 ret = object->cache->ops->lookup_object(object);
49039 fscache_stat_d(&fscache_n_cop_lookup_object);
49040 @@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49041 if (ret == -ETIMEDOUT) {
49042 /* probably stuck behind another object, so move this one to
49043 * the back of the queue */
49044 - fscache_stat(&fscache_n_object_lookups_timed_out);
49045 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
49046 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49047 }
49048
49049 @@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
49050
49051 spin_lock(&object->lock);
49052 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49053 - fscache_stat(&fscache_n_object_lookups_negative);
49054 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
49055
49056 /* transit here to allow write requests to begin stacking up
49057 * and read requests to begin returning ENODATA */
49058 @@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
49059 * result, in which case there may be data available */
49060 spin_lock(&object->lock);
49061 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49062 - fscache_stat(&fscache_n_object_lookups_positive);
49063 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
49064
49065 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
49066
49067 @@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
49068 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49069 } else {
49070 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
49071 - fscache_stat(&fscache_n_object_created);
49072 + fscache_stat_unchecked(&fscache_n_object_created);
49073
49074 object->state = FSCACHE_OBJECT_AVAILABLE;
49075 spin_unlock(&object->lock);
49076 @@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
49077 fscache_enqueue_dependents(object);
49078
49079 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
49080 - fscache_stat(&fscache_n_object_avail);
49081 + fscache_stat_unchecked(&fscache_n_object_avail);
49082
49083 _leave("");
49084 }
49085 @@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49086 enum fscache_checkaux result;
49087
49088 if (!object->cookie->def->check_aux) {
49089 - fscache_stat(&fscache_n_checkaux_none);
49090 + fscache_stat_unchecked(&fscache_n_checkaux_none);
49091 return FSCACHE_CHECKAUX_OKAY;
49092 }
49093
49094 @@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49095 switch (result) {
49096 /* entry okay as is */
49097 case FSCACHE_CHECKAUX_OKAY:
49098 - fscache_stat(&fscache_n_checkaux_okay);
49099 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
49100 break;
49101
49102 /* entry requires update */
49103 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
49104 - fscache_stat(&fscache_n_checkaux_update);
49105 + fscache_stat_unchecked(&fscache_n_checkaux_update);
49106 break;
49107
49108 /* entry requires deletion */
49109 case FSCACHE_CHECKAUX_OBSOLETE:
49110 - fscache_stat(&fscache_n_checkaux_obsolete);
49111 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
49112 break;
49113
49114 default:
49115 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
49116 index 762a9ec..2023284 100644
49117 --- a/fs/fscache/operation.c
49118 +++ b/fs/fscache/operation.c
49119 @@ -17,7 +17,7 @@
49120 #include <linux/slab.h>
49121 #include "internal.h"
49122
49123 -atomic_t fscache_op_debug_id;
49124 +atomic_unchecked_t fscache_op_debug_id;
49125 EXPORT_SYMBOL(fscache_op_debug_id);
49126
49127 /**
49128 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
49129 ASSERTCMP(atomic_read(&op->usage), >, 0);
49130 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
49131
49132 - fscache_stat(&fscache_n_op_enqueue);
49133 + fscache_stat_unchecked(&fscache_n_op_enqueue);
49134 switch (op->flags & FSCACHE_OP_TYPE) {
49135 case FSCACHE_OP_ASYNC:
49136 _debug("queue async");
49137 @@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
49138 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
49139 if (op->processor)
49140 fscache_enqueue_operation(op);
49141 - fscache_stat(&fscache_n_op_run);
49142 + fscache_stat_unchecked(&fscache_n_op_run);
49143 }
49144
49145 /*
49146 @@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49147 if (object->n_in_progress > 0) {
49148 atomic_inc(&op->usage);
49149 list_add_tail(&op->pend_link, &object->pending_ops);
49150 - fscache_stat(&fscache_n_op_pend);
49151 + fscache_stat_unchecked(&fscache_n_op_pend);
49152 } else if (!list_empty(&object->pending_ops)) {
49153 atomic_inc(&op->usage);
49154 list_add_tail(&op->pend_link, &object->pending_ops);
49155 - fscache_stat(&fscache_n_op_pend);
49156 + fscache_stat_unchecked(&fscache_n_op_pend);
49157 fscache_start_operations(object);
49158 } else {
49159 ASSERTCMP(object->n_in_progress, ==, 0);
49160 @@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49161 object->n_exclusive++; /* reads and writes must wait */
49162 atomic_inc(&op->usage);
49163 list_add_tail(&op->pend_link, &object->pending_ops);
49164 - fscache_stat(&fscache_n_op_pend);
49165 + fscache_stat_unchecked(&fscache_n_op_pend);
49166 ret = 0;
49167 } else {
49168 /* If we're in any other state, there must have been an I/O
49169 @@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
49170 if (object->n_exclusive > 0) {
49171 atomic_inc(&op->usage);
49172 list_add_tail(&op->pend_link, &object->pending_ops);
49173 - fscache_stat(&fscache_n_op_pend);
49174 + fscache_stat_unchecked(&fscache_n_op_pend);
49175 } else if (!list_empty(&object->pending_ops)) {
49176 atomic_inc(&op->usage);
49177 list_add_tail(&op->pend_link, &object->pending_ops);
49178 - fscache_stat(&fscache_n_op_pend);
49179 + fscache_stat_unchecked(&fscache_n_op_pend);
49180 fscache_start_operations(object);
49181 } else {
49182 ASSERTCMP(object->n_exclusive, ==, 0);
49183 @@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
49184 object->n_ops++;
49185 atomic_inc(&op->usage);
49186 list_add_tail(&op->pend_link, &object->pending_ops);
49187 - fscache_stat(&fscache_n_op_pend);
49188 + fscache_stat_unchecked(&fscache_n_op_pend);
49189 ret = 0;
49190 } else if (object->state == FSCACHE_OBJECT_DYING ||
49191 object->state == FSCACHE_OBJECT_LC_DYING ||
49192 object->state == FSCACHE_OBJECT_WITHDRAWING) {
49193 - fscache_stat(&fscache_n_op_rejected);
49194 + fscache_stat_unchecked(&fscache_n_op_rejected);
49195 op->state = FSCACHE_OP_ST_CANCELLED;
49196 ret = -ENOBUFS;
49197 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
49198 @@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
49199 ret = -EBUSY;
49200 if (op->state == FSCACHE_OP_ST_PENDING) {
49201 ASSERT(!list_empty(&op->pend_link));
49202 - fscache_stat(&fscache_n_op_cancelled);
49203 + fscache_stat_unchecked(&fscache_n_op_cancelled);
49204 list_del_init(&op->pend_link);
49205 if (do_cancel)
49206 do_cancel(op);
49207 @@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
49208 while (!list_empty(&object->pending_ops)) {
49209 op = list_entry(object->pending_ops.next,
49210 struct fscache_operation, pend_link);
49211 - fscache_stat(&fscache_n_op_cancelled);
49212 + fscache_stat_unchecked(&fscache_n_op_cancelled);
49213 list_del_init(&op->pend_link);
49214
49215 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
49216 @@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
49217 op->state, ==, FSCACHE_OP_ST_CANCELLED);
49218 op->state = FSCACHE_OP_ST_DEAD;
49219
49220 - fscache_stat(&fscache_n_op_release);
49221 + fscache_stat_unchecked(&fscache_n_op_release);
49222
49223 if (op->release) {
49224 op->release(op);
49225 @@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
49226 * lock, and defer it otherwise */
49227 if (!spin_trylock(&object->lock)) {
49228 _debug("defer put");
49229 - fscache_stat(&fscache_n_op_deferred_release);
49230 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
49231
49232 cache = object->cache;
49233 spin_lock(&cache->op_gc_list_lock);
49234 @@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
49235
49236 _debug("GC DEFERRED REL OBJ%x OP%x",
49237 object->debug_id, op->debug_id);
49238 - fscache_stat(&fscache_n_op_gc);
49239 + fscache_stat_unchecked(&fscache_n_op_gc);
49240
49241 ASSERTCMP(atomic_read(&op->usage), ==, 0);
49242 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
49243 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
49244 index ff000e5..c44ec6d 100644
49245 --- a/fs/fscache/page.c
49246 +++ b/fs/fscache/page.c
49247 @@ -61,7 +61,7 @@ try_again:
49248 val = radix_tree_lookup(&cookie->stores, page->index);
49249 if (!val) {
49250 rcu_read_unlock();
49251 - fscache_stat(&fscache_n_store_vmscan_not_storing);
49252 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
49253 __fscache_uncache_page(cookie, page);
49254 return true;
49255 }
49256 @@ -91,11 +91,11 @@ try_again:
49257 spin_unlock(&cookie->stores_lock);
49258
49259 if (xpage) {
49260 - fscache_stat(&fscache_n_store_vmscan_cancelled);
49261 - fscache_stat(&fscache_n_store_radix_deletes);
49262 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
49263 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
49264 ASSERTCMP(xpage, ==, page);
49265 } else {
49266 - fscache_stat(&fscache_n_store_vmscan_gone);
49267 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
49268 }
49269
49270 wake_up_bit(&cookie->flags, 0);
49271 @@ -110,11 +110,11 @@ page_busy:
49272 * sleeping on memory allocation, so we may need to impose a timeout
49273 * too. */
49274 if (!(gfp & __GFP_WAIT)) {
49275 - fscache_stat(&fscache_n_store_vmscan_busy);
49276 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
49277 return false;
49278 }
49279
49280 - fscache_stat(&fscache_n_store_vmscan_wait);
49281 + fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
49282 __fscache_wait_on_page_write(cookie, page);
49283 gfp &= ~__GFP_WAIT;
49284 goto try_again;
49285 @@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
49286 FSCACHE_COOKIE_STORING_TAG);
49287 if (!radix_tree_tag_get(&cookie->stores, page->index,
49288 FSCACHE_COOKIE_PENDING_TAG)) {
49289 - fscache_stat(&fscache_n_store_radix_deletes);
49290 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
49291 xpage = radix_tree_delete(&cookie->stores, page->index);
49292 }
49293 spin_unlock(&cookie->stores_lock);
49294 @@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
49295
49296 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
49297
49298 - fscache_stat(&fscache_n_attr_changed_calls);
49299 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
49300
49301 if (fscache_object_is_active(object)) {
49302 fscache_stat(&fscache_n_cop_attr_changed);
49303 @@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49304
49305 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49306
49307 - fscache_stat(&fscache_n_attr_changed);
49308 + fscache_stat_unchecked(&fscache_n_attr_changed);
49309
49310 op = kzalloc(sizeof(*op), GFP_KERNEL);
49311 if (!op) {
49312 - fscache_stat(&fscache_n_attr_changed_nomem);
49313 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
49314 _leave(" = -ENOMEM");
49315 return -ENOMEM;
49316 }
49317 @@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49318 if (fscache_submit_exclusive_op(object, op) < 0)
49319 goto nobufs;
49320 spin_unlock(&cookie->lock);
49321 - fscache_stat(&fscache_n_attr_changed_ok);
49322 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
49323 fscache_put_operation(op);
49324 _leave(" = 0");
49325 return 0;
49326 @@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49327 nobufs:
49328 spin_unlock(&cookie->lock);
49329 kfree(op);
49330 - fscache_stat(&fscache_n_attr_changed_nobufs);
49331 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
49332 _leave(" = %d", -ENOBUFS);
49333 return -ENOBUFS;
49334 }
49335 @@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
49336 /* allocate a retrieval operation and attempt to submit it */
49337 op = kzalloc(sizeof(*op), GFP_NOIO);
49338 if (!op) {
49339 - fscache_stat(&fscache_n_retrievals_nomem);
49340 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49341 return NULL;
49342 }
49343
49344 @@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
49345 return 0;
49346 }
49347
49348 - fscache_stat(&fscache_n_retrievals_wait);
49349 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
49350
49351 jif = jiffies;
49352 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
49353 fscache_wait_bit_interruptible,
49354 TASK_INTERRUPTIBLE) != 0) {
49355 - fscache_stat(&fscache_n_retrievals_intr);
49356 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
49357 _leave(" = -ERESTARTSYS");
49358 return -ERESTARTSYS;
49359 }
49360 @@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
49361 */
49362 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49363 struct fscache_retrieval *op,
49364 - atomic_t *stat_op_waits,
49365 - atomic_t *stat_object_dead)
49366 + atomic_unchecked_t *stat_op_waits,
49367 + atomic_unchecked_t *stat_object_dead)
49368 {
49369 int ret;
49370
49371 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49372 goto check_if_dead;
49373
49374 _debug(">>> WT");
49375 - fscache_stat(stat_op_waits);
49376 + fscache_stat_unchecked(stat_op_waits);
49377 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
49378 fscache_wait_bit_interruptible,
49379 TASK_INTERRUPTIBLE) != 0) {
49380 @@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49381
49382 check_if_dead:
49383 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
49384 - fscache_stat(stat_object_dead);
49385 + fscache_stat_unchecked(stat_object_dead);
49386 _leave(" = -ENOBUFS [cancelled]");
49387 return -ENOBUFS;
49388 }
49389 if (unlikely(fscache_object_is_dead(object))) {
49390 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
49391 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
49392 - fscache_stat(stat_object_dead);
49393 + fscache_stat_unchecked(stat_object_dead);
49394 return -ENOBUFS;
49395 }
49396 return 0;
49397 @@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49398
49399 _enter("%p,%p,,,", cookie, page);
49400
49401 - fscache_stat(&fscache_n_retrievals);
49402 + fscache_stat_unchecked(&fscache_n_retrievals);
49403
49404 if (hlist_empty(&cookie->backing_objects))
49405 goto nobufs;
49406 @@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49407 goto nobufs_unlock_dec;
49408 spin_unlock(&cookie->lock);
49409
49410 - fscache_stat(&fscache_n_retrieval_ops);
49411 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
49412
49413 /* pin the netfs read context in case we need to do the actual netfs
49414 * read because we've encountered a cache read failure */
49415 @@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49416
49417 error:
49418 if (ret == -ENOMEM)
49419 - fscache_stat(&fscache_n_retrievals_nomem);
49420 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49421 else if (ret == -ERESTARTSYS)
49422 - fscache_stat(&fscache_n_retrievals_intr);
49423 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
49424 else if (ret == -ENODATA)
49425 - fscache_stat(&fscache_n_retrievals_nodata);
49426 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
49427 else if (ret < 0)
49428 - fscache_stat(&fscache_n_retrievals_nobufs);
49429 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49430 else
49431 - fscache_stat(&fscache_n_retrievals_ok);
49432 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
49433
49434 fscache_put_retrieval(op);
49435 _leave(" = %d", ret);
49436 @@ -467,7 +467,7 @@ nobufs_unlock:
49437 spin_unlock(&cookie->lock);
49438 kfree(op);
49439 nobufs:
49440 - fscache_stat(&fscache_n_retrievals_nobufs);
49441 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49442 _leave(" = -ENOBUFS");
49443 return -ENOBUFS;
49444 }
49445 @@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49446
49447 _enter("%p,,%d,,,", cookie, *nr_pages);
49448
49449 - fscache_stat(&fscache_n_retrievals);
49450 + fscache_stat_unchecked(&fscache_n_retrievals);
49451
49452 if (hlist_empty(&cookie->backing_objects))
49453 goto nobufs;
49454 @@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49455 goto nobufs_unlock_dec;
49456 spin_unlock(&cookie->lock);
49457
49458 - fscache_stat(&fscache_n_retrieval_ops);
49459 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
49460
49461 /* pin the netfs read context in case we need to do the actual netfs
49462 * read because we've encountered a cache read failure */
49463 @@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49464
49465 error:
49466 if (ret == -ENOMEM)
49467 - fscache_stat(&fscache_n_retrievals_nomem);
49468 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49469 else if (ret == -ERESTARTSYS)
49470 - fscache_stat(&fscache_n_retrievals_intr);
49471 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
49472 else if (ret == -ENODATA)
49473 - fscache_stat(&fscache_n_retrievals_nodata);
49474 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
49475 else if (ret < 0)
49476 - fscache_stat(&fscache_n_retrievals_nobufs);
49477 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49478 else
49479 - fscache_stat(&fscache_n_retrievals_ok);
49480 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
49481
49482 fscache_put_retrieval(op);
49483 _leave(" = %d", ret);
49484 @@ -591,7 +591,7 @@ nobufs_unlock:
49485 spin_unlock(&cookie->lock);
49486 kfree(op);
49487 nobufs:
49488 - fscache_stat(&fscache_n_retrievals_nobufs);
49489 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49490 _leave(" = -ENOBUFS");
49491 return -ENOBUFS;
49492 }
49493 @@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49494
49495 _enter("%p,%p,,,", cookie, page);
49496
49497 - fscache_stat(&fscache_n_allocs);
49498 + fscache_stat_unchecked(&fscache_n_allocs);
49499
49500 if (hlist_empty(&cookie->backing_objects))
49501 goto nobufs;
49502 @@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49503 goto nobufs_unlock;
49504 spin_unlock(&cookie->lock);
49505
49506 - fscache_stat(&fscache_n_alloc_ops);
49507 + fscache_stat_unchecked(&fscache_n_alloc_ops);
49508
49509 ret = fscache_wait_for_retrieval_activation(
49510 object, op,
49511 @@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49512
49513 error:
49514 if (ret == -ERESTARTSYS)
49515 - fscache_stat(&fscache_n_allocs_intr);
49516 + fscache_stat_unchecked(&fscache_n_allocs_intr);
49517 else if (ret < 0)
49518 - fscache_stat(&fscache_n_allocs_nobufs);
49519 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
49520 else
49521 - fscache_stat(&fscache_n_allocs_ok);
49522 + fscache_stat_unchecked(&fscache_n_allocs_ok);
49523
49524 fscache_put_retrieval(op);
49525 _leave(" = %d", ret);
49526 @@ -677,7 +677,7 @@ nobufs_unlock:
49527 spin_unlock(&cookie->lock);
49528 kfree(op);
49529 nobufs:
49530 - fscache_stat(&fscache_n_allocs_nobufs);
49531 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
49532 _leave(" = -ENOBUFS");
49533 return -ENOBUFS;
49534 }
49535 @@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49536
49537 spin_lock(&cookie->stores_lock);
49538
49539 - fscache_stat(&fscache_n_store_calls);
49540 + fscache_stat_unchecked(&fscache_n_store_calls);
49541
49542 /* find a page to store */
49543 page = NULL;
49544 @@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49545 page = results[0];
49546 _debug("gang %d [%lx]", n, page->index);
49547 if (page->index > op->store_limit) {
49548 - fscache_stat(&fscache_n_store_pages_over_limit);
49549 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
49550 goto superseded;
49551 }
49552
49553 @@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49554 spin_unlock(&cookie->stores_lock);
49555 spin_unlock(&object->lock);
49556
49557 - fscache_stat(&fscache_n_store_pages);
49558 + fscache_stat_unchecked(&fscache_n_store_pages);
49559 fscache_stat(&fscache_n_cop_write_page);
49560 ret = object->cache->ops->write_page(op, page);
49561 fscache_stat_d(&fscache_n_cop_write_page);
49562 @@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49563 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49564 ASSERT(PageFsCache(page));
49565
49566 - fscache_stat(&fscache_n_stores);
49567 + fscache_stat_unchecked(&fscache_n_stores);
49568
49569 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
49570 _leave(" = -ENOBUFS [invalidating]");
49571 @@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49572 spin_unlock(&cookie->stores_lock);
49573 spin_unlock(&object->lock);
49574
49575 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
49576 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
49577 op->store_limit = object->store_limit;
49578
49579 if (fscache_submit_op(object, &op->op) < 0)
49580 @@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49581
49582 spin_unlock(&cookie->lock);
49583 radix_tree_preload_end();
49584 - fscache_stat(&fscache_n_store_ops);
49585 - fscache_stat(&fscache_n_stores_ok);
49586 + fscache_stat_unchecked(&fscache_n_store_ops);
49587 + fscache_stat_unchecked(&fscache_n_stores_ok);
49588
49589 /* the work queue now carries its own ref on the object */
49590 fscache_put_operation(&op->op);
49591 @@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49592 return 0;
49593
49594 already_queued:
49595 - fscache_stat(&fscache_n_stores_again);
49596 + fscache_stat_unchecked(&fscache_n_stores_again);
49597 already_pending:
49598 spin_unlock(&cookie->stores_lock);
49599 spin_unlock(&object->lock);
49600 spin_unlock(&cookie->lock);
49601 radix_tree_preload_end();
49602 kfree(op);
49603 - fscache_stat(&fscache_n_stores_ok);
49604 + fscache_stat_unchecked(&fscache_n_stores_ok);
49605 _leave(" = 0");
49606 return 0;
49607
49608 @@ -959,14 +959,14 @@ nobufs:
49609 spin_unlock(&cookie->lock);
49610 radix_tree_preload_end();
49611 kfree(op);
49612 - fscache_stat(&fscache_n_stores_nobufs);
49613 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
49614 _leave(" = -ENOBUFS");
49615 return -ENOBUFS;
49616
49617 nomem_free:
49618 kfree(op);
49619 nomem:
49620 - fscache_stat(&fscache_n_stores_oom);
49621 + fscache_stat_unchecked(&fscache_n_stores_oom);
49622 _leave(" = -ENOMEM");
49623 return -ENOMEM;
49624 }
49625 @@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
49626 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49627 ASSERTCMP(page, !=, NULL);
49628
49629 - fscache_stat(&fscache_n_uncaches);
49630 + fscache_stat_unchecked(&fscache_n_uncaches);
49631
49632 /* cache withdrawal may beat us to it */
49633 if (!PageFsCache(page))
49634 @@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
49635 struct fscache_cookie *cookie = op->op.object->cookie;
49636
49637 #ifdef CONFIG_FSCACHE_STATS
49638 - atomic_inc(&fscache_n_marks);
49639 + atomic_inc_unchecked(&fscache_n_marks);
49640 #endif
49641
49642 _debug("- mark %p{%lx}", page, page->index);
49643 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
49644 index 8179e8b..5072cc7 100644
49645 --- a/fs/fscache/stats.c
49646 +++ b/fs/fscache/stats.c
49647 @@ -18,99 +18,99 @@
49648 /*
49649 * operation counters
49650 */
49651 -atomic_t fscache_n_op_pend;
49652 -atomic_t fscache_n_op_run;
49653 -atomic_t fscache_n_op_enqueue;
49654 -atomic_t fscache_n_op_requeue;
49655 -atomic_t fscache_n_op_deferred_release;
49656 -atomic_t fscache_n_op_release;
49657 -atomic_t fscache_n_op_gc;
49658 -atomic_t fscache_n_op_cancelled;
49659 -atomic_t fscache_n_op_rejected;
49660 +atomic_unchecked_t fscache_n_op_pend;
49661 +atomic_unchecked_t fscache_n_op_run;
49662 +atomic_unchecked_t fscache_n_op_enqueue;
49663 +atomic_unchecked_t fscache_n_op_requeue;
49664 +atomic_unchecked_t fscache_n_op_deferred_release;
49665 +atomic_unchecked_t fscache_n_op_release;
49666 +atomic_unchecked_t fscache_n_op_gc;
49667 +atomic_unchecked_t fscache_n_op_cancelled;
49668 +atomic_unchecked_t fscache_n_op_rejected;
49669
49670 -atomic_t fscache_n_attr_changed;
49671 -atomic_t fscache_n_attr_changed_ok;
49672 -atomic_t fscache_n_attr_changed_nobufs;
49673 -atomic_t fscache_n_attr_changed_nomem;
49674 -atomic_t fscache_n_attr_changed_calls;
49675 +atomic_unchecked_t fscache_n_attr_changed;
49676 +atomic_unchecked_t fscache_n_attr_changed_ok;
49677 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
49678 +atomic_unchecked_t fscache_n_attr_changed_nomem;
49679 +atomic_unchecked_t fscache_n_attr_changed_calls;
49680
49681 -atomic_t fscache_n_allocs;
49682 -atomic_t fscache_n_allocs_ok;
49683 -atomic_t fscache_n_allocs_wait;
49684 -atomic_t fscache_n_allocs_nobufs;
49685 -atomic_t fscache_n_allocs_intr;
49686 -atomic_t fscache_n_allocs_object_dead;
49687 -atomic_t fscache_n_alloc_ops;
49688 -atomic_t fscache_n_alloc_op_waits;
49689 +atomic_unchecked_t fscache_n_allocs;
49690 +atomic_unchecked_t fscache_n_allocs_ok;
49691 +atomic_unchecked_t fscache_n_allocs_wait;
49692 +atomic_unchecked_t fscache_n_allocs_nobufs;
49693 +atomic_unchecked_t fscache_n_allocs_intr;
49694 +atomic_unchecked_t fscache_n_allocs_object_dead;
49695 +atomic_unchecked_t fscache_n_alloc_ops;
49696 +atomic_unchecked_t fscache_n_alloc_op_waits;
49697
49698 -atomic_t fscache_n_retrievals;
49699 -atomic_t fscache_n_retrievals_ok;
49700 -atomic_t fscache_n_retrievals_wait;
49701 -atomic_t fscache_n_retrievals_nodata;
49702 -atomic_t fscache_n_retrievals_nobufs;
49703 -atomic_t fscache_n_retrievals_intr;
49704 -atomic_t fscache_n_retrievals_nomem;
49705 -atomic_t fscache_n_retrievals_object_dead;
49706 -atomic_t fscache_n_retrieval_ops;
49707 -atomic_t fscache_n_retrieval_op_waits;
49708 +atomic_unchecked_t fscache_n_retrievals;
49709 +atomic_unchecked_t fscache_n_retrievals_ok;
49710 +atomic_unchecked_t fscache_n_retrievals_wait;
49711 +atomic_unchecked_t fscache_n_retrievals_nodata;
49712 +atomic_unchecked_t fscache_n_retrievals_nobufs;
49713 +atomic_unchecked_t fscache_n_retrievals_intr;
49714 +atomic_unchecked_t fscache_n_retrievals_nomem;
49715 +atomic_unchecked_t fscache_n_retrievals_object_dead;
49716 +atomic_unchecked_t fscache_n_retrieval_ops;
49717 +atomic_unchecked_t fscache_n_retrieval_op_waits;
49718
49719 -atomic_t fscache_n_stores;
49720 -atomic_t fscache_n_stores_ok;
49721 -atomic_t fscache_n_stores_again;
49722 -atomic_t fscache_n_stores_nobufs;
49723 -atomic_t fscache_n_stores_oom;
49724 -atomic_t fscache_n_store_ops;
49725 -atomic_t fscache_n_store_calls;
49726 -atomic_t fscache_n_store_pages;
49727 -atomic_t fscache_n_store_radix_deletes;
49728 -atomic_t fscache_n_store_pages_over_limit;
49729 +atomic_unchecked_t fscache_n_stores;
49730 +atomic_unchecked_t fscache_n_stores_ok;
49731 +atomic_unchecked_t fscache_n_stores_again;
49732 +atomic_unchecked_t fscache_n_stores_nobufs;
49733 +atomic_unchecked_t fscache_n_stores_oom;
49734 +atomic_unchecked_t fscache_n_store_ops;
49735 +atomic_unchecked_t fscache_n_store_calls;
49736 +atomic_unchecked_t fscache_n_store_pages;
49737 +atomic_unchecked_t fscache_n_store_radix_deletes;
49738 +atomic_unchecked_t fscache_n_store_pages_over_limit;
49739
49740 -atomic_t fscache_n_store_vmscan_not_storing;
49741 -atomic_t fscache_n_store_vmscan_gone;
49742 -atomic_t fscache_n_store_vmscan_busy;
49743 -atomic_t fscache_n_store_vmscan_cancelled;
49744 -atomic_t fscache_n_store_vmscan_wait;
49745 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
49746 +atomic_unchecked_t fscache_n_store_vmscan_gone;
49747 +atomic_unchecked_t fscache_n_store_vmscan_busy;
49748 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
49749 +atomic_unchecked_t fscache_n_store_vmscan_wait;
49750
49751 -atomic_t fscache_n_marks;
49752 -atomic_t fscache_n_uncaches;
49753 +atomic_unchecked_t fscache_n_marks;
49754 +atomic_unchecked_t fscache_n_uncaches;
49755
49756 -atomic_t fscache_n_acquires;
49757 -atomic_t fscache_n_acquires_null;
49758 -atomic_t fscache_n_acquires_no_cache;
49759 -atomic_t fscache_n_acquires_ok;
49760 -atomic_t fscache_n_acquires_nobufs;
49761 -atomic_t fscache_n_acquires_oom;
49762 +atomic_unchecked_t fscache_n_acquires;
49763 +atomic_unchecked_t fscache_n_acquires_null;
49764 +atomic_unchecked_t fscache_n_acquires_no_cache;
49765 +atomic_unchecked_t fscache_n_acquires_ok;
49766 +atomic_unchecked_t fscache_n_acquires_nobufs;
49767 +atomic_unchecked_t fscache_n_acquires_oom;
49768
49769 -atomic_t fscache_n_invalidates;
49770 -atomic_t fscache_n_invalidates_run;
49771 +atomic_unchecked_t fscache_n_invalidates;
49772 +atomic_unchecked_t fscache_n_invalidates_run;
49773
49774 -atomic_t fscache_n_updates;
49775 -atomic_t fscache_n_updates_null;
49776 -atomic_t fscache_n_updates_run;
49777 +atomic_unchecked_t fscache_n_updates;
49778 +atomic_unchecked_t fscache_n_updates_null;
49779 +atomic_unchecked_t fscache_n_updates_run;
49780
49781 -atomic_t fscache_n_relinquishes;
49782 -atomic_t fscache_n_relinquishes_null;
49783 -atomic_t fscache_n_relinquishes_waitcrt;
49784 -atomic_t fscache_n_relinquishes_retire;
49785 +atomic_unchecked_t fscache_n_relinquishes;
49786 +atomic_unchecked_t fscache_n_relinquishes_null;
49787 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
49788 +atomic_unchecked_t fscache_n_relinquishes_retire;
49789
49790 -atomic_t fscache_n_cookie_index;
49791 -atomic_t fscache_n_cookie_data;
49792 -atomic_t fscache_n_cookie_special;
49793 +atomic_unchecked_t fscache_n_cookie_index;
49794 +atomic_unchecked_t fscache_n_cookie_data;
49795 +atomic_unchecked_t fscache_n_cookie_special;
49796
49797 -atomic_t fscache_n_object_alloc;
49798 -atomic_t fscache_n_object_no_alloc;
49799 -atomic_t fscache_n_object_lookups;
49800 -atomic_t fscache_n_object_lookups_negative;
49801 -atomic_t fscache_n_object_lookups_positive;
49802 -atomic_t fscache_n_object_lookups_timed_out;
49803 -atomic_t fscache_n_object_created;
49804 -atomic_t fscache_n_object_avail;
49805 -atomic_t fscache_n_object_dead;
49806 +atomic_unchecked_t fscache_n_object_alloc;
49807 +atomic_unchecked_t fscache_n_object_no_alloc;
49808 +atomic_unchecked_t fscache_n_object_lookups;
49809 +atomic_unchecked_t fscache_n_object_lookups_negative;
49810 +atomic_unchecked_t fscache_n_object_lookups_positive;
49811 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
49812 +atomic_unchecked_t fscache_n_object_created;
49813 +atomic_unchecked_t fscache_n_object_avail;
49814 +atomic_unchecked_t fscache_n_object_dead;
49815
49816 -atomic_t fscache_n_checkaux_none;
49817 -atomic_t fscache_n_checkaux_okay;
49818 -atomic_t fscache_n_checkaux_update;
49819 -atomic_t fscache_n_checkaux_obsolete;
49820 +atomic_unchecked_t fscache_n_checkaux_none;
49821 +atomic_unchecked_t fscache_n_checkaux_okay;
49822 +atomic_unchecked_t fscache_n_checkaux_update;
49823 +atomic_unchecked_t fscache_n_checkaux_obsolete;
49824
49825 atomic_t fscache_n_cop_alloc_object;
49826 atomic_t fscache_n_cop_lookup_object;
49827 @@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
49828 seq_puts(m, "FS-Cache statistics\n");
49829
49830 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
49831 - atomic_read(&fscache_n_cookie_index),
49832 - atomic_read(&fscache_n_cookie_data),
49833 - atomic_read(&fscache_n_cookie_special));
49834 + atomic_read_unchecked(&fscache_n_cookie_index),
49835 + atomic_read_unchecked(&fscache_n_cookie_data),
49836 + atomic_read_unchecked(&fscache_n_cookie_special));
49837
49838 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
49839 - atomic_read(&fscache_n_object_alloc),
49840 - atomic_read(&fscache_n_object_no_alloc),
49841 - atomic_read(&fscache_n_object_avail),
49842 - atomic_read(&fscache_n_object_dead));
49843 + atomic_read_unchecked(&fscache_n_object_alloc),
49844 + atomic_read_unchecked(&fscache_n_object_no_alloc),
49845 + atomic_read_unchecked(&fscache_n_object_avail),
49846 + atomic_read_unchecked(&fscache_n_object_dead));
49847 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
49848 - atomic_read(&fscache_n_checkaux_none),
49849 - atomic_read(&fscache_n_checkaux_okay),
49850 - atomic_read(&fscache_n_checkaux_update),
49851 - atomic_read(&fscache_n_checkaux_obsolete));
49852 + atomic_read_unchecked(&fscache_n_checkaux_none),
49853 + atomic_read_unchecked(&fscache_n_checkaux_okay),
49854 + atomic_read_unchecked(&fscache_n_checkaux_update),
49855 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
49856
49857 seq_printf(m, "Pages : mrk=%u unc=%u\n",
49858 - atomic_read(&fscache_n_marks),
49859 - atomic_read(&fscache_n_uncaches));
49860 + atomic_read_unchecked(&fscache_n_marks),
49861 + atomic_read_unchecked(&fscache_n_uncaches));
49862
49863 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
49864 " oom=%u\n",
49865 - atomic_read(&fscache_n_acquires),
49866 - atomic_read(&fscache_n_acquires_null),
49867 - atomic_read(&fscache_n_acquires_no_cache),
49868 - atomic_read(&fscache_n_acquires_ok),
49869 - atomic_read(&fscache_n_acquires_nobufs),
49870 - atomic_read(&fscache_n_acquires_oom));
49871 + atomic_read_unchecked(&fscache_n_acquires),
49872 + atomic_read_unchecked(&fscache_n_acquires_null),
49873 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
49874 + atomic_read_unchecked(&fscache_n_acquires_ok),
49875 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
49876 + atomic_read_unchecked(&fscache_n_acquires_oom));
49877
49878 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
49879 - atomic_read(&fscache_n_object_lookups),
49880 - atomic_read(&fscache_n_object_lookups_negative),
49881 - atomic_read(&fscache_n_object_lookups_positive),
49882 - atomic_read(&fscache_n_object_created),
49883 - atomic_read(&fscache_n_object_lookups_timed_out));
49884 + atomic_read_unchecked(&fscache_n_object_lookups),
49885 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
49886 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
49887 + atomic_read_unchecked(&fscache_n_object_created),
49888 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
49889
49890 seq_printf(m, "Invals : n=%u run=%u\n",
49891 - atomic_read(&fscache_n_invalidates),
49892 - atomic_read(&fscache_n_invalidates_run));
49893 + atomic_read_unchecked(&fscache_n_invalidates),
49894 + atomic_read_unchecked(&fscache_n_invalidates_run));
49895
49896 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
49897 - atomic_read(&fscache_n_updates),
49898 - atomic_read(&fscache_n_updates_null),
49899 - atomic_read(&fscache_n_updates_run));
49900 + atomic_read_unchecked(&fscache_n_updates),
49901 + atomic_read_unchecked(&fscache_n_updates_null),
49902 + atomic_read_unchecked(&fscache_n_updates_run));
49903
49904 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
49905 - atomic_read(&fscache_n_relinquishes),
49906 - atomic_read(&fscache_n_relinquishes_null),
49907 - atomic_read(&fscache_n_relinquishes_waitcrt),
49908 - atomic_read(&fscache_n_relinquishes_retire));
49909 + atomic_read_unchecked(&fscache_n_relinquishes),
49910 + atomic_read_unchecked(&fscache_n_relinquishes_null),
49911 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
49912 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
49913
49914 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
49915 - atomic_read(&fscache_n_attr_changed),
49916 - atomic_read(&fscache_n_attr_changed_ok),
49917 - atomic_read(&fscache_n_attr_changed_nobufs),
49918 - atomic_read(&fscache_n_attr_changed_nomem),
49919 - atomic_read(&fscache_n_attr_changed_calls));
49920 + atomic_read_unchecked(&fscache_n_attr_changed),
49921 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
49922 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
49923 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
49924 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
49925
49926 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
49927 - atomic_read(&fscache_n_allocs),
49928 - atomic_read(&fscache_n_allocs_ok),
49929 - atomic_read(&fscache_n_allocs_wait),
49930 - atomic_read(&fscache_n_allocs_nobufs),
49931 - atomic_read(&fscache_n_allocs_intr));
49932 + atomic_read_unchecked(&fscache_n_allocs),
49933 + atomic_read_unchecked(&fscache_n_allocs_ok),
49934 + atomic_read_unchecked(&fscache_n_allocs_wait),
49935 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
49936 + atomic_read_unchecked(&fscache_n_allocs_intr));
49937 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
49938 - atomic_read(&fscache_n_alloc_ops),
49939 - atomic_read(&fscache_n_alloc_op_waits),
49940 - atomic_read(&fscache_n_allocs_object_dead));
49941 + atomic_read_unchecked(&fscache_n_alloc_ops),
49942 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
49943 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
49944
49945 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
49946 " int=%u oom=%u\n",
49947 - atomic_read(&fscache_n_retrievals),
49948 - atomic_read(&fscache_n_retrievals_ok),
49949 - atomic_read(&fscache_n_retrievals_wait),
49950 - atomic_read(&fscache_n_retrievals_nodata),
49951 - atomic_read(&fscache_n_retrievals_nobufs),
49952 - atomic_read(&fscache_n_retrievals_intr),
49953 - atomic_read(&fscache_n_retrievals_nomem));
49954 + atomic_read_unchecked(&fscache_n_retrievals),
49955 + atomic_read_unchecked(&fscache_n_retrievals_ok),
49956 + atomic_read_unchecked(&fscache_n_retrievals_wait),
49957 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
49958 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
49959 + atomic_read_unchecked(&fscache_n_retrievals_intr),
49960 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
49961 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
49962 - atomic_read(&fscache_n_retrieval_ops),
49963 - atomic_read(&fscache_n_retrieval_op_waits),
49964 - atomic_read(&fscache_n_retrievals_object_dead));
49965 + atomic_read_unchecked(&fscache_n_retrieval_ops),
49966 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
49967 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
49968
49969 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
49970 - atomic_read(&fscache_n_stores),
49971 - atomic_read(&fscache_n_stores_ok),
49972 - atomic_read(&fscache_n_stores_again),
49973 - atomic_read(&fscache_n_stores_nobufs),
49974 - atomic_read(&fscache_n_stores_oom));
49975 + atomic_read_unchecked(&fscache_n_stores),
49976 + atomic_read_unchecked(&fscache_n_stores_ok),
49977 + atomic_read_unchecked(&fscache_n_stores_again),
49978 + atomic_read_unchecked(&fscache_n_stores_nobufs),
49979 + atomic_read_unchecked(&fscache_n_stores_oom));
49980 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
49981 - atomic_read(&fscache_n_store_ops),
49982 - atomic_read(&fscache_n_store_calls),
49983 - atomic_read(&fscache_n_store_pages),
49984 - atomic_read(&fscache_n_store_radix_deletes),
49985 - atomic_read(&fscache_n_store_pages_over_limit));
49986 + atomic_read_unchecked(&fscache_n_store_ops),
49987 + atomic_read_unchecked(&fscache_n_store_calls),
49988 + atomic_read_unchecked(&fscache_n_store_pages),
49989 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
49990 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
49991
49992 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
49993 - atomic_read(&fscache_n_store_vmscan_not_storing),
49994 - atomic_read(&fscache_n_store_vmscan_gone),
49995 - atomic_read(&fscache_n_store_vmscan_busy),
49996 - atomic_read(&fscache_n_store_vmscan_cancelled),
49997 - atomic_read(&fscache_n_store_vmscan_wait));
49998 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
49999 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
50000 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
50001 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
50002 + atomic_read_unchecked(&fscache_n_store_vmscan_wait));
50003
50004 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
50005 - atomic_read(&fscache_n_op_pend),
50006 - atomic_read(&fscache_n_op_run),
50007 - atomic_read(&fscache_n_op_enqueue),
50008 - atomic_read(&fscache_n_op_cancelled),
50009 - atomic_read(&fscache_n_op_rejected));
50010 + atomic_read_unchecked(&fscache_n_op_pend),
50011 + atomic_read_unchecked(&fscache_n_op_run),
50012 + atomic_read_unchecked(&fscache_n_op_enqueue),
50013 + atomic_read_unchecked(&fscache_n_op_cancelled),
50014 + atomic_read_unchecked(&fscache_n_op_rejected));
50015 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
50016 - atomic_read(&fscache_n_op_deferred_release),
50017 - atomic_read(&fscache_n_op_release),
50018 - atomic_read(&fscache_n_op_gc));
50019 + atomic_read_unchecked(&fscache_n_op_deferred_release),
50020 + atomic_read_unchecked(&fscache_n_op_release),
50021 + atomic_read_unchecked(&fscache_n_op_gc));
50022
50023 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
50024 atomic_read(&fscache_n_cop_alloc_object),
50025 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
50026 index e397b67..b0d8709 100644
50027 --- a/fs/fuse/cuse.c
50028 +++ b/fs/fuse/cuse.c
50029 @@ -593,10 +593,12 @@ static int __init cuse_init(void)
50030 INIT_LIST_HEAD(&cuse_conntbl[i]);
50031
50032 /* inherit and extend fuse_dev_operations */
50033 - cuse_channel_fops = fuse_dev_operations;
50034 - cuse_channel_fops.owner = THIS_MODULE;
50035 - cuse_channel_fops.open = cuse_channel_open;
50036 - cuse_channel_fops.release = cuse_channel_release;
50037 + pax_open_kernel();
50038 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
50039 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
50040 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
50041 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
50042 + pax_close_kernel();
50043
50044 cuse_class = class_create(THIS_MODULE, "cuse");
50045 if (IS_ERR(cuse_class))
50046 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
50047 index e83351a..41e3c9c 100644
50048 --- a/fs/fuse/dev.c
50049 +++ b/fs/fuse/dev.c
50050 @@ -1236,7 +1236,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
50051 ret = 0;
50052 pipe_lock(pipe);
50053
50054 - if (!pipe->readers) {
50055 + if (!atomic_read(&pipe->readers)) {
50056 send_sig(SIGPIPE, current, 0);
50057 if (!ret)
50058 ret = -EPIPE;
50059 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
50060 index 315e1f8..91f890c 100644
50061 --- a/fs/fuse/dir.c
50062 +++ b/fs/fuse/dir.c
50063 @@ -1233,7 +1233,7 @@ static char *read_link(struct dentry *dentry)
50064 return link;
50065 }
50066
50067 -static void free_link(char *link)
50068 +static void free_link(const char *link)
50069 {
50070 if (!IS_ERR(link))
50071 free_page((unsigned long) link);
50072 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
50073 index 2b6f569..fcb4d1f 100644
50074 --- a/fs/gfs2/inode.c
50075 +++ b/fs/gfs2/inode.c
50076 @@ -1499,7 +1499,7 @@ out:
50077
50078 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
50079 {
50080 - char *s = nd_get_link(nd);
50081 + const char *s = nd_get_link(nd);
50082 if (!IS_ERR(s))
50083 kfree(s);
50084 }
50085 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
50086 index 78bde32..767e906 100644
50087 --- a/fs/hugetlbfs/inode.c
50088 +++ b/fs/hugetlbfs/inode.c
50089 @@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
50090 struct mm_struct *mm = current->mm;
50091 struct vm_area_struct *vma;
50092 struct hstate *h = hstate_file(file);
50093 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
50094 struct vm_unmapped_area_info info;
50095
50096 if (len & ~huge_page_mask(h))
50097 @@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
50098 return addr;
50099 }
50100
50101 +#ifdef CONFIG_PAX_RANDMMAP
50102 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
50103 +#endif
50104 +
50105 if (addr) {
50106 addr = ALIGN(addr, huge_page_size(h));
50107 vma = find_vma(mm, addr);
50108 - if (TASK_SIZE - len >= addr &&
50109 - (!vma || addr + len <= vma->vm_start))
50110 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
50111 return addr;
50112 }
50113
50114 info.flags = 0;
50115 info.length = len;
50116 info.low_limit = TASK_UNMAPPED_BASE;
50117 +
50118 +#ifdef CONFIG_PAX_RANDMMAP
50119 + if (mm->pax_flags & MF_PAX_RANDMMAP)
50120 + info.low_limit += mm->delta_mmap;
50121 +#endif
50122 +
50123 info.high_limit = TASK_SIZE;
50124 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
50125 info.align_offset = 0;
50126 @@ -897,7 +907,7 @@ static struct file_system_type hugetlbfs_fs_type = {
50127 .kill_sb = kill_litter_super,
50128 };
50129
50130 -static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
50131 +struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
50132
50133 static int can_do_hugetlb_shm(void)
50134 {
50135 diff --git a/fs/inode.c b/fs/inode.c
50136 index 14084b7..29af1d9 100644
50137 --- a/fs/inode.c
50138 +++ b/fs/inode.c
50139 @@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
50140
50141 #ifdef CONFIG_SMP
50142 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
50143 - static atomic_t shared_last_ino;
50144 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
50145 + static atomic_unchecked_t shared_last_ino;
50146 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
50147
50148 res = next - LAST_INO_BATCH;
50149 }
50150 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
50151 index 4a6cf28..d3a29d3 100644
50152 --- a/fs/jffs2/erase.c
50153 +++ b/fs/jffs2/erase.c
50154 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
50155 struct jffs2_unknown_node marker = {
50156 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
50157 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
50158 - .totlen = cpu_to_je32(c->cleanmarker_size)
50159 + .totlen = cpu_to_je32(c->cleanmarker_size),
50160 + .hdr_crc = cpu_to_je32(0)
50161 };
50162
50163 jffs2_prealloc_raw_node_refs(c, jeb, 1);
50164 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
50165 index a6597d6..41b30ec 100644
50166 --- a/fs/jffs2/wbuf.c
50167 +++ b/fs/jffs2/wbuf.c
50168 @@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
50169 {
50170 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
50171 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
50172 - .totlen = constant_cpu_to_je32(8)
50173 + .totlen = constant_cpu_to_je32(8),
50174 + .hdr_crc = constant_cpu_to_je32(0)
50175 };
50176
50177 /*
50178 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
50179 index 1a543be..d803c40 100644
50180 --- a/fs/jfs/super.c
50181 +++ b/fs/jfs/super.c
50182 @@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
50183
50184 jfs_inode_cachep =
50185 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
50186 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
50187 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
50188 init_once);
50189 if (jfs_inode_cachep == NULL)
50190 return -ENOMEM;
50191 diff --git a/fs/libfs.c b/fs/libfs.c
50192 index 916da8c..1588998 100644
50193 --- a/fs/libfs.c
50194 +++ b/fs/libfs.c
50195 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
50196
50197 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
50198 struct dentry *next;
50199 + char d_name[sizeof(next->d_iname)];
50200 + const unsigned char *name;
50201 +
50202 next = list_entry(p, struct dentry, d_u.d_child);
50203 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
50204 if (!simple_positive(next)) {
50205 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
50206
50207 spin_unlock(&next->d_lock);
50208 spin_unlock(&dentry->d_lock);
50209 - if (filldir(dirent, next->d_name.name,
50210 + name = next->d_name.name;
50211 + if (name == next->d_iname) {
50212 + memcpy(d_name, name, next->d_name.len);
50213 + name = d_name;
50214 + }
50215 + if (filldir(dirent, name,
50216 next->d_name.len, filp->f_pos,
50217 next->d_inode->i_ino,
50218 dt_type(next->d_inode)) < 0)
50219 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
50220 index 52e5120..808936e 100644
50221 --- a/fs/lockd/clntproc.c
50222 +++ b/fs/lockd/clntproc.c
50223 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
50224 /*
50225 * Cookie counter for NLM requests
50226 */
50227 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
50228 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
50229
50230 void nlmclnt_next_cookie(struct nlm_cookie *c)
50231 {
50232 - u32 cookie = atomic_inc_return(&nlm_cookie);
50233 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
50234
50235 memcpy(c->data, &cookie, 4);
50236 c->len=4;
50237 diff --git a/fs/locks.c b/fs/locks.c
50238 index a94e331..060bce3 100644
50239 --- a/fs/locks.c
50240 +++ b/fs/locks.c
50241 @@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
50242 return;
50243
50244 if (filp->f_op && filp->f_op->flock) {
50245 - struct file_lock fl = {
50246 + struct file_lock flock = {
50247 .fl_pid = current->tgid,
50248 .fl_file = filp,
50249 .fl_flags = FL_FLOCK,
50250 .fl_type = F_UNLCK,
50251 .fl_end = OFFSET_MAX,
50252 };
50253 - filp->f_op->flock(filp, F_SETLKW, &fl);
50254 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
50255 - fl.fl_ops->fl_release_private(&fl);
50256 + filp->f_op->flock(filp, F_SETLKW, &flock);
50257 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
50258 + flock.fl_ops->fl_release_private(&flock);
50259 }
50260
50261 lock_flocks();
50262 diff --git a/fs/namei.c b/fs/namei.c
50263 index 43a97ee..4e585fd 100644
50264 --- a/fs/namei.c
50265 +++ b/fs/namei.c
50266 @@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
50267 if (ret != -EACCES)
50268 return ret;
50269
50270 +#ifdef CONFIG_GRKERNSEC
50271 + /* we'll block if we have to log due to a denied capability use */
50272 + if (mask & MAY_NOT_BLOCK)
50273 + return -ECHILD;
50274 +#endif
50275 +
50276 if (S_ISDIR(inode->i_mode)) {
50277 /* DACs are overridable for directories */
50278 - if (inode_capable(inode, CAP_DAC_OVERRIDE))
50279 - return 0;
50280 if (!(mask & MAY_WRITE))
50281 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
50282 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
50283 + inode_capable(inode, CAP_DAC_READ_SEARCH))
50284 return 0;
50285 + if (inode_capable(inode, CAP_DAC_OVERRIDE))
50286 + return 0;
50287 return -EACCES;
50288 }
50289 /*
50290 + * Searching includes executable on directories, else just read.
50291 + */
50292 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
50293 + if (mask == MAY_READ)
50294 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
50295 + inode_capable(inode, CAP_DAC_READ_SEARCH))
50296 + return 0;
50297 +
50298 + /*
50299 * Read/write DACs are always overridable.
50300 * Executable DACs are overridable when there is
50301 * at least one exec bit set.
50302 @@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
50303 if (inode_capable(inode, CAP_DAC_OVERRIDE))
50304 return 0;
50305
50306 - /*
50307 - * Searching includes executable on directories, else just read.
50308 - */
50309 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
50310 - if (mask == MAY_READ)
50311 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
50312 - return 0;
50313 -
50314 return -EACCES;
50315 }
50316
50317 @@ -826,7 +834,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
50318 {
50319 struct dentry *dentry = link->dentry;
50320 int error;
50321 - char *s;
50322 + const char *s;
50323
50324 BUG_ON(nd->flags & LOOKUP_RCU);
50325
50326 @@ -847,6 +855,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
50327 if (error)
50328 goto out_put_nd_path;
50329
50330 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
50331 + dentry->d_inode, dentry, nd->path.mnt)) {
50332 + error = -EACCES;
50333 + goto out_put_nd_path;
50334 + }
50335 +
50336 nd->last_type = LAST_BIND;
50337 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
50338 error = PTR_ERR(*p);
50339 @@ -1596,6 +1610,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
50340 break;
50341 res = walk_component(nd, path, &nd->last,
50342 nd->last_type, LOOKUP_FOLLOW);
50343 + if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
50344 + res = -EACCES;
50345 put_link(nd, &link, cookie);
50346 } while (res > 0);
50347
50348 @@ -1694,7 +1710,7 @@ EXPORT_SYMBOL(full_name_hash);
50349 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
50350 {
50351 unsigned long a, b, adata, bdata, mask, hash, len;
50352 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
50353 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
50354
50355 hash = a = 0;
50356 len = -sizeof(unsigned long);
50357 @@ -1979,6 +1995,8 @@ static int path_lookupat(int dfd, const char *name,
50358 if (err)
50359 break;
50360 err = lookup_last(nd, &path);
50361 + if (!err && gr_handle_symlink_owner(&link, nd->inode))
50362 + err = -EACCES;
50363 put_link(nd, &link, cookie);
50364 }
50365 }
50366 @@ -1986,6 +2004,19 @@ static int path_lookupat(int dfd, const char *name,
50367 if (!err)
50368 err = complete_walk(nd);
50369
50370 + if (!err && !(nd->flags & LOOKUP_PARENT)) {
50371 +#ifdef CONFIG_GRKERNSEC
50372 + if (flags & LOOKUP_RCU) {
50373 + path_put(&nd->path);
50374 + err = -ECHILD;
50375 + } else
50376 +#endif
50377 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
50378 + path_put(&nd->path);
50379 + err = -ENOENT;
50380 + }
50381 + }
50382 +
50383 if (!err && nd->flags & LOOKUP_DIRECTORY) {
50384 if (!nd->inode->i_op->lookup) {
50385 path_put(&nd->path);
50386 @@ -2013,8 +2044,17 @@ static int filename_lookup(int dfd, struct filename *name,
50387 retval = path_lookupat(dfd, name->name,
50388 flags | LOOKUP_REVAL, nd);
50389
50390 - if (likely(!retval))
50391 + if (likely(!retval)) {
50392 + if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
50393 +#ifdef CONFIG_GRKERNSEC
50394 + if (flags & LOOKUP_RCU)
50395 + return -ECHILD;
50396 +#endif
50397 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
50398 + return -ENOENT;
50399 + }
50400 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
50401 + }
50402 return retval;
50403 }
50404
50405 @@ -2392,6 +2432,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
50406 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
50407 return -EPERM;
50408
50409 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
50410 + return -EPERM;
50411 + if (gr_handle_rawio(inode))
50412 + return -EPERM;
50413 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
50414 + return -EACCES;
50415 +
50416 return 0;
50417 }
50418
50419 @@ -2613,7 +2660,7 @@ looked_up:
50420 * cleared otherwise prior to returning.
50421 */
50422 static int lookup_open(struct nameidata *nd, struct path *path,
50423 - struct file *file,
50424 + struct path *link, struct file *file,
50425 const struct open_flags *op,
50426 bool got_write, int *opened)
50427 {
50428 @@ -2648,6 +2695,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
50429 /* Negative dentry, just create the file */
50430 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
50431 umode_t mode = op->mode;
50432 +
50433 + if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
50434 + error = -EACCES;
50435 + goto out_dput;
50436 + }
50437 +
50438 + if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
50439 + error = -EACCES;
50440 + goto out_dput;
50441 + }
50442 +
50443 if (!IS_POSIXACL(dir->d_inode))
50444 mode &= ~current_umask();
50445 /*
50446 @@ -2669,6 +2727,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
50447 nd->flags & LOOKUP_EXCL);
50448 if (error)
50449 goto out_dput;
50450 + else
50451 + gr_handle_create(dentry, nd->path.mnt);
50452 }
50453 out_no_open:
50454 path->dentry = dentry;
50455 @@ -2683,7 +2743,7 @@ out_dput:
50456 /*
50457 * Handle the last step of open()
50458 */
50459 -static int do_last(struct nameidata *nd, struct path *path,
50460 +static int do_last(struct nameidata *nd, struct path *path, struct path *link,
50461 struct file *file, const struct open_flags *op,
50462 int *opened, struct filename *name)
50463 {
50464 @@ -2712,16 +2772,44 @@ static int do_last(struct nameidata *nd, struct path *path,
50465 error = complete_walk(nd);
50466 if (error)
50467 return error;
50468 +#ifdef CONFIG_GRKERNSEC
50469 + if (nd->flags & LOOKUP_RCU) {
50470 + error = -ECHILD;
50471 + goto out;
50472 + }
50473 +#endif
50474 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
50475 + error = -ENOENT;
50476 + goto out;
50477 + }
50478 audit_inode(name, nd->path.dentry, 0);
50479 if (open_flag & O_CREAT) {
50480 error = -EISDIR;
50481 goto out;
50482 }
50483 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
50484 + error = -EACCES;
50485 + goto out;
50486 + }
50487 goto finish_open;
50488 case LAST_BIND:
50489 error = complete_walk(nd);
50490 if (error)
50491 return error;
50492 +#ifdef CONFIG_GRKERNSEC
50493 + if (nd->flags & LOOKUP_RCU) {
50494 + error = -ECHILD;
50495 + goto out;
50496 + }
50497 +#endif
50498 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
50499 + error = -ENOENT;
50500 + goto out;
50501 + }
50502 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
50503 + error = -EACCES;
50504 + goto out;
50505 + }
50506 audit_inode(name, dir, 0);
50507 goto finish_open;
50508 }
50509 @@ -2770,7 +2858,7 @@ retry_lookup:
50510 */
50511 }
50512 mutex_lock(&dir->d_inode->i_mutex);
50513 - error = lookup_open(nd, path, file, op, got_write, opened);
50514 + error = lookup_open(nd, path, link, file, op, got_write, opened);
50515 mutex_unlock(&dir->d_inode->i_mutex);
50516
50517 if (error <= 0) {
50518 @@ -2794,11 +2882,28 @@ retry_lookup:
50519 goto finish_open_created;
50520 }
50521
50522 + if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
50523 + error = -ENOENT;
50524 + goto exit_dput;
50525 + }
50526 + if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
50527 + error = -EACCES;
50528 + goto exit_dput;
50529 + }
50530 +
50531 /*
50532 * create/update audit record if it already exists.
50533 */
50534 - if (path->dentry->d_inode)
50535 + if (path->dentry->d_inode) {
50536 + /* only check if O_CREAT is specified, all other checks need to go
50537 + into may_open */
50538 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
50539 + error = -EACCES;
50540 + goto exit_dput;
50541 + }
50542 +
50543 audit_inode(name, path->dentry, 0);
50544 + }
50545
50546 /*
50547 * If atomic_open() acquired write access it is dropped now due to
50548 @@ -2839,6 +2944,11 @@ finish_lookup:
50549 }
50550 }
50551 BUG_ON(inode != path->dentry->d_inode);
50552 + /* if we're resolving a symlink to another symlink */
50553 + if (link && gr_handle_symlink_owner(link, inode)) {
50554 + error = -EACCES;
50555 + goto out;
50556 + }
50557 return 1;
50558 }
50559
50560 @@ -2848,7 +2958,6 @@ finish_lookup:
50561 save_parent.dentry = nd->path.dentry;
50562 save_parent.mnt = mntget(path->mnt);
50563 nd->path.dentry = path->dentry;
50564 -
50565 }
50566 nd->inode = inode;
50567 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
50568 @@ -2857,6 +2966,22 @@ finish_lookup:
50569 path_put(&save_parent);
50570 return error;
50571 }
50572 +
50573 +#ifdef CONFIG_GRKERNSEC
50574 + if (nd->flags & LOOKUP_RCU) {
50575 + error = -ECHILD;
50576 + goto out;
50577 + }
50578 +#endif
50579 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
50580 + error = -ENOENT;
50581 + goto out;
50582 + }
50583 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
50584 + error = -EACCES;
50585 + goto out;
50586 + }
50587 +
50588 error = -EISDIR;
50589 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
50590 goto out;
50591 @@ -2955,7 +3080,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
50592 if (unlikely(error))
50593 goto out;
50594
50595 - error = do_last(nd, &path, file, op, &opened, pathname);
50596 + error = do_last(nd, &path, NULL, file, op, &opened, pathname);
50597 while (unlikely(error > 0)) { /* trailing symlink */
50598 struct path link = path;
50599 void *cookie;
50600 @@ -2973,7 +3098,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
50601 error = follow_link(&link, nd, &cookie);
50602 if (unlikely(error))
50603 break;
50604 - error = do_last(nd, &path, file, op, &opened, pathname);
50605 + error = do_last(nd, &path, &link, file, op, &opened, pathname);
50606 put_link(nd, &link, cookie);
50607 }
50608 out:
50609 @@ -3073,8 +3198,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
50610 goto unlock;
50611
50612 error = -EEXIST;
50613 - if (dentry->d_inode)
50614 + if (dentry->d_inode) {
50615 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
50616 + error = -ENOENT;
50617 + }
50618 goto fail;
50619 + }
50620 /*
50621 * Special case - lookup gave negative, but... we had foo/bar/
50622 * From the vfs_mknod() POV we just have a negative dentry -
50623 @@ -3126,6 +3255,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
50624 }
50625 EXPORT_SYMBOL(user_path_create);
50626
50627 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
50628 +{
50629 + struct filename *tmp = getname(pathname);
50630 + struct dentry *res;
50631 + if (IS_ERR(tmp))
50632 + return ERR_CAST(tmp);
50633 + res = kern_path_create(dfd, tmp->name, path, lookup_flags);
50634 + if (IS_ERR(res))
50635 + putname(tmp);
50636 + else
50637 + *to = tmp;
50638 + return res;
50639 +}
50640 +
50641 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
50642 {
50643 int error = may_create(dir, dentry);
50644 @@ -3188,6 +3331,17 @@ retry:
50645
50646 if (!IS_POSIXACL(path.dentry->d_inode))
50647 mode &= ~current_umask();
50648 +
50649 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
50650 + error = -EPERM;
50651 + goto out;
50652 + }
50653 +
50654 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
50655 + error = -EACCES;
50656 + goto out;
50657 + }
50658 +
50659 error = security_path_mknod(&path, dentry, mode, dev);
50660 if (error)
50661 goto out;
50662 @@ -3204,6 +3358,8 @@ retry:
50663 break;
50664 }
50665 out:
50666 + if (!error)
50667 + gr_handle_create(dentry, path.mnt);
50668 done_path_create(&path, dentry);
50669 if (retry_estale(error, lookup_flags)) {
50670 lookup_flags |= LOOKUP_REVAL;
50671 @@ -3256,9 +3412,16 @@ retry:
50672
50673 if (!IS_POSIXACL(path.dentry->d_inode))
50674 mode &= ~current_umask();
50675 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
50676 + error = -EACCES;
50677 + goto out;
50678 + }
50679 error = security_path_mkdir(&path, dentry, mode);
50680 if (!error)
50681 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
50682 + if (!error)
50683 + gr_handle_create(dentry, path.mnt);
50684 +out:
50685 done_path_create(&path, dentry);
50686 if (retry_estale(error, lookup_flags)) {
50687 lookup_flags |= LOOKUP_REVAL;
50688 @@ -3339,6 +3502,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
50689 struct filename *name;
50690 struct dentry *dentry;
50691 struct nameidata nd;
50692 + ino_t saved_ino = 0;
50693 + dev_t saved_dev = 0;
50694 unsigned int lookup_flags = 0;
50695 retry:
50696 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
50697 @@ -3371,10 +3536,21 @@ retry:
50698 error = -ENOENT;
50699 goto exit3;
50700 }
50701 +
50702 + saved_ino = dentry->d_inode->i_ino;
50703 + saved_dev = gr_get_dev_from_dentry(dentry);
50704 +
50705 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
50706 + error = -EACCES;
50707 + goto exit3;
50708 + }
50709 +
50710 error = security_path_rmdir(&nd.path, dentry);
50711 if (error)
50712 goto exit3;
50713 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
50714 + if (!error && (saved_dev || saved_ino))
50715 + gr_handle_delete(saved_ino, saved_dev);
50716 exit3:
50717 dput(dentry);
50718 exit2:
50719 @@ -3440,6 +3616,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
50720 struct dentry *dentry;
50721 struct nameidata nd;
50722 struct inode *inode = NULL;
50723 + ino_t saved_ino = 0;
50724 + dev_t saved_dev = 0;
50725 unsigned int lookup_flags = 0;
50726 retry:
50727 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
50728 @@ -3466,10 +3644,22 @@ retry:
50729 if (!inode)
50730 goto slashes;
50731 ihold(inode);
50732 +
50733 + if (inode->i_nlink <= 1) {
50734 + saved_ino = inode->i_ino;
50735 + saved_dev = gr_get_dev_from_dentry(dentry);
50736 + }
50737 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
50738 + error = -EACCES;
50739 + goto exit2;
50740 + }
50741 +
50742 error = security_path_unlink(&nd.path, dentry);
50743 if (error)
50744 goto exit2;
50745 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
50746 + if (!error && (saved_ino || saved_dev))
50747 + gr_handle_delete(saved_ino, saved_dev);
50748 exit2:
50749 dput(dentry);
50750 }
50751 @@ -3547,9 +3737,17 @@ retry:
50752 if (IS_ERR(dentry))
50753 goto out_putname;
50754
50755 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
50756 + error = -EACCES;
50757 + goto out;
50758 + }
50759 +
50760 error = security_path_symlink(&path, dentry, from->name);
50761 if (!error)
50762 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
50763 + if (!error)
50764 + gr_handle_create(dentry, path.mnt);
50765 +out:
50766 done_path_create(&path, dentry);
50767 if (retry_estale(error, lookup_flags)) {
50768 lookup_flags |= LOOKUP_REVAL;
50769 @@ -3623,6 +3821,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
50770 {
50771 struct dentry *new_dentry;
50772 struct path old_path, new_path;
50773 + struct filename *to = NULL;
50774 int how = 0;
50775 int error;
50776
50777 @@ -3646,7 +3845,7 @@ retry:
50778 if (error)
50779 return error;
50780
50781 - new_dentry = user_path_create(newdfd, newname, &new_path,
50782 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
50783 (how & LOOKUP_REVAL));
50784 error = PTR_ERR(new_dentry);
50785 if (IS_ERR(new_dentry))
50786 @@ -3658,11 +3857,28 @@ retry:
50787 error = may_linkat(&old_path);
50788 if (unlikely(error))
50789 goto out_dput;
50790 +
50791 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
50792 + old_path.dentry->d_inode,
50793 + old_path.dentry->d_inode->i_mode, to)) {
50794 + error = -EACCES;
50795 + goto out_dput;
50796 + }
50797 +
50798 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
50799 + old_path.dentry, old_path.mnt, to)) {
50800 + error = -EACCES;
50801 + goto out_dput;
50802 + }
50803 +
50804 error = security_path_link(old_path.dentry, &new_path, new_dentry);
50805 if (error)
50806 goto out_dput;
50807 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
50808 + if (!error)
50809 + gr_handle_create(new_dentry, new_path.mnt);
50810 out_dput:
50811 + putname(to);
50812 done_path_create(&new_path, new_dentry);
50813 if (retry_estale(error, how)) {
50814 how |= LOOKUP_REVAL;
50815 @@ -3908,12 +4124,21 @@ retry:
50816 if (new_dentry == trap)
50817 goto exit5;
50818
50819 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
50820 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
50821 + to);
50822 + if (error)
50823 + goto exit5;
50824 +
50825 error = security_path_rename(&oldnd.path, old_dentry,
50826 &newnd.path, new_dentry);
50827 if (error)
50828 goto exit5;
50829 error = vfs_rename(old_dir->d_inode, old_dentry,
50830 new_dir->d_inode, new_dentry);
50831 + if (!error)
50832 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
50833 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
50834 exit5:
50835 dput(new_dentry);
50836 exit4:
50837 @@ -3945,6 +4170,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
50838
50839 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
50840 {
50841 + char tmpbuf[64];
50842 + const char *newlink;
50843 int len;
50844
50845 len = PTR_ERR(link);
50846 @@ -3954,7 +4181,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
50847 len = strlen(link);
50848 if (len > (unsigned) buflen)
50849 len = buflen;
50850 - if (copy_to_user(buffer, link, len))
50851 +
50852 + if (len < sizeof(tmpbuf)) {
50853 + memcpy(tmpbuf, link, len);
50854 + newlink = tmpbuf;
50855 + } else
50856 + newlink = link;
50857 +
50858 + if (copy_to_user(buffer, newlink, len))
50859 len = -EFAULT;
50860 out:
50861 return len;
50862 diff --git a/fs/namespace.c b/fs/namespace.c
50863 index a51054f..f9b53e5 100644
50864 --- a/fs/namespace.c
50865 +++ b/fs/namespace.c
50866 @@ -1215,6 +1215,9 @@ static int do_umount(struct mount *mnt, int flags)
50867 if (!(sb->s_flags & MS_RDONLY))
50868 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
50869 up_write(&sb->s_umount);
50870 +
50871 + gr_log_remount(mnt->mnt_devname, retval);
50872 +
50873 return retval;
50874 }
50875
50876 @@ -1234,6 +1237,9 @@ static int do_umount(struct mount *mnt, int flags)
50877 br_write_unlock(&vfsmount_lock);
50878 up_write(&namespace_sem);
50879 release_mounts(&umount_list);
50880 +
50881 + gr_log_unmount(mnt->mnt_devname, retval);
50882 +
50883 return retval;
50884 }
50885
50886 @@ -2287,6 +2293,16 @@ long do_mount(const char *dev_name, const char *dir_name,
50887 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
50888 MS_STRICTATIME);
50889
50890 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
50891 + retval = -EPERM;
50892 + goto dput_out;
50893 + }
50894 +
50895 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
50896 + retval = -EPERM;
50897 + goto dput_out;
50898 + }
50899 +
50900 if (flags & MS_REMOUNT)
50901 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
50902 data_page);
50903 @@ -2301,6 +2317,9 @@ long do_mount(const char *dev_name, const char *dir_name,
50904 dev_name, data_page);
50905 dput_out:
50906 path_put(&path);
50907 +
50908 + gr_log_mount(dev_name, dir_name, retval);
50909 +
50910 return retval;
50911 }
50912
50913 @@ -2587,6 +2606,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
50914 if (error)
50915 goto out2;
50916
50917 + if (gr_handle_chroot_pivot()) {
50918 + error = -EPERM;
50919 + goto out2;
50920 + }
50921 +
50922 get_fs_root(current->fs, &root);
50923 error = lock_mount(&old);
50924 if (error)
50925 @@ -2790,7 +2814,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
50926 !nsown_capable(CAP_SYS_ADMIN))
50927 return -EPERM;
50928
50929 - if (fs->users != 1)
50930 + if (atomic_read(&fs->users) != 1)
50931 return -EINVAL;
50932
50933 get_mnt_ns(mnt_ns);
50934 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
50935 index ebeb94c..ff35337 100644
50936 --- a/fs/nfs/inode.c
50937 +++ b/fs/nfs/inode.c
50938 @@ -1042,16 +1042,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
50939 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
50940 }
50941
50942 -static atomic_long_t nfs_attr_generation_counter;
50943 +static atomic_long_unchecked_t nfs_attr_generation_counter;
50944
50945 static unsigned long nfs_read_attr_generation_counter(void)
50946 {
50947 - return atomic_long_read(&nfs_attr_generation_counter);
50948 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
50949 }
50950
50951 unsigned long nfs_inc_attr_generation_counter(void)
50952 {
50953 - return atomic_long_inc_return(&nfs_attr_generation_counter);
50954 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
50955 }
50956
50957 void nfs_fattr_init(struct nfs_fattr *fattr)
50958 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
50959 index d586117..143d568 100644
50960 --- a/fs/nfsd/vfs.c
50961 +++ b/fs/nfsd/vfs.c
50962 @@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
50963 } else {
50964 oldfs = get_fs();
50965 set_fs(KERNEL_DS);
50966 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
50967 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
50968 set_fs(oldfs);
50969 }
50970
50971 @@ -1025,7 +1025,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
50972
50973 /* Write the data. */
50974 oldfs = get_fs(); set_fs(KERNEL_DS);
50975 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
50976 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
50977 set_fs(oldfs);
50978 if (host_err < 0)
50979 goto out_nfserr;
50980 @@ -1571,7 +1571,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
50981 */
50982
50983 oldfs = get_fs(); set_fs(KERNEL_DS);
50984 - host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
50985 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
50986 set_fs(oldfs);
50987
50988 if (host_err < 0)
50989 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
50990 index 9ff4a5e..deb1f0f 100644
50991 --- a/fs/notify/fanotify/fanotify_user.c
50992 +++ b/fs/notify/fanotify/fanotify_user.c
50993 @@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
50994
50995 fd = fanotify_event_metadata.fd;
50996 ret = -EFAULT;
50997 - if (copy_to_user(buf, &fanotify_event_metadata,
50998 - fanotify_event_metadata.event_len))
50999 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
51000 + copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
51001 goto out_close_fd;
51002
51003 ret = prepare_for_access_response(group, event, fd);
51004 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
51005 index 7b51b05..5ea5ef6 100644
51006 --- a/fs/notify/notification.c
51007 +++ b/fs/notify/notification.c
51008 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
51009 * get set to 0 so it will never get 'freed'
51010 */
51011 static struct fsnotify_event *q_overflow_event;
51012 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51013 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51014
51015 /**
51016 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
51017 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51018 */
51019 u32 fsnotify_get_cookie(void)
51020 {
51021 - return atomic_inc_return(&fsnotify_sync_cookie);
51022 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
51023 }
51024 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
51025
51026 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
51027 index 99e3610..02c1068 100644
51028 --- a/fs/ntfs/dir.c
51029 +++ b/fs/ntfs/dir.c
51030 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
51031 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
51032 ~(s64)(ndir->itype.index.block_size - 1)));
51033 /* Bounds checks. */
51034 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
51035 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
51036 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
51037 "inode 0x%lx or driver bug.", vdir->i_ino);
51038 goto err_out;
51039 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
51040 index 5b2d4f0..c6de396 100644
51041 --- a/fs/ntfs/file.c
51042 +++ b/fs/ntfs/file.c
51043 @@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
51044 #endif /* NTFS_RW */
51045 };
51046
51047 -const struct file_operations ntfs_empty_file_ops = {};
51048 +const struct file_operations ntfs_empty_file_ops __read_only;
51049
51050 -const struct inode_operations ntfs_empty_inode_ops = {};
51051 +const struct inode_operations ntfs_empty_inode_ops __read_only;
51052 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
51053 index a9f78c7..ed8a381 100644
51054 --- a/fs/ocfs2/localalloc.c
51055 +++ b/fs/ocfs2/localalloc.c
51056 @@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
51057 goto bail;
51058 }
51059
51060 - atomic_inc(&osb->alloc_stats.moves);
51061 + atomic_inc_unchecked(&osb->alloc_stats.moves);
51062
51063 bail:
51064 if (handle)
51065 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
51066 index d355e6e..578d905 100644
51067 --- a/fs/ocfs2/ocfs2.h
51068 +++ b/fs/ocfs2/ocfs2.h
51069 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
51070
51071 struct ocfs2_alloc_stats
51072 {
51073 - atomic_t moves;
51074 - atomic_t local_data;
51075 - atomic_t bitmap_data;
51076 - atomic_t bg_allocs;
51077 - atomic_t bg_extends;
51078 + atomic_unchecked_t moves;
51079 + atomic_unchecked_t local_data;
51080 + atomic_unchecked_t bitmap_data;
51081 + atomic_unchecked_t bg_allocs;
51082 + atomic_unchecked_t bg_extends;
51083 };
51084
51085 enum ocfs2_local_alloc_state
51086 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
51087 index b7e74b5..19c6536 100644
51088 --- a/fs/ocfs2/suballoc.c
51089 +++ b/fs/ocfs2/suballoc.c
51090 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
51091 mlog_errno(status);
51092 goto bail;
51093 }
51094 - atomic_inc(&osb->alloc_stats.bg_extends);
51095 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
51096
51097 /* You should never ask for this much metadata */
51098 BUG_ON(bits_wanted >
51099 @@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
51100 mlog_errno(status);
51101 goto bail;
51102 }
51103 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
51104 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
51105
51106 *suballoc_loc = res.sr_bg_blkno;
51107 *suballoc_bit_start = res.sr_bit_offset;
51108 @@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
51109 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
51110 res->sr_bits);
51111
51112 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
51113 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
51114
51115 BUG_ON(res->sr_bits != 1);
51116
51117 @@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
51118 mlog_errno(status);
51119 goto bail;
51120 }
51121 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
51122 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
51123
51124 BUG_ON(res.sr_bits != 1);
51125
51126 @@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
51127 cluster_start,
51128 num_clusters);
51129 if (!status)
51130 - atomic_inc(&osb->alloc_stats.local_data);
51131 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
51132 } else {
51133 if (min_clusters > (osb->bitmap_cpg - 1)) {
51134 /* The only paths asking for contiguousness
51135 @@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
51136 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
51137 res.sr_bg_blkno,
51138 res.sr_bit_offset);
51139 - atomic_inc(&osb->alloc_stats.bitmap_data);
51140 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
51141 *num_clusters = res.sr_bits;
51142 }
51143 }
51144 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
51145 index 0e91ec2..f4b3fc6 100644
51146 --- a/fs/ocfs2/super.c
51147 +++ b/fs/ocfs2/super.c
51148 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
51149 "%10s => GlobalAllocs: %d LocalAllocs: %d "
51150 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
51151 "Stats",
51152 - atomic_read(&osb->alloc_stats.bitmap_data),
51153 - atomic_read(&osb->alloc_stats.local_data),
51154 - atomic_read(&osb->alloc_stats.bg_allocs),
51155 - atomic_read(&osb->alloc_stats.moves),
51156 - atomic_read(&osb->alloc_stats.bg_extends));
51157 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
51158 + atomic_read_unchecked(&osb->alloc_stats.local_data),
51159 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
51160 + atomic_read_unchecked(&osb->alloc_stats.moves),
51161 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
51162
51163 out += snprintf(buf + out, len - out,
51164 "%10s => State: %u Descriptor: %llu Size: %u bits "
51165 @@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
51166 spin_lock_init(&osb->osb_xattr_lock);
51167 ocfs2_init_steal_slots(osb);
51168
51169 - atomic_set(&osb->alloc_stats.moves, 0);
51170 - atomic_set(&osb->alloc_stats.local_data, 0);
51171 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
51172 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
51173 - atomic_set(&osb->alloc_stats.bg_extends, 0);
51174 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
51175 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
51176 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
51177 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
51178 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
51179
51180 /* Copy the blockcheck stats from the superblock probe */
51181 osb->osb_ecc_stats = *stats;
51182 diff --git a/fs/open.c b/fs/open.c
51183 index 9b33c0c..2ffcca2 100644
51184 --- a/fs/open.c
51185 +++ b/fs/open.c
51186 @@ -31,6 +31,8 @@
51187 #include <linux/ima.h>
51188 #include <linux/dnotify.h>
51189
51190 +#define CREATE_TRACE_POINTS
51191 +#include <trace/events/fs.h>
51192 #include "internal.h"
51193
51194 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
51195 @@ -101,6 +103,8 @@ long vfs_truncate(struct path *path, loff_t length)
51196 error = locks_verify_truncate(inode, NULL, length);
51197 if (!error)
51198 error = security_path_truncate(path);
51199 + if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
51200 + error = -EACCES;
51201 if (!error)
51202 error = do_truncate(path->dentry, length, 0, NULL);
51203
51204 @@ -178,6 +182,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
51205 error = locks_verify_truncate(inode, f.file, length);
51206 if (!error)
51207 error = security_path_truncate(&f.file->f_path);
51208 + if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
51209 + error = -EACCES;
51210 if (!error)
51211 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
51212 sb_end_write(inode->i_sb);
51213 @@ -373,6 +379,9 @@ retry:
51214 if (__mnt_is_readonly(path.mnt))
51215 res = -EROFS;
51216
51217 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
51218 + res = -EACCES;
51219 +
51220 out_path_release:
51221 path_put(&path);
51222 if (retry_estale(res, lookup_flags)) {
51223 @@ -404,6 +413,8 @@ retry:
51224 if (error)
51225 goto dput_and_out;
51226
51227 + gr_log_chdir(path.dentry, path.mnt);
51228 +
51229 set_fs_pwd(current->fs, &path);
51230
51231 dput_and_out:
51232 @@ -433,6 +444,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
51233 goto out_putf;
51234
51235 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
51236 +
51237 + if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
51238 + error = -EPERM;
51239 +
51240 + if (!error)
51241 + gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
51242 +
51243 if (!error)
51244 set_fs_pwd(current->fs, &f.file->f_path);
51245 out_putf:
51246 @@ -462,7 +480,13 @@ retry:
51247 if (error)
51248 goto dput_and_out;
51249
51250 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
51251 + goto dput_and_out;
51252 +
51253 set_fs_root(current->fs, &path);
51254 +
51255 + gr_handle_chroot_chdir(&path);
51256 +
51257 error = 0;
51258 dput_and_out:
51259 path_put(&path);
51260 @@ -484,6 +508,16 @@ static int chmod_common(struct path *path, umode_t mode)
51261 if (error)
51262 return error;
51263 mutex_lock(&inode->i_mutex);
51264 +
51265 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
51266 + error = -EACCES;
51267 + goto out_unlock;
51268 + }
51269 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
51270 + error = -EACCES;
51271 + goto out_unlock;
51272 + }
51273 +
51274 error = security_path_chmod(path, mode);
51275 if (error)
51276 goto out_unlock;
51277 @@ -544,6 +578,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
51278 uid = make_kuid(current_user_ns(), user);
51279 gid = make_kgid(current_user_ns(), group);
51280
51281 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
51282 + return -EACCES;
51283 +
51284 newattrs.ia_valid = ATTR_CTIME;
51285 if (user != (uid_t) -1) {
51286 if (!uid_valid(uid))
51287 @@ -960,6 +997,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
51288 } else {
51289 fsnotify_open(f);
51290 fd_install(fd, f);
51291 + trace_do_sys_open(tmp->name, flags, mode);
51292 }
51293 }
51294 putname(tmp);
51295 diff --git a/fs/pipe.c b/fs/pipe.c
51296 index bd3479d..fb92c4d 100644
51297 --- a/fs/pipe.c
51298 +++ b/fs/pipe.c
51299 @@ -438,9 +438,9 @@ redo:
51300 }
51301 if (bufs) /* More to do? */
51302 continue;
51303 - if (!pipe->writers)
51304 + if (!atomic_read(&pipe->writers))
51305 break;
51306 - if (!pipe->waiting_writers) {
51307 + if (!atomic_read(&pipe->waiting_writers)) {
51308 /* syscall merging: Usually we must not sleep
51309 * if O_NONBLOCK is set, or if we got some data.
51310 * But if a writer sleeps in kernel space, then
51311 @@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
51312 mutex_lock(&inode->i_mutex);
51313 pipe = inode->i_pipe;
51314
51315 - if (!pipe->readers) {
51316 + if (!atomic_read(&pipe->readers)) {
51317 send_sig(SIGPIPE, current, 0);
51318 ret = -EPIPE;
51319 goto out;
51320 @@ -553,7 +553,7 @@ redo1:
51321 for (;;) {
51322 int bufs;
51323
51324 - if (!pipe->readers) {
51325 + if (!atomic_read(&pipe->readers)) {
51326 send_sig(SIGPIPE, current, 0);
51327 if (!ret)
51328 ret = -EPIPE;
51329 @@ -644,9 +644,9 @@ redo2:
51330 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
51331 do_wakeup = 0;
51332 }
51333 - pipe->waiting_writers++;
51334 + atomic_inc(&pipe->waiting_writers);
51335 pipe_wait(pipe);
51336 - pipe->waiting_writers--;
51337 + atomic_dec(&pipe->waiting_writers);
51338 }
51339 out:
51340 mutex_unlock(&inode->i_mutex);
51341 @@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
51342 mask = 0;
51343 if (filp->f_mode & FMODE_READ) {
51344 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
51345 - if (!pipe->writers && filp->f_version != pipe->w_counter)
51346 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
51347 mask |= POLLHUP;
51348 }
51349
51350 @@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
51351 * Most Unices do not set POLLERR for FIFOs but on Linux they
51352 * behave exactly like pipes for poll().
51353 */
51354 - if (!pipe->readers)
51355 + if (!atomic_read(&pipe->readers))
51356 mask |= POLLERR;
51357 }
51358
51359 @@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
51360
51361 mutex_lock(&inode->i_mutex);
51362 pipe = inode->i_pipe;
51363 - pipe->readers -= decr;
51364 - pipe->writers -= decw;
51365 + atomic_sub(decr, &pipe->readers);
51366 + atomic_sub(decw, &pipe->writers);
51367
51368 - if (!pipe->readers && !pipe->writers) {
51369 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
51370 free_pipe_info(inode);
51371 } else {
51372 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
51373 @@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
51374
51375 if (inode->i_pipe) {
51376 ret = 0;
51377 - inode->i_pipe->readers++;
51378 + atomic_inc(&inode->i_pipe->readers);
51379 }
51380
51381 mutex_unlock(&inode->i_mutex);
51382 @@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
51383
51384 if (inode->i_pipe) {
51385 ret = 0;
51386 - inode->i_pipe->writers++;
51387 + atomic_inc(&inode->i_pipe->writers);
51388 }
51389
51390 mutex_unlock(&inode->i_mutex);
51391 @@ -868,9 +868,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
51392 if (inode->i_pipe) {
51393 ret = 0;
51394 if (filp->f_mode & FMODE_READ)
51395 - inode->i_pipe->readers++;
51396 + atomic_inc(&inode->i_pipe->readers);
51397 if (filp->f_mode & FMODE_WRITE)
51398 - inode->i_pipe->writers++;
51399 + atomic_inc(&inode->i_pipe->writers);
51400 }
51401
51402 mutex_unlock(&inode->i_mutex);
51403 @@ -962,7 +962,7 @@ void free_pipe_info(struct inode *inode)
51404 inode->i_pipe = NULL;
51405 }
51406
51407 -static struct vfsmount *pipe_mnt __read_mostly;
51408 +struct vfsmount *pipe_mnt __read_mostly;
51409
51410 /*
51411 * pipefs_dname() is called from d_path().
51412 @@ -992,7 +992,8 @@ static struct inode * get_pipe_inode(void)
51413 goto fail_iput;
51414 inode->i_pipe = pipe;
51415
51416 - pipe->readers = pipe->writers = 1;
51417 + atomic_set(&pipe->readers, 1);
51418 + atomic_set(&pipe->writers, 1);
51419 inode->i_fop = &rdwr_pipefifo_fops;
51420
51421 /*
51422 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
51423 index 15af622..0e9f4467 100644
51424 --- a/fs/proc/Kconfig
51425 +++ b/fs/proc/Kconfig
51426 @@ -30,12 +30,12 @@ config PROC_FS
51427
51428 config PROC_KCORE
51429 bool "/proc/kcore support" if !ARM
51430 - depends on PROC_FS && MMU
51431 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
51432
51433 config PROC_VMCORE
51434 bool "/proc/vmcore support"
51435 - depends on PROC_FS && CRASH_DUMP
51436 - default y
51437 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
51438 + default n
51439 help
51440 Exports the dump image of crashed kernel in ELF format.
51441
51442 @@ -59,8 +59,8 @@ config PROC_SYSCTL
51443 limited in memory.
51444
51445 config PROC_PAGE_MONITOR
51446 - default y
51447 - depends on PROC_FS && MMU
51448 + default n
51449 + depends on PROC_FS && MMU && !GRKERNSEC
51450 bool "Enable /proc page monitoring" if EXPERT
51451 help
51452 Various /proc files exist to monitor process memory utilization:
51453 diff --git a/fs/proc/array.c b/fs/proc/array.c
51454 index 6a91e6f..e54dbc14 100644
51455 --- a/fs/proc/array.c
51456 +++ b/fs/proc/array.c
51457 @@ -60,6 +60,7 @@
51458 #include <linux/tty.h>
51459 #include <linux/string.h>
51460 #include <linux/mman.h>
51461 +#include <linux/grsecurity.h>
51462 #include <linux/proc_fs.h>
51463 #include <linux/ioport.h>
51464 #include <linux/uaccess.h>
51465 @@ -362,6 +363,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
51466 seq_putc(m, '\n');
51467 }
51468
51469 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
51470 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
51471 +{
51472 + if (p->mm)
51473 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
51474 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
51475 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
51476 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
51477 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
51478 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
51479 + else
51480 + seq_printf(m, "PaX:\t-----\n");
51481 +}
51482 +#endif
51483 +
51484 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
51485 struct pid *pid, struct task_struct *task)
51486 {
51487 @@ -380,9 +396,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
51488 task_cpus_allowed(m, task);
51489 cpuset_task_status_allowed(m, task);
51490 task_context_switch_counts(m, task);
51491 +
51492 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
51493 + task_pax(m, task);
51494 +#endif
51495 +
51496 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
51497 + task_grsec_rbac(m, task);
51498 +#endif
51499 +
51500 return 0;
51501 }
51502
51503 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51504 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
51505 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
51506 + _mm->pax_flags & MF_PAX_SEGMEXEC))
51507 +#endif
51508 +
51509 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
51510 struct pid *pid, struct task_struct *task, int whole)
51511 {
51512 @@ -404,6 +435,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
51513 char tcomm[sizeof(task->comm)];
51514 unsigned long flags;
51515
51516 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51517 + if (current->exec_id != m->exec_id) {
51518 + gr_log_badprocpid("stat");
51519 + return 0;
51520 + }
51521 +#endif
51522 +
51523 state = *get_task_state(task);
51524 vsize = eip = esp = 0;
51525 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
51526 @@ -475,6 +513,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
51527 gtime = task->gtime;
51528 }
51529
51530 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51531 + if (PAX_RAND_FLAGS(mm)) {
51532 + eip = 0;
51533 + esp = 0;
51534 + wchan = 0;
51535 + }
51536 +#endif
51537 +#ifdef CONFIG_GRKERNSEC_HIDESYM
51538 + wchan = 0;
51539 + eip =0;
51540 + esp =0;
51541 +#endif
51542 +
51543 /* scale priority and nice values from timeslices to -20..20 */
51544 /* to make it look like a "normal" Unix priority/nice value */
51545 priority = task_prio(task);
51546 @@ -511,9 +562,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
51547 seq_put_decimal_ull(m, ' ', vsize);
51548 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
51549 seq_put_decimal_ull(m, ' ', rsslim);
51550 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51551 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
51552 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
51553 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
51554 +#else
51555 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
51556 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
51557 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
51558 +#endif
51559 seq_put_decimal_ull(m, ' ', esp);
51560 seq_put_decimal_ull(m, ' ', eip);
51561 /* The signal information here is obsolete.
51562 @@ -535,7 +592,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
51563 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
51564 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
51565
51566 - if (mm && permitted) {
51567 + if (mm && permitted
51568 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51569 + && !PAX_RAND_FLAGS(mm)
51570 +#endif
51571 + ) {
51572 seq_put_decimal_ull(m, ' ', mm->start_data);
51573 seq_put_decimal_ull(m, ' ', mm->end_data);
51574 seq_put_decimal_ull(m, ' ', mm->start_brk);
51575 @@ -573,8 +634,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
51576 struct pid *pid, struct task_struct *task)
51577 {
51578 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
51579 - struct mm_struct *mm = get_task_mm(task);
51580 + struct mm_struct *mm;
51581
51582 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51583 + if (current->exec_id != m->exec_id) {
51584 + gr_log_badprocpid("statm");
51585 + return 0;
51586 + }
51587 +#endif
51588 + mm = get_task_mm(task);
51589 if (mm) {
51590 size = task_statm(mm, &shared, &text, &data, &resident);
51591 mmput(mm);
51592 @@ -597,6 +665,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
51593 return 0;
51594 }
51595
51596 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
51597 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
51598 +{
51599 + return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
51600 +}
51601 +#endif
51602 +
51603 #ifdef CONFIG_CHECKPOINT_RESTORE
51604 static struct pid *
51605 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
51606 diff --git a/fs/proc/base.c b/fs/proc/base.c
51607 index 9b43ff77..ba3e990 100644
51608 --- a/fs/proc/base.c
51609 +++ b/fs/proc/base.c
51610 @@ -111,6 +111,14 @@ struct pid_entry {
51611 union proc_op op;
51612 };
51613
51614 +struct getdents_callback {
51615 + struct linux_dirent __user * current_dir;
51616 + struct linux_dirent __user * previous;
51617 + struct file * file;
51618 + int count;
51619 + int error;
51620 +};
51621 +
51622 #define NOD(NAME, MODE, IOP, FOP, OP) { \
51623 .name = (NAME), \
51624 .len = sizeof(NAME) - 1, \
51625 @@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
51626 if (!mm->arg_end)
51627 goto out_mm; /* Shh! No looking before we're done */
51628
51629 + if (gr_acl_handle_procpidmem(task))
51630 + goto out_mm;
51631 +
51632 len = mm->arg_end - mm->arg_start;
51633
51634 if (len > PAGE_SIZE)
51635 @@ -235,12 +246,28 @@ out:
51636 return res;
51637 }
51638
51639 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51640 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
51641 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
51642 + _mm->pax_flags & MF_PAX_SEGMEXEC))
51643 +#endif
51644 +
51645 static int proc_pid_auxv(struct task_struct *task, char *buffer)
51646 {
51647 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
51648 int res = PTR_ERR(mm);
51649 if (mm && !IS_ERR(mm)) {
51650 unsigned int nwords = 0;
51651 +
51652 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51653 + /* allow if we're currently ptracing this task */
51654 + if (PAX_RAND_FLAGS(mm) &&
51655 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
51656 + mmput(mm);
51657 + return 0;
51658 + }
51659 +#endif
51660 +
51661 do {
51662 nwords += 2;
51663 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
51664 @@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
51665 }
51666
51667
51668 -#ifdef CONFIG_KALLSYMS
51669 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
51670 /*
51671 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
51672 * Returns the resolved symbol. If that fails, simply return the address.
51673 @@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
51674 mutex_unlock(&task->signal->cred_guard_mutex);
51675 }
51676
51677 -#ifdef CONFIG_STACKTRACE
51678 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
51679
51680 #define MAX_STACK_TRACE_DEPTH 64
51681
51682 @@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
51683 return count;
51684 }
51685
51686 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
51687 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
51688 static int proc_pid_syscall(struct task_struct *task, char *buffer)
51689 {
51690 long nr;
51691 @@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
51692 /************************************************************************/
51693
51694 /* permission checks */
51695 -static int proc_fd_access_allowed(struct inode *inode)
51696 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
51697 {
51698 struct task_struct *task;
51699 int allowed = 0;
51700 @@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
51701 */
51702 task = get_proc_task(inode);
51703 if (task) {
51704 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
51705 + if (log)
51706 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
51707 + else
51708 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
51709 put_task_struct(task);
51710 }
51711 return allowed;
51712 @@ -555,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
51713 struct task_struct *task,
51714 int hide_pid_min)
51715 {
51716 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
51717 + return false;
51718 +
51719 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51720 + rcu_read_lock();
51721 + {
51722 + const struct cred *tmpcred = current_cred();
51723 + const struct cred *cred = __task_cred(task);
51724 +
51725 + if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
51726 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
51727 + || in_group_p(grsec_proc_gid)
51728 +#endif
51729 + ) {
51730 + rcu_read_unlock();
51731 + return true;
51732 + }
51733 + }
51734 + rcu_read_unlock();
51735 +
51736 + if (!pid->hide_pid)
51737 + return false;
51738 +#endif
51739 +
51740 if (pid->hide_pid < hide_pid_min)
51741 return true;
51742 if (in_group_p(pid->pid_gid))
51743 return true;
51744 +
51745 return ptrace_may_access(task, PTRACE_MODE_READ);
51746 }
51747
51748 @@ -576,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
51749 put_task_struct(task);
51750
51751 if (!has_perms) {
51752 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51753 + {
51754 +#else
51755 if (pid->hide_pid == 2) {
51756 +#endif
51757 /*
51758 * Let's make getdents(), stat(), and open()
51759 * consistent with each other. If a process
51760 @@ -674,6 +733,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
51761 if (!task)
51762 return -ESRCH;
51763
51764 + if (gr_acl_handle_procpidmem(task)) {
51765 + put_task_struct(task);
51766 + return -EPERM;
51767 + }
51768 +
51769 mm = mm_access(task, mode);
51770 put_task_struct(task);
51771
51772 @@ -689,6 +753,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
51773
51774 file->private_data = mm;
51775
51776 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51777 + file->f_version = current->exec_id;
51778 +#endif
51779 +
51780 return 0;
51781 }
51782
51783 @@ -710,6 +778,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
51784 ssize_t copied;
51785 char *page;
51786
51787 +#ifdef CONFIG_GRKERNSEC
51788 + if (write)
51789 + return -EPERM;
51790 +#endif
51791 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51792 + if (file->f_version != current->exec_id) {
51793 + gr_log_badprocpid("mem");
51794 + return 0;
51795 + }
51796 +#endif
51797 +
51798 if (!mm)
51799 return 0;
51800
51801 @@ -814,6 +893,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
51802 if (!mm)
51803 return 0;
51804
51805 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51806 + if (file->f_version != current->exec_id) {
51807 + gr_log_badprocpid("environ");
51808 + return 0;
51809 + }
51810 +#endif
51811 +
51812 page = (char *)__get_free_page(GFP_TEMPORARY);
51813 if (!page)
51814 return -ENOMEM;
51815 @@ -1429,7 +1515,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
51816 int error = -EACCES;
51817
51818 /* Are we allowed to snoop on the tasks file descriptors? */
51819 - if (!proc_fd_access_allowed(inode))
51820 + if (!proc_fd_access_allowed(inode, 0))
51821 goto out;
51822
51823 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
51824 @@ -1473,8 +1559,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
51825 struct path path;
51826
51827 /* Are we allowed to snoop on the tasks file descriptors? */
51828 - if (!proc_fd_access_allowed(inode))
51829 - goto out;
51830 + /* logging this is needed for learning on chromium to work properly,
51831 + but we don't want to flood the logs from 'ps' which does a readlink
51832 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
51833 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
51834 + */
51835 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
51836 + if (!proc_fd_access_allowed(inode,0))
51837 + goto out;
51838 + } else {
51839 + if (!proc_fd_access_allowed(inode,1))
51840 + goto out;
51841 + }
51842
51843 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
51844 if (error)
51845 @@ -1524,7 +1620,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
51846 rcu_read_lock();
51847 cred = __task_cred(task);
51848 inode->i_uid = cred->euid;
51849 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
51850 + inode->i_gid = grsec_proc_gid;
51851 +#else
51852 inode->i_gid = cred->egid;
51853 +#endif
51854 rcu_read_unlock();
51855 }
51856 security_task_to_inode(task, inode);
51857 @@ -1560,10 +1660,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
51858 return -ENOENT;
51859 }
51860 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
51861 +#ifdef CONFIG_GRKERNSEC_PROC_USER
51862 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
51863 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51864 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
51865 +#endif
51866 task_dumpable(task)) {
51867 cred = __task_cred(task);
51868 stat->uid = cred->euid;
51869 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
51870 + stat->gid = grsec_proc_gid;
51871 +#else
51872 stat->gid = cred->egid;
51873 +#endif
51874 }
51875 }
51876 rcu_read_unlock();
51877 @@ -1601,11 +1710,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
51878
51879 if (task) {
51880 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
51881 +#ifdef CONFIG_GRKERNSEC_PROC_USER
51882 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
51883 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51884 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
51885 +#endif
51886 task_dumpable(task)) {
51887 rcu_read_lock();
51888 cred = __task_cred(task);
51889 inode->i_uid = cred->euid;
51890 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
51891 + inode->i_gid = grsec_proc_gid;
51892 +#else
51893 inode->i_gid = cred->egid;
51894 +#endif
51895 rcu_read_unlock();
51896 } else {
51897 inode->i_uid = GLOBAL_ROOT_UID;
51898 @@ -2058,6 +2176,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
51899 if (!task)
51900 goto out_no_task;
51901
51902 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
51903 + goto out;
51904 +
51905 /*
51906 * Yes, it does not scale. And it should not. Don't add
51907 * new entries into /proc/<tgid>/ without very good reasons.
51908 @@ -2102,6 +2223,9 @@ static int proc_pident_readdir(struct file *filp,
51909 if (!task)
51910 goto out_no_task;
51911
51912 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
51913 + goto out;
51914 +
51915 ret = 0;
51916 i = filp->f_pos;
51917 switch (i) {
51918 @@ -2515,7 +2639,7 @@ static const struct pid_entry tgid_base_stuff[] = {
51919 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
51920 #endif
51921 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
51922 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
51923 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
51924 INF("syscall", S_IRUGO, proc_pid_syscall),
51925 #endif
51926 INF("cmdline", S_IRUGO, proc_pid_cmdline),
51927 @@ -2540,10 +2664,10 @@ static const struct pid_entry tgid_base_stuff[] = {
51928 #ifdef CONFIG_SECURITY
51929 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
51930 #endif
51931 -#ifdef CONFIG_KALLSYMS
51932 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
51933 INF("wchan", S_IRUGO, proc_pid_wchan),
51934 #endif
51935 -#ifdef CONFIG_STACKTRACE
51936 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
51937 ONE("stack", S_IRUGO, proc_pid_stack),
51938 #endif
51939 #ifdef CONFIG_SCHEDSTATS
51940 @@ -2577,6 +2701,9 @@ static const struct pid_entry tgid_base_stuff[] = {
51941 #ifdef CONFIG_HARDWALL
51942 INF("hardwall", S_IRUGO, proc_pid_hardwall),
51943 #endif
51944 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
51945 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
51946 +#endif
51947 #ifdef CONFIG_USER_NS
51948 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
51949 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
51950 @@ -2705,7 +2832,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
51951 if (!inode)
51952 goto out;
51953
51954 +#ifdef CONFIG_GRKERNSEC_PROC_USER
51955 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
51956 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51957 + inode->i_gid = grsec_proc_gid;
51958 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
51959 +#else
51960 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
51961 +#endif
51962 inode->i_op = &proc_tgid_base_inode_operations;
51963 inode->i_fop = &proc_tgid_base_operations;
51964 inode->i_flags|=S_IMMUTABLE;
51965 @@ -2743,7 +2877,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
51966 if (!task)
51967 goto out;
51968
51969 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
51970 + goto out_put_task;
51971 +
51972 result = proc_pid_instantiate(dir, dentry, task, NULL);
51973 +out_put_task:
51974 put_task_struct(task);
51975 out:
51976 return result;
51977 @@ -2806,6 +2944,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
51978 static int fake_filldir(void *buf, const char *name, int namelen,
51979 loff_t offset, u64 ino, unsigned d_type)
51980 {
51981 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
51982 + __buf->error = -EINVAL;
51983 return 0;
51984 }
51985
51986 @@ -2857,7 +2997,7 @@ static const struct pid_entry tid_base_stuff[] = {
51987 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
51988 #endif
51989 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
51990 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
51991 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
51992 INF("syscall", S_IRUGO, proc_pid_syscall),
51993 #endif
51994 INF("cmdline", S_IRUGO, proc_pid_cmdline),
51995 @@ -2884,10 +3024,10 @@ static const struct pid_entry tid_base_stuff[] = {
51996 #ifdef CONFIG_SECURITY
51997 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
51998 #endif
51999 -#ifdef CONFIG_KALLSYMS
52000 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52001 INF("wchan", S_IRUGO, proc_pid_wchan),
52002 #endif
52003 -#ifdef CONFIG_STACKTRACE
52004 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52005 ONE("stack", S_IRUGO, proc_pid_stack),
52006 #endif
52007 #ifdef CONFIG_SCHEDSTATS
52008 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
52009 index 82676e3..5f8518a 100644
52010 --- a/fs/proc/cmdline.c
52011 +++ b/fs/proc/cmdline.c
52012 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
52013
52014 static int __init proc_cmdline_init(void)
52015 {
52016 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
52017 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
52018 +#else
52019 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
52020 +#endif
52021 return 0;
52022 }
52023 module_init(proc_cmdline_init);
52024 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
52025 index b143471..bb105e5 100644
52026 --- a/fs/proc/devices.c
52027 +++ b/fs/proc/devices.c
52028 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
52029
52030 static int __init proc_devices_init(void)
52031 {
52032 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
52033 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
52034 +#else
52035 proc_create("devices", 0, NULL, &proc_devinfo_operations);
52036 +#endif
52037 return 0;
52038 }
52039 module_init(proc_devices_init);
52040 diff --git a/fs/proc/fd.c b/fs/proc/fd.c
52041 index d7a4a28..0201742 100644
52042 --- a/fs/proc/fd.c
52043 +++ b/fs/proc/fd.c
52044 @@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
52045 if (!task)
52046 return -ENOENT;
52047
52048 - files = get_files_struct(task);
52049 + if (!gr_acl_handle_procpidmem(task))
52050 + files = get_files_struct(task);
52051 put_task_struct(task);
52052
52053 if (files) {
52054 @@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
52055 */
52056 int proc_fd_permission(struct inode *inode, int mask)
52057 {
52058 + struct task_struct *task;
52059 int rv = generic_permission(inode, mask);
52060 - if (rv == 0)
52061 - return 0;
52062 +
52063 if (task_pid(current) == proc_pid(inode))
52064 rv = 0;
52065 +
52066 + task = get_proc_task(inode);
52067 + if (task == NULL)
52068 + return rv;
52069 +
52070 + if (gr_acl_handle_procpidmem(task))
52071 + rv = -EACCES;
52072 +
52073 + put_task_struct(task);
52074 +
52075 return rv;
52076 }
52077
52078 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
52079 index 439ae688..c21ac36 100644
52080 --- a/fs/proc/inode.c
52081 +++ b/fs/proc/inode.c
52082 @@ -21,11 +21,17 @@
52083 #include <linux/seq_file.h>
52084 #include <linux/slab.h>
52085 #include <linux/mount.h>
52086 +#include <linux/grsecurity.h>
52087
52088 #include <asm/uaccess.h>
52089
52090 #include "internal.h"
52091
52092 +#ifdef CONFIG_PROC_SYSCTL
52093 +extern const struct inode_operations proc_sys_inode_operations;
52094 +extern const struct inode_operations proc_sys_dir_operations;
52095 +#endif
52096 +
52097 static void proc_evict_inode(struct inode *inode)
52098 {
52099 struct proc_dir_entry *de;
52100 @@ -53,6 +59,13 @@ static void proc_evict_inode(struct inode *inode)
52101 ns = PROC_I(inode)->ns;
52102 if (ns_ops && ns)
52103 ns_ops->put(ns);
52104 +
52105 +#ifdef CONFIG_PROC_SYSCTL
52106 + if (inode->i_op == &proc_sys_inode_operations ||
52107 + inode->i_op == &proc_sys_dir_operations)
52108 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
52109 +#endif
52110 +
52111 }
52112
52113 static struct kmem_cache * proc_inode_cachep;
52114 @@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
52115 if (de->mode) {
52116 inode->i_mode = de->mode;
52117 inode->i_uid = de->uid;
52118 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52119 + inode->i_gid = grsec_proc_gid;
52120 +#else
52121 inode->i_gid = de->gid;
52122 +#endif
52123 }
52124 if (de->size)
52125 inode->i_size = de->size;
52126 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
52127 index 252544c..04395b9 100644
52128 --- a/fs/proc/internal.h
52129 +++ b/fs/proc/internal.h
52130 @@ -55,6 +55,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52131 struct pid *pid, struct task_struct *task);
52132 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52133 struct pid *pid, struct task_struct *task);
52134 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52135 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
52136 +#endif
52137 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
52138
52139 extern const struct file_operations proc_tid_children_operations;
52140 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
52141 index e96d4f1..8b116ed 100644
52142 --- a/fs/proc/kcore.c
52143 +++ b/fs/proc/kcore.c
52144 @@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52145 * the addresses in the elf_phdr on our list.
52146 */
52147 start = kc_offset_to_vaddr(*fpos - elf_buflen);
52148 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
52149 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
52150 + if (tsz > buflen)
52151 tsz = buflen;
52152 -
52153 +
52154 while (buflen) {
52155 struct kcore_list *m;
52156
52157 @@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52158 kfree(elf_buf);
52159 } else {
52160 if (kern_addr_valid(start)) {
52161 - unsigned long n;
52162 + char *elf_buf;
52163 + mm_segment_t oldfs;
52164
52165 - n = copy_to_user(buffer, (char *)start, tsz);
52166 - /*
52167 - * We cannot distinguish between fault on source
52168 - * and fault on destination. When this happens
52169 - * we clear too and hope it will trigger the
52170 - * EFAULT again.
52171 - */
52172 - if (n) {
52173 - if (clear_user(buffer + tsz - n,
52174 - n))
52175 + elf_buf = kmalloc(tsz, GFP_KERNEL);
52176 + if (!elf_buf)
52177 + return -ENOMEM;
52178 + oldfs = get_fs();
52179 + set_fs(KERNEL_DS);
52180 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
52181 + set_fs(oldfs);
52182 + if (copy_to_user(buffer, elf_buf, tsz)) {
52183 + kfree(elf_buf);
52184 return -EFAULT;
52185 + }
52186 }
52187 + set_fs(oldfs);
52188 + kfree(elf_buf);
52189 } else {
52190 if (clear_user(buffer, tsz))
52191 return -EFAULT;
52192 @@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52193
52194 static int open_kcore(struct inode *inode, struct file *filp)
52195 {
52196 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
52197 + return -EPERM;
52198 +#endif
52199 if (!capable(CAP_SYS_RAWIO))
52200 return -EPERM;
52201 if (kcore_need_update)
52202 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
52203 index 80e4645..53e5fcf 100644
52204 --- a/fs/proc/meminfo.c
52205 +++ b/fs/proc/meminfo.c
52206 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
52207 vmi.used >> 10,
52208 vmi.largest_chunk >> 10
52209 #ifdef CONFIG_MEMORY_FAILURE
52210 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
52211 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
52212 #endif
52213 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
52214 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
52215 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
52216 index b1822dd..df622cb 100644
52217 --- a/fs/proc/nommu.c
52218 +++ b/fs/proc/nommu.c
52219 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
52220 if (len < 1)
52221 len = 1;
52222 seq_printf(m, "%*c", len, ' ');
52223 - seq_path(m, &file->f_path, "");
52224 + seq_path(m, &file->f_path, "\n\\");
52225 }
52226
52227 seq_putc(m, '\n');
52228 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
52229 index fe72cd0..21b52ff 100644
52230 --- a/fs/proc/proc_net.c
52231 +++ b/fs/proc/proc_net.c
52232 @@ -23,6 +23,7 @@
52233 #include <linux/nsproxy.h>
52234 #include <net/net_namespace.h>
52235 #include <linux/seq_file.h>
52236 +#include <linux/grsecurity.h>
52237
52238 #include "internal.h"
52239
52240 @@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
52241 struct task_struct *task;
52242 struct nsproxy *ns;
52243 struct net *net = NULL;
52244 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52245 + const struct cred *cred = current_cred();
52246 +#endif
52247 +
52248 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52249 + if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
52250 + return net;
52251 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52252 + if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
52253 + return net;
52254 +#endif
52255
52256 rcu_read_lock();
52257 task = pid_task(proc_pid(dir), PIDTYPE_PID);
52258 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
52259 index 1827d88..9a60b01 100644
52260 --- a/fs/proc/proc_sysctl.c
52261 +++ b/fs/proc/proc_sysctl.c
52262 @@ -12,11 +12,15 @@
52263 #include <linux/module.h>
52264 #include "internal.h"
52265
52266 +extern int gr_handle_chroot_sysctl(const int op);
52267 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
52268 + const int op);
52269 +
52270 static const struct dentry_operations proc_sys_dentry_operations;
52271 static const struct file_operations proc_sys_file_operations;
52272 -static const struct inode_operations proc_sys_inode_operations;
52273 +const struct inode_operations proc_sys_inode_operations;
52274 static const struct file_operations proc_sys_dir_file_operations;
52275 -static const struct inode_operations proc_sys_dir_operations;
52276 +const struct inode_operations proc_sys_dir_operations;
52277
52278 void proc_sys_poll_notify(struct ctl_table_poll *poll)
52279 {
52280 @@ -466,6 +470,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
52281
52282 err = NULL;
52283 d_set_d_op(dentry, &proc_sys_dentry_operations);
52284 +
52285 + gr_handle_proc_create(dentry, inode);
52286 +
52287 d_add(dentry, inode);
52288
52289 out:
52290 @@ -481,6 +488,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
52291 struct inode *inode = filp->f_path.dentry->d_inode;
52292 struct ctl_table_header *head = grab_header(inode);
52293 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
52294 + int op = write ? MAY_WRITE : MAY_READ;
52295 ssize_t error;
52296 size_t res;
52297
52298 @@ -492,7 +500,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
52299 * and won't be until we finish.
52300 */
52301 error = -EPERM;
52302 - if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
52303 + if (sysctl_perm(head, table, op))
52304 goto out;
52305
52306 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
52307 @@ -500,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
52308 if (!table->proc_handler)
52309 goto out;
52310
52311 +#ifdef CONFIG_GRKERNSEC
52312 + error = -EPERM;
52313 + if (gr_handle_chroot_sysctl(op))
52314 + goto out;
52315 + dget(filp->f_path.dentry);
52316 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
52317 + dput(filp->f_path.dentry);
52318 + goto out;
52319 + }
52320 + dput(filp->f_path.dentry);
52321 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
52322 + goto out;
52323 + if (write && !capable(CAP_SYS_ADMIN))
52324 + goto out;
52325 +#endif
52326 +
52327 /* careful: calling conventions are nasty here */
52328 res = count;
52329 error = table->proc_handler(table, write, buf, &res, ppos);
52330 @@ -597,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
52331 return -ENOMEM;
52332 } else {
52333 d_set_d_op(child, &proc_sys_dentry_operations);
52334 +
52335 + gr_handle_proc_create(child, inode);
52336 +
52337 d_add(child, inode);
52338 }
52339 } else {
52340 @@ -640,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
52341 if ((*pos)++ < file->f_pos)
52342 return 0;
52343
52344 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
52345 + return 0;
52346 +
52347 if (unlikely(S_ISLNK(table->mode)))
52348 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
52349 else
52350 @@ -750,6 +780,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
52351 if (IS_ERR(head))
52352 return PTR_ERR(head);
52353
52354 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
52355 + return -ENOENT;
52356 +
52357 generic_fillattr(inode, stat);
52358 if (table)
52359 stat->mode = (stat->mode & S_IFMT) | table->mode;
52360 @@ -772,13 +805,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
52361 .llseek = generic_file_llseek,
52362 };
52363
52364 -static const struct inode_operations proc_sys_inode_operations = {
52365 +const struct inode_operations proc_sys_inode_operations = {
52366 .permission = proc_sys_permission,
52367 .setattr = proc_sys_setattr,
52368 .getattr = proc_sys_getattr,
52369 };
52370
52371 -static const struct inode_operations proc_sys_dir_operations = {
52372 +const struct inode_operations proc_sys_dir_operations = {
52373 .lookup = proc_sys_lookup,
52374 .permission = proc_sys_permission,
52375 .setattr = proc_sys_setattr,
52376 diff --git a/fs/proc/root.c b/fs/proc/root.c
52377 index c6e9fac..a740964 100644
52378 --- a/fs/proc/root.c
52379 +++ b/fs/proc/root.c
52380 @@ -176,7 +176,15 @@ void __init proc_root_init(void)
52381 #ifdef CONFIG_PROC_DEVICETREE
52382 proc_device_tree_init();
52383 #endif
52384 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
52385 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52386 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
52387 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52388 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
52389 +#endif
52390 +#else
52391 proc_mkdir("bus", NULL);
52392 +#endif
52393 proc_sys_init();
52394 }
52395
52396 diff --git a/fs/proc/self.c b/fs/proc/self.c
52397 index aa5cc3b..c91a5d0 100644
52398 --- a/fs/proc/self.c
52399 +++ b/fs/proc/self.c
52400 @@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
52401 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
52402 void *cookie)
52403 {
52404 - char *s = nd_get_link(nd);
52405 + const char *s = nd_get_link(nd);
52406 if (!IS_ERR(s))
52407 kfree(s);
52408 }
52409 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
52410 index ca5ce7f..02c1cf0 100644
52411 --- a/fs/proc/task_mmu.c
52412 +++ b/fs/proc/task_mmu.c
52413 @@ -11,12 +11,19 @@
52414 #include <linux/rmap.h>
52415 #include <linux/swap.h>
52416 #include <linux/swapops.h>
52417 +#include <linux/grsecurity.h>
52418
52419 #include <asm/elf.h>
52420 #include <asm/uaccess.h>
52421 #include <asm/tlbflush.h>
52422 #include "internal.h"
52423
52424 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52425 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52426 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
52427 + _mm->pax_flags & MF_PAX_SEGMEXEC))
52428 +#endif
52429 +
52430 void task_mem(struct seq_file *m, struct mm_struct *mm)
52431 {
52432 unsigned long data, text, lib, swap;
52433 @@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
52434 "VmExe:\t%8lu kB\n"
52435 "VmLib:\t%8lu kB\n"
52436 "VmPTE:\t%8lu kB\n"
52437 - "VmSwap:\t%8lu kB\n",
52438 - hiwater_vm << (PAGE_SHIFT-10),
52439 + "VmSwap:\t%8lu kB\n"
52440 +
52441 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
52442 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
52443 +#endif
52444 +
52445 + ,hiwater_vm << (PAGE_SHIFT-10),
52446 total_vm << (PAGE_SHIFT-10),
52447 mm->locked_vm << (PAGE_SHIFT-10),
52448 mm->pinned_vm << (PAGE_SHIFT-10),
52449 @@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
52450 data << (PAGE_SHIFT-10),
52451 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
52452 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
52453 - swap << (PAGE_SHIFT-10));
52454 + swap << (PAGE_SHIFT-10)
52455 +
52456 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
52457 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52458 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
52459 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
52460 +#else
52461 + , mm->context.user_cs_base
52462 + , mm->context.user_cs_limit
52463 +#endif
52464 +#endif
52465 +
52466 + );
52467 }
52468
52469 unsigned long task_vsize(struct mm_struct *mm)
52470 @@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
52471 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
52472 }
52473
52474 - /* We don't show the stack guard page in /proc/maps */
52475 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52476 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
52477 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
52478 +#else
52479 start = vma->vm_start;
52480 - if (stack_guard_page_start(vma, start))
52481 - start += PAGE_SIZE;
52482 end = vma->vm_end;
52483 - if (stack_guard_page_end(vma, end))
52484 - end -= PAGE_SIZE;
52485 +#endif
52486
52487 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
52488 start,
52489 @@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
52490 flags & VM_WRITE ? 'w' : '-',
52491 flags & VM_EXEC ? 'x' : '-',
52492 flags & VM_MAYSHARE ? 's' : 'p',
52493 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52494 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
52495 +#else
52496 pgoff,
52497 +#endif
52498 MAJOR(dev), MINOR(dev), ino, &len);
52499
52500 /*
52501 @@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
52502 */
52503 if (file) {
52504 pad_len_spaces(m, len);
52505 - seq_path(m, &file->f_path, "\n");
52506 + seq_path(m, &file->f_path, "\n\\");
52507 goto done;
52508 }
52509
52510 @@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
52511 * Thread stack in /proc/PID/task/TID/maps or
52512 * the main process stack.
52513 */
52514 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
52515 - vma->vm_end >= mm->start_stack)) {
52516 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
52517 + (vma->vm_start <= mm->start_stack &&
52518 + vma->vm_end >= mm->start_stack)) {
52519 name = "[stack]";
52520 } else {
52521 /* Thread stack in /proc/PID/maps */
52522 @@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
52523 struct proc_maps_private *priv = m->private;
52524 struct task_struct *task = priv->task;
52525
52526 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52527 + if (current->exec_id != m->exec_id) {
52528 + gr_log_badprocpid("maps");
52529 + return 0;
52530 + }
52531 +#endif
52532 +
52533 show_map_vma(m, vma, is_pid);
52534
52535 if (m->count < m->size) /* vma is copied successfully */
52536 @@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
52537 .private = &mss,
52538 };
52539
52540 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52541 + if (current->exec_id != m->exec_id) {
52542 + gr_log_badprocpid("smaps");
52543 + return 0;
52544 + }
52545 +#endif
52546 memset(&mss, 0, sizeof mss);
52547 - mss.vma = vma;
52548 - /* mmap_sem is held in m_start */
52549 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
52550 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
52551 -
52552 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52553 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
52554 +#endif
52555 + mss.vma = vma;
52556 + /* mmap_sem is held in m_start */
52557 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
52558 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
52559 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52560 + }
52561 +#endif
52562 show_map_vma(m, vma, is_pid);
52563
52564 seq_printf(m,
52565 @@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
52566 "KernelPageSize: %8lu kB\n"
52567 "MMUPageSize: %8lu kB\n"
52568 "Locked: %8lu kB\n",
52569 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52570 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
52571 +#else
52572 (vma->vm_end - vma->vm_start) >> 10,
52573 +#endif
52574 mss.resident >> 10,
52575 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
52576 mss.shared_clean >> 10,
52577 @@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
52578 int n;
52579 char buffer[50];
52580
52581 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52582 + if (current->exec_id != m->exec_id) {
52583 + gr_log_badprocpid("numa_maps");
52584 + return 0;
52585 + }
52586 +#endif
52587 +
52588 if (!mm)
52589 return 0;
52590
52591 @@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
52592 mpol_to_str(buffer, sizeof(buffer), pol);
52593 mpol_cond_put(pol);
52594
52595 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52596 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
52597 +#else
52598 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
52599 +#endif
52600
52601 if (file) {
52602 seq_printf(m, " file=");
52603 - seq_path(m, &file->f_path, "\n\t= ");
52604 + seq_path(m, &file->f_path, "\n\t\\= ");
52605 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
52606 seq_printf(m, " heap");
52607 } else {
52608 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
52609 index 1ccfa53..0848f95 100644
52610 --- a/fs/proc/task_nommu.c
52611 +++ b/fs/proc/task_nommu.c
52612 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
52613 else
52614 bytes += kobjsize(mm);
52615
52616 - if (current->fs && current->fs->users > 1)
52617 + if (current->fs && atomic_read(&current->fs->users) > 1)
52618 sbytes += kobjsize(current->fs);
52619 else
52620 bytes += kobjsize(current->fs);
52621 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
52622
52623 if (file) {
52624 pad_len_spaces(m, len);
52625 - seq_path(m, &file->f_path, "");
52626 + seq_path(m, &file->f_path, "\n\\");
52627 } else if (mm) {
52628 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
52629
52630 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
52631 index 16e8abb..2dcf914 100644
52632 --- a/fs/quota/netlink.c
52633 +++ b/fs/quota/netlink.c
52634 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
52635 void quota_send_warning(struct kqid qid, dev_t dev,
52636 const char warntype)
52637 {
52638 - static atomic_t seq;
52639 + static atomic_unchecked_t seq;
52640 struct sk_buff *skb;
52641 void *msg_head;
52642 int ret;
52643 @@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
52644 "VFS: Not enough memory to send quota warning.\n");
52645 return;
52646 }
52647 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
52648 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
52649 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
52650 if (!msg_head) {
52651 printk(KERN_ERR
52652 diff --git a/fs/readdir.c b/fs/readdir.c
52653 index 5e69ef5..e5d9099 100644
52654 --- a/fs/readdir.c
52655 +++ b/fs/readdir.c
52656 @@ -17,6 +17,7 @@
52657 #include <linux/security.h>
52658 #include <linux/syscalls.h>
52659 #include <linux/unistd.h>
52660 +#include <linux/namei.h>
52661
52662 #include <asm/uaccess.h>
52663
52664 @@ -67,6 +68,7 @@ struct old_linux_dirent {
52665
52666 struct readdir_callback {
52667 struct old_linux_dirent __user * dirent;
52668 + struct file * file;
52669 int result;
52670 };
52671
52672 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
52673 buf->result = -EOVERFLOW;
52674 return -EOVERFLOW;
52675 }
52676 +
52677 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
52678 + return 0;
52679 +
52680 buf->result++;
52681 dirent = buf->dirent;
52682 if (!access_ok(VERIFY_WRITE, dirent,
52683 @@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
52684
52685 buf.result = 0;
52686 buf.dirent = dirent;
52687 + buf.file = f.file;
52688
52689 error = vfs_readdir(f.file, fillonedir, &buf);
52690 if (buf.result)
52691 @@ -139,6 +146,7 @@ struct linux_dirent {
52692 struct getdents_callback {
52693 struct linux_dirent __user * current_dir;
52694 struct linux_dirent __user * previous;
52695 + struct file * file;
52696 int count;
52697 int error;
52698 };
52699 @@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
52700 buf->error = -EOVERFLOW;
52701 return -EOVERFLOW;
52702 }
52703 +
52704 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
52705 + return 0;
52706 +
52707 dirent = buf->previous;
52708 if (dirent) {
52709 if (__put_user(offset, &dirent->d_off))
52710 @@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
52711 buf.previous = NULL;
52712 buf.count = count;
52713 buf.error = 0;
52714 + buf.file = f.file;
52715
52716 error = vfs_readdir(f.file, filldir, &buf);
52717 if (error >= 0)
52718 @@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
52719 struct getdents_callback64 {
52720 struct linux_dirent64 __user * current_dir;
52721 struct linux_dirent64 __user * previous;
52722 + struct file *file;
52723 int count;
52724 int error;
52725 };
52726 @@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
52727 buf->error = -EINVAL; /* only used if we fail.. */
52728 if (reclen > buf->count)
52729 return -EINVAL;
52730 +
52731 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
52732 + return 0;
52733 +
52734 dirent = buf->previous;
52735 if (dirent) {
52736 if (__put_user(offset, &dirent->d_off))
52737 @@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
52738
52739 buf.current_dir = dirent;
52740 buf.previous = NULL;
52741 + buf.file = f.file;
52742 buf.count = count;
52743 buf.error = 0;
52744
52745 @@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
52746 error = buf.error;
52747 lastdirent = buf.previous;
52748 if (lastdirent) {
52749 - typeof(lastdirent->d_off) d_off = f.file->f_pos;
52750 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
52751 if (__put_user(d_off, &lastdirent->d_off))
52752 error = -EFAULT;
52753 else
52754 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
52755 index 2b7882b..1c5ef48 100644
52756 --- a/fs/reiserfs/do_balan.c
52757 +++ b/fs/reiserfs/do_balan.c
52758 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
52759 return;
52760 }
52761
52762 - atomic_inc(&(fs_generation(tb->tb_sb)));
52763 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
52764 do_balance_starts(tb);
52765
52766 /* balance leaf returns 0 except if combining L R and S into
52767 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
52768 index e60e870..f40ac16 100644
52769 --- a/fs/reiserfs/procfs.c
52770 +++ b/fs/reiserfs/procfs.c
52771 @@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
52772 "SMALL_TAILS " : "NO_TAILS ",
52773 replay_only(sb) ? "REPLAY_ONLY " : "",
52774 convert_reiserfs(sb) ? "CONV " : "",
52775 - atomic_read(&r->s_generation_counter),
52776 + atomic_read_unchecked(&r->s_generation_counter),
52777 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
52778 SF(s_do_balance), SF(s_unneeded_left_neighbor),
52779 SF(s_good_search_by_key_reada), SF(s_bmaps),
52780 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
52781 index 157e474..65a6114 100644
52782 --- a/fs/reiserfs/reiserfs.h
52783 +++ b/fs/reiserfs/reiserfs.h
52784 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
52785 /* Comment? -Hans */
52786 wait_queue_head_t s_wait;
52787 /* To be obsoleted soon by per buffer seals.. -Hans */
52788 - atomic_t s_generation_counter; // increased by one every time the
52789 + atomic_unchecked_t s_generation_counter; // increased by one every time the
52790 // tree gets re-balanced
52791 unsigned long s_properties; /* File system properties. Currently holds
52792 on-disk FS format */
52793 @@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
52794 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
52795
52796 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
52797 -#define get_generation(s) atomic_read (&fs_generation(s))
52798 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
52799 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
52800 #define __fs_changed(gen,s) (gen != get_generation (s))
52801 #define fs_changed(gen,s) \
52802 diff --git a/fs/select.c b/fs/select.c
52803 index 2ef72d9..f213b17 100644
52804 --- a/fs/select.c
52805 +++ b/fs/select.c
52806 @@ -20,6 +20,7 @@
52807 #include <linux/export.h>
52808 #include <linux/slab.h>
52809 #include <linux/poll.h>
52810 +#include <linux/security.h>
52811 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
52812 #include <linux/file.h>
52813 #include <linux/fdtable.h>
52814 @@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
52815 struct poll_list *walk = head;
52816 unsigned long todo = nfds;
52817
52818 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
52819 if (nfds > rlimit(RLIMIT_NOFILE))
52820 return -EINVAL;
52821
52822 diff --git a/fs/seq_file.c b/fs/seq_file.c
52823 index f2bc3df..239d4f6 100644
52824 --- a/fs/seq_file.c
52825 +++ b/fs/seq_file.c
52826 @@ -10,6 +10,7 @@
52827 #include <linux/seq_file.h>
52828 #include <linux/slab.h>
52829 #include <linux/cred.h>
52830 +#include <linux/sched.h>
52831
52832 #include <asm/uaccess.h>
52833 #include <asm/page.h>
52834 @@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
52835 #ifdef CONFIG_USER_NS
52836 p->user_ns = file->f_cred->user_ns;
52837 #endif
52838 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52839 + p->exec_id = current->exec_id;
52840 +#endif
52841
52842 /*
52843 * Wrappers around seq_open(e.g. swaps_open) need to be
52844 @@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
52845 return 0;
52846 }
52847 if (!m->buf) {
52848 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
52849 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
52850 if (!m->buf)
52851 return -ENOMEM;
52852 }
52853 @@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
52854 Eoverflow:
52855 m->op->stop(m, p);
52856 kfree(m->buf);
52857 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
52858 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
52859 return !m->buf ? -ENOMEM : -EAGAIN;
52860 }
52861
52862 @@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
52863
52864 /* grab buffer if we didn't have one */
52865 if (!m->buf) {
52866 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
52867 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
52868 if (!m->buf)
52869 goto Enomem;
52870 }
52871 @@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
52872 goto Fill;
52873 m->op->stop(m, p);
52874 kfree(m->buf);
52875 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
52876 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
52877 if (!m->buf)
52878 goto Enomem;
52879 m->count = 0;
52880 @@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
52881 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
52882 void *data)
52883 {
52884 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
52885 + seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
52886 int res = -ENOMEM;
52887
52888 if (op) {
52889 diff --git a/fs/splice.c b/fs/splice.c
52890 index 6909d89..5b2e8f9 100644
52891 --- a/fs/splice.c
52892 +++ b/fs/splice.c
52893 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
52894 pipe_lock(pipe);
52895
52896 for (;;) {
52897 - if (!pipe->readers) {
52898 + if (!atomic_read(&pipe->readers)) {
52899 send_sig(SIGPIPE, current, 0);
52900 if (!ret)
52901 ret = -EPIPE;
52902 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
52903 do_wakeup = 0;
52904 }
52905
52906 - pipe->waiting_writers++;
52907 + atomic_inc(&pipe->waiting_writers);
52908 pipe_wait(pipe);
52909 - pipe->waiting_writers--;
52910 + atomic_dec(&pipe->waiting_writers);
52911 }
52912
52913 pipe_unlock(pipe);
52914 @@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
52915 old_fs = get_fs();
52916 set_fs(get_ds());
52917 /* The cast to a user pointer is valid due to the set_fs() */
52918 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
52919 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
52920 set_fs(old_fs);
52921
52922 return res;
52923 @@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
52924 old_fs = get_fs();
52925 set_fs(get_ds());
52926 /* The cast to a user pointer is valid due to the set_fs() */
52927 - res = vfs_write(file, (const char __user *)buf, count, &pos);
52928 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
52929 set_fs(old_fs);
52930
52931 return res;
52932 @@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
52933 goto err;
52934
52935 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
52936 - vec[i].iov_base = (void __user *) page_address(page);
52937 + vec[i].iov_base = (void __force_user *) page_address(page);
52938 vec[i].iov_len = this_len;
52939 spd.pages[i] = page;
52940 spd.nr_pages++;
52941 @@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
52942 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
52943 {
52944 while (!pipe->nrbufs) {
52945 - if (!pipe->writers)
52946 + if (!atomic_read(&pipe->writers))
52947 return 0;
52948
52949 - if (!pipe->waiting_writers && sd->num_spliced)
52950 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
52951 return 0;
52952
52953 if (sd->flags & SPLICE_F_NONBLOCK)
52954 @@ -1189,7 +1189,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
52955 * out of the pipe right after the splice_to_pipe(). So set
52956 * PIPE_READERS appropriately.
52957 */
52958 - pipe->readers = 1;
52959 + atomic_set(&pipe->readers, 1);
52960
52961 current->splice_pipe = pipe;
52962 }
52963 @@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
52964 ret = -ERESTARTSYS;
52965 break;
52966 }
52967 - if (!pipe->writers)
52968 + if (!atomic_read(&pipe->writers))
52969 break;
52970 - if (!pipe->waiting_writers) {
52971 + if (!atomic_read(&pipe->waiting_writers)) {
52972 if (flags & SPLICE_F_NONBLOCK) {
52973 ret = -EAGAIN;
52974 break;
52975 @@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
52976 pipe_lock(pipe);
52977
52978 while (pipe->nrbufs >= pipe->buffers) {
52979 - if (!pipe->readers) {
52980 + if (!atomic_read(&pipe->readers)) {
52981 send_sig(SIGPIPE, current, 0);
52982 ret = -EPIPE;
52983 break;
52984 @@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
52985 ret = -ERESTARTSYS;
52986 break;
52987 }
52988 - pipe->waiting_writers++;
52989 + atomic_inc(&pipe->waiting_writers);
52990 pipe_wait(pipe);
52991 - pipe->waiting_writers--;
52992 + atomic_dec(&pipe->waiting_writers);
52993 }
52994
52995 pipe_unlock(pipe);
52996 @@ -1823,14 +1823,14 @@ retry:
52997 pipe_double_lock(ipipe, opipe);
52998
52999 do {
53000 - if (!opipe->readers) {
53001 + if (!atomic_read(&opipe->readers)) {
53002 send_sig(SIGPIPE, current, 0);
53003 if (!ret)
53004 ret = -EPIPE;
53005 break;
53006 }
53007
53008 - if (!ipipe->nrbufs && !ipipe->writers)
53009 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
53010 break;
53011
53012 /*
53013 @@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
53014 pipe_double_lock(ipipe, opipe);
53015
53016 do {
53017 - if (!opipe->readers) {
53018 + if (!atomic_read(&opipe->readers)) {
53019 send_sig(SIGPIPE, current, 0);
53020 if (!ret)
53021 ret = -EPIPE;
53022 @@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
53023 * return EAGAIN if we have the potential of some data in the
53024 * future, otherwise just return 0
53025 */
53026 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
53027 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
53028 ret = -EAGAIN;
53029
53030 pipe_unlock(ipipe);
53031 diff --git a/fs/stat.c b/fs/stat.c
53032 index 14f4545..9b7f55b 100644
53033 --- a/fs/stat.c
53034 +++ b/fs/stat.c
53035 @@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
53036 stat->gid = inode->i_gid;
53037 stat->rdev = inode->i_rdev;
53038 stat->size = i_size_read(inode);
53039 - stat->atime = inode->i_atime;
53040 - stat->mtime = inode->i_mtime;
53041 + if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
53042 + stat->atime = inode->i_ctime;
53043 + stat->mtime = inode->i_ctime;
53044 + } else {
53045 + stat->atime = inode->i_atime;
53046 + stat->mtime = inode->i_mtime;
53047 + }
53048 stat->ctime = inode->i_ctime;
53049 stat->blksize = (1 << inode->i_blkbits);
53050 stat->blocks = inode->i_blocks;
53051 @@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
53052 if (retval)
53053 return retval;
53054
53055 - if (inode->i_op->getattr)
53056 - return inode->i_op->getattr(mnt, dentry, stat);
53057 + if (inode->i_op->getattr) {
53058 + retval = inode->i_op->getattr(mnt, dentry, stat);
53059 + if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
53060 + stat->atime = stat->ctime;
53061 + stat->mtime = stat->ctime;
53062 + }
53063 + return retval;
53064 + }
53065
53066 generic_fillattr(inode, stat);
53067 return 0;
53068 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
53069 index 2fbdff6..5530a61 100644
53070 --- a/fs/sysfs/dir.c
53071 +++ b/fs/sysfs/dir.c
53072 @@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
53073 struct sysfs_dirent *sd;
53074 int rc;
53075
53076 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
53077 + const char *parent_name = parent_sd->s_name;
53078 +
53079 + mode = S_IFDIR | S_IRWXU;
53080 +
53081 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
53082 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
53083 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
53084 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
53085 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
53086 +#endif
53087 +
53088 /* allocate */
53089 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
53090 if (!sd)
53091 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
53092 index 602f56d..6853db8 100644
53093 --- a/fs/sysfs/file.c
53094 +++ b/fs/sysfs/file.c
53095 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
53096
53097 struct sysfs_open_dirent {
53098 atomic_t refcnt;
53099 - atomic_t event;
53100 + atomic_unchecked_t event;
53101 wait_queue_head_t poll;
53102 struct list_head buffers; /* goes through sysfs_buffer.list */
53103 };
53104 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
53105 if (!sysfs_get_active(attr_sd))
53106 return -ENODEV;
53107
53108 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
53109 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
53110 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
53111
53112 sysfs_put_active(attr_sd);
53113 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
53114 return -ENOMEM;
53115
53116 atomic_set(&new_od->refcnt, 0);
53117 - atomic_set(&new_od->event, 1);
53118 + atomic_set_unchecked(&new_od->event, 1);
53119 init_waitqueue_head(&new_od->poll);
53120 INIT_LIST_HEAD(&new_od->buffers);
53121 goto retry;
53122 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
53123
53124 sysfs_put_active(attr_sd);
53125
53126 - if (buffer->event != atomic_read(&od->event))
53127 + if (buffer->event != atomic_read_unchecked(&od->event))
53128 goto trigger;
53129
53130 return DEFAULT_POLLMASK;
53131 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
53132
53133 od = sd->s_attr.open;
53134 if (od) {
53135 - atomic_inc(&od->event);
53136 + atomic_inc_unchecked(&od->event);
53137 wake_up_interruptible(&od->poll);
53138 }
53139
53140 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
53141 index 3c9eb56..9dea5be 100644
53142 --- a/fs/sysfs/symlink.c
53143 +++ b/fs/sysfs/symlink.c
53144 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
53145
53146 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
53147 {
53148 - char *page = nd_get_link(nd);
53149 + const char *page = nd_get_link(nd);
53150 if (!IS_ERR(page))
53151 free_page((unsigned long)page);
53152 }
53153 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
53154 index c175b4d..8f36a16 100644
53155 --- a/fs/udf/misc.c
53156 +++ b/fs/udf/misc.c
53157 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
53158
53159 u8 udf_tag_checksum(const struct tag *t)
53160 {
53161 - u8 *data = (u8 *)t;
53162 + const u8 *data = (const u8 *)t;
53163 u8 checksum = 0;
53164 int i;
53165 for (i = 0; i < sizeof(struct tag); ++i)
53166 diff --git a/fs/utimes.c b/fs/utimes.c
53167 index f4fb7ec..3fe03c0 100644
53168 --- a/fs/utimes.c
53169 +++ b/fs/utimes.c
53170 @@ -1,6 +1,7 @@
53171 #include <linux/compiler.h>
53172 #include <linux/file.h>
53173 #include <linux/fs.h>
53174 +#include <linux/security.h>
53175 #include <linux/linkage.h>
53176 #include <linux/mount.h>
53177 #include <linux/namei.h>
53178 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
53179 goto mnt_drop_write_and_out;
53180 }
53181 }
53182 +
53183 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
53184 + error = -EACCES;
53185 + goto mnt_drop_write_and_out;
53186 + }
53187 +
53188 mutex_lock(&inode->i_mutex);
53189 error = notify_change(path->dentry, &newattrs);
53190 mutex_unlock(&inode->i_mutex);
53191 diff --git a/fs/xattr.c b/fs/xattr.c
53192 index 3377dff..4feded6 100644
53193 --- a/fs/xattr.c
53194 +++ b/fs/xattr.c
53195 @@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
53196 * Extended attribute SET operations
53197 */
53198 static long
53199 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
53200 +setxattr(struct path *path, const char __user *name, const void __user *value,
53201 size_t size, int flags)
53202 {
53203 int error;
53204 @@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
53205 posix_acl_fix_xattr_from_user(kvalue, size);
53206 }
53207
53208 - error = vfs_setxattr(d, kname, kvalue, size, flags);
53209 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
53210 + error = -EACCES;
53211 + goto out;
53212 + }
53213 +
53214 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
53215 out:
53216 if (vvalue)
53217 vfree(vvalue);
53218 @@ -377,7 +382,7 @@ retry:
53219 return error;
53220 error = mnt_want_write(path.mnt);
53221 if (!error) {
53222 - error = setxattr(path.dentry, name, value, size, flags);
53223 + error = setxattr(&path, name, value, size, flags);
53224 mnt_drop_write(path.mnt);
53225 }
53226 path_put(&path);
53227 @@ -401,7 +406,7 @@ retry:
53228 return error;
53229 error = mnt_want_write(path.mnt);
53230 if (!error) {
53231 - error = setxattr(path.dentry, name, value, size, flags);
53232 + error = setxattr(&path, name, value, size, flags);
53233 mnt_drop_write(path.mnt);
53234 }
53235 path_put(&path);
53236 @@ -416,16 +421,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
53237 const void __user *,value, size_t, size, int, flags)
53238 {
53239 struct fd f = fdget(fd);
53240 - struct dentry *dentry;
53241 int error = -EBADF;
53242
53243 if (!f.file)
53244 return error;
53245 - dentry = f.file->f_path.dentry;
53246 - audit_inode(NULL, dentry, 0);
53247 + audit_inode(NULL, f.file->f_path.dentry, 0);
53248 error = mnt_want_write_file(f.file);
53249 if (!error) {
53250 - error = setxattr(dentry, name, value, size, flags);
53251 + error = setxattr(&f.file->f_path, name, value, size, flags);
53252 mnt_drop_write_file(f.file);
53253 }
53254 fdput(f);
53255 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
53256 index 9fbea87..6b19972 100644
53257 --- a/fs/xattr_acl.c
53258 +++ b/fs/xattr_acl.c
53259 @@ -76,8 +76,8 @@ struct posix_acl *
53260 posix_acl_from_xattr(struct user_namespace *user_ns,
53261 const void *value, size_t size)
53262 {
53263 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
53264 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
53265 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
53266 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
53267 int count;
53268 struct posix_acl *acl;
53269 struct posix_acl_entry *acl_e;
53270 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
53271 index 572a858..12a9b0d 100644
53272 --- a/fs/xfs/xfs_bmap.c
53273 +++ b/fs/xfs/xfs_bmap.c
53274 @@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
53275 int nmap,
53276 int ret_nmap);
53277 #else
53278 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
53279 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
53280 #endif /* DEBUG */
53281
53282 STATIC int
53283 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
53284 index 1b9fc3e..e1bdde0 100644
53285 --- a/fs/xfs/xfs_dir2_sf.c
53286 +++ b/fs/xfs/xfs_dir2_sf.c
53287 @@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
53288 }
53289
53290 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
53291 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
53292 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
53293 + char name[sfep->namelen];
53294 + memcpy(name, sfep->name, sfep->namelen);
53295 + if (filldir(dirent, name, sfep->namelen,
53296 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
53297 + *offset = off & 0x7fffffff;
53298 + return 0;
53299 + }
53300 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
53301 off & 0x7fffffff, ino, DT_UNKNOWN)) {
53302 *offset = off & 0x7fffffff;
53303 return 0;
53304 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
53305 index c1c3ef8..0952438 100644
53306 --- a/fs/xfs/xfs_ioctl.c
53307 +++ b/fs/xfs/xfs_ioctl.c
53308 @@ -127,7 +127,7 @@ xfs_find_handle(
53309 }
53310
53311 error = -EFAULT;
53312 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
53313 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
53314 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
53315 goto out_put;
53316
53317 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
53318 index d82efaa..0904a8e 100644
53319 --- a/fs/xfs/xfs_iops.c
53320 +++ b/fs/xfs/xfs_iops.c
53321 @@ -395,7 +395,7 @@ xfs_vn_put_link(
53322 struct nameidata *nd,
53323 void *p)
53324 {
53325 - char *s = nd_get_link(nd);
53326 + const char *s = nd_get_link(nd);
53327
53328 if (!IS_ERR(s))
53329 kfree(s);
53330 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
53331 new file mode 100644
53332 index 0000000..92247e4
53333 --- /dev/null
53334 +++ b/grsecurity/Kconfig
53335 @@ -0,0 +1,1021 @@
53336 +#
53337 +# grecurity configuration
53338 +#
53339 +menu "Memory Protections"
53340 +depends on GRKERNSEC
53341 +
53342 +config GRKERNSEC_KMEM
53343 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
53344 + default y if GRKERNSEC_CONFIG_AUTO
53345 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
53346 + help
53347 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
53348 + be written to or read from to modify or leak the contents of the running
53349 + kernel. /dev/port will also not be allowed to be opened and support
53350 + for /dev/cpu/*/msr will be removed. If you have module
53351 + support disabled, enabling this will close up five ways that are
53352 + currently used to insert malicious code into the running kernel.
53353 +
53354 + Even with all these features enabled, we still highly recommend that
53355 + you use the RBAC system, as it is still possible for an attacker to
53356 + modify the running kernel through privileged I/O granted by ioperm/iopl.
53357 +
53358 + If you are not using XFree86, you may be able to stop this additional
53359 + case by enabling the 'Disable privileged I/O' option. Though nothing
53360 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
53361 + but only to video memory, which is the only writing we allow in this
53362 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
53363 + not be allowed to mprotect it with PROT_WRITE later.
53364 + Enabling this feature will prevent the "cpupower" and "powertop" tools
53365 + from working.
53366 +
53367 + It is highly recommended that you say Y here if you meet all the
53368 + conditions above.
53369 +
53370 +config GRKERNSEC_VM86
53371 + bool "Restrict VM86 mode"
53372 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
53373 + depends on X86_32
53374 +
53375 + help
53376 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
53377 + make use of a special execution mode on 32bit x86 processors called
53378 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
53379 + video cards and will still work with this option enabled. The purpose
53380 + of the option is to prevent exploitation of emulation errors in
53381 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
53382 + Nearly all users should be able to enable this option.
53383 +
53384 +config GRKERNSEC_IO
53385 + bool "Disable privileged I/O"
53386 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
53387 + depends on X86
53388 + select RTC_CLASS
53389 + select RTC_INTF_DEV
53390 + select RTC_DRV_CMOS
53391 +
53392 + help
53393 + If you say Y here, all ioperm and iopl calls will return an error.
53394 + Ioperm and iopl can be used to modify the running kernel.
53395 + Unfortunately, some programs need this access to operate properly,
53396 + the most notable of which are XFree86 and hwclock. hwclock can be
53397 + remedied by having RTC support in the kernel, so real-time
53398 + clock support is enabled if this option is enabled, to ensure
53399 + that hwclock operates correctly. XFree86 still will not
53400 + operate correctly with this option enabled, so DO NOT CHOOSE Y
53401 + IF YOU USE XFree86. If you use XFree86 and you still want to
53402 + protect your kernel against modification, use the RBAC system.
53403 +
53404 +config GRKERNSEC_JIT_HARDEN
53405 + bool "Harden BPF JIT against spray attacks"
53406 + default y if GRKERNSEC_CONFIG_AUTO
53407 + depends on BPF_JIT
53408 + help
53409 + If you say Y here, the native code generated by the kernel's Berkeley
53410 + Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
53411 + attacks that attempt to fit attacker-beneficial instructions in
53412 + 32bit immediate fields of JIT-generated native instructions. The
53413 + attacker will generally aim to cause an unintended instruction sequence
53414 + of JIT-generated native code to execute by jumping into the middle of
53415 + a generated instruction. This feature effectively randomizes the 32bit
53416 + immediate constants present in the generated code to thwart such attacks.
53417 +
53418 + If you're using KERNEXEC, it's recommended that you enable this option
53419 + to supplement the hardening of the kernel.
53420 +
53421 +config GRKERNSEC_RAND_THREADSTACK
53422 + bool "Insert random gaps between thread stacks"
53423 + default y if GRKERNSEC_CONFIG_AUTO
53424 + depends on PAX_RANDMMAP && !PPC
53425 + help
53426 + If you say Y here, a random-sized gap will be enforced between allocated
53427 + thread stacks. Glibc's NPTL and other threading libraries that
53428 + pass MAP_STACK to the kernel for thread stack allocation are supported.
53429 + The implementation currently provides 8 bits of entropy for the gap.
53430 +
53431 + Many distributions do not compile threaded remote services with the
53432 + -fstack-check argument to GCC, causing the variable-sized stack-based
53433 + allocator, alloca(), to not probe the stack on allocation. This
53434 + permits an unbounded alloca() to skip over any guard page and potentially
53435 + modify another thread's stack reliably. An enforced random gap
53436 + reduces the reliability of such an attack and increases the chance
53437 + that such a read/write to another thread's stack instead lands in
53438 + an unmapped area, causing a crash and triggering grsecurity's
53439 + anti-bruteforcing logic.
53440 +
53441 +config GRKERNSEC_PROC_MEMMAP
53442 + bool "Harden ASLR against information leaks and entropy reduction"
53443 + default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
53444 + depends on PAX_NOEXEC || PAX_ASLR
53445 + help
53446 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
53447 + give no information about the addresses of its mappings if
53448 + PaX features that rely on random addresses are enabled on the task.
53449 + In addition to sanitizing this information and disabling other
53450 + dangerous sources of information, this option causes reads of sensitive
53451 + /proc/<pid> entries where the file descriptor was opened in a different
53452 + task than the one performing the read. Such attempts are logged.
53453 + This option also limits argv/env strings for suid/sgid binaries
53454 + to 512KB to prevent a complete exhaustion of the stack entropy provided
53455 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
53456 + binaries to prevent alternative mmap layouts from being abused.
53457 +
53458 + If you use PaX it is essential that you say Y here as it closes up
53459 + several holes that make full ASLR useless locally.
53460 +
53461 +config GRKERNSEC_BRUTE
53462 + bool "Deter exploit bruteforcing"
53463 + default y if GRKERNSEC_CONFIG_AUTO
53464 + help
53465 + If you say Y here, attempts to bruteforce exploits against forking
53466 + daemons such as apache or sshd, as well as against suid/sgid binaries
53467 + will be deterred. When a child of a forking daemon is killed by PaX
53468 + or crashes due to an illegal instruction or other suspicious signal,
53469 + the parent process will be delayed 30 seconds upon every subsequent
53470 + fork until the administrator is able to assess the situation and
53471 + restart the daemon.
53472 + In the suid/sgid case, the attempt is logged, the user has all their
53473 + processes terminated, and they are prevented from executing any further
53474 + processes for 15 minutes.
53475 + It is recommended that you also enable signal logging in the auditing
53476 + section so that logs are generated when a process triggers a suspicious
53477 + signal.
53478 + If the sysctl option is enabled, a sysctl option with name
53479 + "deter_bruteforce" is created.
53480 +
53481 +
53482 +config GRKERNSEC_MODHARDEN
53483 + bool "Harden module auto-loading"
53484 + default y if GRKERNSEC_CONFIG_AUTO
53485 + depends on MODULES
53486 + help
53487 + If you say Y here, module auto-loading in response to use of some
53488 + feature implemented by an unloaded module will be restricted to
53489 + root users. Enabling this option helps defend against attacks
53490 + by unprivileged users who abuse the auto-loading behavior to
53491 + cause a vulnerable module to load that is then exploited.
53492 +
53493 + If this option prevents a legitimate use of auto-loading for a
53494 + non-root user, the administrator can execute modprobe manually
53495 + with the exact name of the module mentioned in the alert log.
53496 + Alternatively, the administrator can add the module to the list
53497 + of modules loaded at boot by modifying init scripts.
53498 +
53499 + Modification of init scripts will most likely be needed on
53500 + Ubuntu servers with encrypted home directory support enabled,
53501 + as the first non-root user logging in will cause the ecb(aes),
53502 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
53503 +
53504 +config GRKERNSEC_HIDESYM
53505 + bool "Hide kernel symbols"
53506 + default y if GRKERNSEC_CONFIG_AUTO
53507 + select PAX_USERCOPY_SLABS
53508 + help
53509 + If you say Y here, getting information on loaded modules, and
53510 + displaying all kernel symbols through a syscall will be restricted
53511 + to users with CAP_SYS_MODULE. For software compatibility reasons,
53512 + /proc/kallsyms will be restricted to the root user. The RBAC
53513 + system can hide that entry even from root.
53514 +
53515 + This option also prevents leaking of kernel addresses through
53516 + several /proc entries.
53517 +
53518 + Note that this option is only effective provided the following
53519 + conditions are met:
53520 + 1) The kernel using grsecurity is not precompiled by some distribution
53521 + 2) You have also enabled GRKERNSEC_DMESG
53522 + 3) You are using the RBAC system and hiding other files such as your
53523 + kernel image and System.map. Alternatively, enabling this option
53524 + causes the permissions on /boot, /lib/modules, and the kernel
53525 + source directory to change at compile time to prevent
53526 + reading by non-root users.
53527 + If the above conditions are met, this option will aid in providing a
53528 + useful protection against local kernel exploitation of overflows
53529 + and arbitrary read/write vulnerabilities.
53530 +
53531 +config GRKERNSEC_KERN_LOCKOUT
53532 + bool "Active kernel exploit response"
53533 + default y if GRKERNSEC_CONFIG_AUTO
53534 + depends on X86 || ARM || PPC || SPARC
53535 + help
53536 + If you say Y here, when a PaX alert is triggered due to suspicious
53537 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
53538 + or an OOPS occurs due to bad memory accesses, instead of just
53539 + terminating the offending process (and potentially allowing
53540 + a subsequent exploit from the same user), we will take one of two
53541 + actions:
53542 + If the user was root, we will panic the system
53543 + If the user was non-root, we will log the attempt, terminate
53544 + all processes owned by the user, then prevent them from creating
53545 + any new processes until the system is restarted
53546 + This deters repeated kernel exploitation/bruteforcing attempts
53547 + and is useful for later forensics.
53548 +
53549 +endmenu
53550 +menu "Role Based Access Control Options"
53551 +depends on GRKERNSEC
53552 +
53553 +config GRKERNSEC_RBAC_DEBUG
53554 + bool
53555 +
53556 +config GRKERNSEC_NO_RBAC
53557 + bool "Disable RBAC system"
53558 + help
53559 + If you say Y here, the /dev/grsec device will be removed from the kernel,
53560 + preventing the RBAC system from being enabled. You should only say Y
53561 + here if you have no intention of using the RBAC system, so as to prevent
53562 + an attacker with root access from misusing the RBAC system to hide files
53563 + and processes when loadable module support and /dev/[k]mem have been
53564 + locked down.
53565 +
53566 +config GRKERNSEC_ACL_HIDEKERN
53567 + bool "Hide kernel processes"
53568 + help
53569 + If you say Y here, all kernel threads will be hidden to all
53570 + processes but those whose subject has the "view hidden processes"
53571 + flag.
53572 +
53573 +config GRKERNSEC_ACL_MAXTRIES
53574 + int "Maximum tries before password lockout"
53575 + default 3
53576 + help
53577 + This option enforces the maximum number of times a user can attempt
53578 + to authorize themselves with the grsecurity RBAC system before being
53579 + denied the ability to attempt authorization again for a specified time.
53580 + The lower the number, the harder it will be to brute-force a password.
53581 +
53582 +config GRKERNSEC_ACL_TIMEOUT
53583 + int "Time to wait after max password tries, in seconds"
53584 + default 30
53585 + help
53586 + This option specifies the time the user must wait after attempting to
53587 + authorize to the RBAC system with the maximum number of invalid
53588 + passwords. The higher the number, the harder it will be to brute-force
53589 + a password.
53590 +
53591 +endmenu
53592 +menu "Filesystem Protections"
53593 +depends on GRKERNSEC
53594 +
53595 +config GRKERNSEC_PROC
53596 + bool "Proc restrictions"
53597 + default y if GRKERNSEC_CONFIG_AUTO
53598 + help
53599 + If you say Y here, the permissions of the /proc filesystem
53600 + will be altered to enhance system security and privacy. You MUST
53601 + choose either a user only restriction or a user and group restriction.
53602 + Depending upon the option you choose, you can either restrict users to
53603 + see only the processes they themselves run, or choose a group that can
53604 + view all processes and files normally restricted to root if you choose
53605 + the "restrict to user only" option. NOTE: If you're running identd or
53606 + ntpd as a non-root user, you will have to run it as the group you
53607 + specify here.
53608 +
53609 +config GRKERNSEC_PROC_USER
53610 + bool "Restrict /proc to user only"
53611 + depends on GRKERNSEC_PROC
53612 + help
53613 + If you say Y here, non-root users will only be able to view their own
53614 + processes, and restricts them from viewing network-related information,
53615 + and viewing kernel symbol and module information.
53616 +
53617 +config GRKERNSEC_PROC_USERGROUP
53618 + bool "Allow special group"
53619 + default y if GRKERNSEC_CONFIG_AUTO
53620 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53621 + help
53622 + If you say Y here, you will be able to select a group that will be
53623 + able to view all processes and network-related information. If you've
53624 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53625 + remain hidden. This option is useful if you want to run identd as
53626 + a non-root user. The group you select may also be chosen at boot time
53627 + via "grsec_proc_gid=" on the kernel commandline.
53628 +
53629 +config GRKERNSEC_PROC_GID
53630 + int "GID for special group"
53631 + depends on GRKERNSEC_PROC_USERGROUP
53632 + default 1001
53633 +
53634 +config GRKERNSEC_PROC_ADD
53635 + bool "Additional restrictions"
53636 + default y if GRKERNSEC_CONFIG_AUTO
53637 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53638 + help
53639 + If you say Y here, additional restrictions will be placed on
53640 + /proc that keep normal users from viewing device information and
53641 + slabinfo information that could be useful for exploits.
53642 +
53643 +config GRKERNSEC_LINK
53644 + bool "Linking restrictions"
53645 + default y if GRKERNSEC_CONFIG_AUTO
53646 + help
53647 + If you say Y here, /tmp race exploits will be prevented, since users
53648 + will no longer be able to follow symlinks owned by other users in
53649 + world-writable +t directories (e.g. /tmp), unless the owner of the
53650 +	  symlink is the owner of the directory. Users will also not be
53651 + able to hardlink to files they do not own. If the sysctl option is
53652 + enabled, a sysctl option with name "linking_restrictions" is created.
53653 +
53654 +config GRKERNSEC_SYMLINKOWN
53655 + bool "Kernel-enforced SymlinksIfOwnerMatch"
53656 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
53657 + help
53658 + Apache's SymlinksIfOwnerMatch option has an inherent race condition
53659 + that prevents it from being used as a security feature. As Apache
53660 + verifies the symlink by performing a stat() against the target of
53661 + the symlink before it is followed, an attacker can setup a symlink
53662 + to point to a same-owned file, then replace the symlink with one
53663 + that targets another user's file just after Apache "validates" the
53664 + symlink -- a classic TOCTOU race. If you say Y here, a complete,
53665 + race-free replacement for Apache's "SymlinksIfOwnerMatch" option
53666 + will be in place for the group you specify. If the sysctl option
53667 + is enabled, a sysctl option with name "enforce_symlinksifowner" is
53668 + created.
53669 +
53670 +config GRKERNSEC_SYMLINKOWN_GID
53671 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
53672 + depends on GRKERNSEC_SYMLINKOWN
53673 + default 1006
53674 + help
53675 + Setting this GID determines what group kernel-enforced
53676 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
53677 + is enabled, a sysctl option with name "symlinkown_gid" is created.
53678 +
53679 +config GRKERNSEC_FIFO
53680 + bool "FIFO restrictions"
53681 + default y if GRKERNSEC_CONFIG_AUTO
53682 + help
53683 + If you say Y here, users will not be able to write to FIFOs they don't
53684 + own in world-writable +t directories (e.g. /tmp), unless the owner of
53685 + the FIFO is the same owner of the directory it's held in. If the sysctl
53686 + option is enabled, a sysctl option with name "fifo_restrictions" is
53687 + created.
53688 +
53689 +config GRKERNSEC_SYSFS_RESTRICT
53690 + bool "Sysfs/debugfs restriction"
53691 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
53692 + depends on SYSFS
53693 + help
53694 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53695 + any filesystem normally mounted under it (e.g. debugfs) will be
53696 + mostly accessible only by root. These filesystems generally provide access
53697 + to hardware and debug information that isn't appropriate for unprivileged
53698 + users of the system. Sysfs and debugfs have also become a large source
53699 + of new vulnerabilities, ranging from infoleaks to local compromise.
53700 + There has been very little oversight with an eye toward security involved
53701 + in adding new exporters of information to these filesystems, so their
53702 + use is discouraged.
53703 + For reasons of compatibility, a few directories have been whitelisted
53704 + for access by non-root users:
53705 + /sys/fs/selinux
53706 + /sys/fs/fuse
53707 + /sys/devices/system/cpu
53708 +
53709 +config GRKERNSEC_ROFS
53710 + bool "Runtime read-only mount protection"
53711 + help
53712 + If you say Y here, a sysctl option with name "romount_protect" will
53713 + be created. By setting this option to 1 at runtime, filesystems
53714 + will be protected in the following ways:
53715 + * No new writable mounts will be allowed
53716 + * Existing read-only mounts won't be able to be remounted read/write
53717 + * Write operations will be denied on all block devices
53718 + This option acts independently of grsec_lock: once it is set to 1,
53719 + it cannot be turned off. Therefore, please be mindful of the resulting
53720 + behavior if this option is enabled in an init script on a read-only
53721 + filesystem. This feature is mainly intended for secure embedded systems.
53722 +
53723 +config GRKERNSEC_DEVICE_SIDECHANNEL
53724 + bool "Eliminate stat/notify-based device sidechannels"
53725 + default y if GRKERNSEC_CONFIG_AUTO
53726 + help
53727 + If you say Y here, timing analyses on block or character
53728 + devices like /dev/ptmx using stat or inotify/dnotify/fanotify
53729 + will be thwarted for unprivileged users. If a process without
53730 + CAP_MKNOD stats such a device, the last access and last modify times
53731 + will match the device's create time. No access or modify events
53732 + will be triggered through inotify/dnotify/fanotify for such devices.
53733 + This feature will prevent attacks that may at a minimum
53734 + allow an attacker to determine the administrator's password length.
53735 +
53736 +config GRKERNSEC_CHROOT
53737 + bool "Chroot jail restrictions"
53738 + default y if GRKERNSEC_CONFIG_AUTO
53739 + help
53740 + If you say Y here, you will be able to choose several options that will
53741 + make breaking out of a chrooted jail much more difficult. If you
53742 + encounter no software incompatibilities with the following options, it
53743 + is recommended that you enable each one.
53744 +
53745 +config GRKERNSEC_CHROOT_MOUNT
53746 + bool "Deny mounts"
53747 + default y if GRKERNSEC_CONFIG_AUTO
53748 + depends on GRKERNSEC_CHROOT
53749 + help
53750 + If you say Y here, processes inside a chroot will not be able to
53751 + mount or remount filesystems. If the sysctl option is enabled, a
53752 + sysctl option with name "chroot_deny_mount" is created.
53753 +
53754 +config GRKERNSEC_CHROOT_DOUBLE
53755 + bool "Deny double-chroots"
53756 + default y if GRKERNSEC_CONFIG_AUTO
53757 + depends on GRKERNSEC_CHROOT
53758 + help
53759 + If you say Y here, processes inside a chroot will not be able to chroot
53760 + again outside the chroot. This is a widely used method of breaking
53761 + out of a chroot jail and should not be allowed. If the sysctl
53762 + option is enabled, a sysctl option with name
53763 + "chroot_deny_chroot" is created.
53764 +
53765 +config GRKERNSEC_CHROOT_PIVOT
53766 + bool "Deny pivot_root in chroot"
53767 + default y if GRKERNSEC_CONFIG_AUTO
53768 + depends on GRKERNSEC_CHROOT
53769 + help
53770 + If you say Y here, processes inside a chroot will not be able to use
53771 + a function called pivot_root() that was introduced in Linux 2.3.41. It
53772 +	  works similarly to chroot in that it changes the root filesystem. This
53773 + function could be misused in a chrooted process to attempt to break out
53774 + of the chroot, and therefore should not be allowed. If the sysctl
53775 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
53776 + created.
53777 +
53778 +config GRKERNSEC_CHROOT_CHDIR
53779 + bool "Enforce chdir(\"/\") on all chroots"
53780 + default y if GRKERNSEC_CONFIG_AUTO
53781 + depends on GRKERNSEC_CHROOT
53782 + help
53783 + If you say Y here, the current working directory of all newly-chrooted
53784 +	  applications will be set to the root directory of the chroot.
53785 + The man page on chroot(2) states:
53786 + Note that this call does not change the current working
53787 + directory, so that `.' can be outside the tree rooted at
53788 + `/'. In particular, the super-user can escape from a
53789 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53790 +
53791 + It is recommended that you say Y here, since it's not known to break
53792 + any software. If the sysctl option is enabled, a sysctl option with
53793 + name "chroot_enforce_chdir" is created.
53794 +
53795 +config GRKERNSEC_CHROOT_CHMOD
53796 + bool "Deny (f)chmod +s"
53797 + default y if GRKERNSEC_CONFIG_AUTO
53798 + depends on GRKERNSEC_CHROOT
53799 + help
53800 + If you say Y here, processes inside a chroot will not be able to chmod
53801 + or fchmod files to make them have suid or sgid bits. This protects
53802 + against another published method of breaking a chroot. If the sysctl
53803 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
53804 + created.
53805 +
53806 +config GRKERNSEC_CHROOT_FCHDIR
53807 + bool "Deny fchdir out of chroot"
53808 + default y if GRKERNSEC_CONFIG_AUTO
53809 + depends on GRKERNSEC_CHROOT
53810 + help
53811 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
53812 + to a file descriptor of the chrooting process that points to a directory
53813 + outside the filesystem will be stopped. If the sysctl option
53814 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53815 +
53816 +config GRKERNSEC_CHROOT_MKNOD
53817 + bool "Deny mknod"
53818 + default y if GRKERNSEC_CONFIG_AUTO
53819 + depends on GRKERNSEC_CHROOT
53820 + help
53821 + If you say Y here, processes inside a chroot will not be allowed to
53822 + mknod. The problem with using mknod inside a chroot is that it
53823 + would allow an attacker to create a device entry that is the same
53824 + as one on the physical root of your system, which could range from
53825 + anything from the console device to a device for your harddrive (which
53826 + they could then use to wipe the drive or steal data). It is recommended
53827 + that you say Y here, unless you run into software incompatibilities.
53828 + If the sysctl option is enabled, a sysctl option with name
53829 + "chroot_deny_mknod" is created.
53830 +
53831 +config GRKERNSEC_CHROOT_SHMAT
53832 + bool "Deny shmat() out of chroot"
53833 + default y if GRKERNSEC_CONFIG_AUTO
53834 + depends on GRKERNSEC_CHROOT
53835 + help
53836 + If you say Y here, processes inside a chroot will not be able to attach
53837 + to shared memory segments that were created outside of the chroot jail.
53838 + It is recommended that you say Y here. If the sysctl option is enabled,
53839 + a sysctl option with name "chroot_deny_shmat" is created.
53840 +
53841 +config GRKERNSEC_CHROOT_UNIX
53842 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
53843 + default y if GRKERNSEC_CONFIG_AUTO
53844 + depends on GRKERNSEC_CHROOT
53845 + help
53846 + If you say Y here, processes inside a chroot will not be able to
53847 + connect to abstract (meaning not belonging to a filesystem) Unix
53848 + domain sockets that were bound outside of a chroot. It is recommended
53849 + that you say Y here. If the sysctl option is enabled, a sysctl option
53850 + with name "chroot_deny_unix" is created.
53851 +
53852 +config GRKERNSEC_CHROOT_FINDTASK
53853 + bool "Protect outside processes"
53854 + default y if GRKERNSEC_CONFIG_AUTO
53855 + depends on GRKERNSEC_CHROOT
53856 + help
53857 + If you say Y here, processes inside a chroot will not be able to
53858 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53859 + getsid, or view any process outside of the chroot. If the sysctl
53860 + option is enabled, a sysctl option with name "chroot_findtask" is
53861 + created.
53862 +
53863 +config GRKERNSEC_CHROOT_NICE
53864 + bool "Restrict priority changes"
53865 + default y if GRKERNSEC_CONFIG_AUTO
53866 + depends on GRKERNSEC_CHROOT
53867 + help
53868 + If you say Y here, processes inside a chroot will not be able to raise
53869 + the priority of processes in the chroot, or alter the priority of
53870 + processes outside the chroot. This provides more security than simply
53871 + removing CAP_SYS_NICE from the process' capability set. If the
53872 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53873 + is created.
53874 +
53875 +config GRKERNSEC_CHROOT_SYSCTL
53876 + bool "Deny sysctl writes"
53877 + default y if GRKERNSEC_CONFIG_AUTO
53878 + depends on GRKERNSEC_CHROOT
53879 + help
53880 + If you say Y here, an attacker in a chroot will not be able to
53881 + write to sysctl entries, either by sysctl(2) or through a /proc
53882 + interface. It is strongly recommended that you say Y here. If the
53883 + sysctl option is enabled, a sysctl option with name
53884 + "chroot_deny_sysctl" is created.
53885 +
53886 +config GRKERNSEC_CHROOT_CAPS
53887 + bool "Capability restrictions"
53888 + default y if GRKERNSEC_CONFIG_AUTO
53889 + depends on GRKERNSEC_CHROOT
53890 + help
53891 + If you say Y here, the capabilities on all processes within a
53892 + chroot jail will be lowered to stop module insertion, raw i/o,
53893 + system and net admin tasks, rebooting the system, modifying immutable
53894 + files, modifying IPC owned by another, and changing the system time.
53895 + This is left an option because it can break some apps. Disable this
53896 + if your chrooted apps are having problems performing those kinds of
53897 + tasks. If the sysctl option is enabled, a sysctl option with
53898 + name "chroot_caps" is created.
53899 +
53900 +endmenu
53901 +menu "Kernel Auditing"
53902 +depends on GRKERNSEC
53903 +
53904 +config GRKERNSEC_AUDIT_GROUP
53905 + bool "Single group for auditing"
53906 + help
53907 + If you say Y here, the exec and chdir logging features will only operate
53908 + on a group you specify. This option is recommended if you only want to
53909 + watch certain users instead of having a large amount of logs from the
53910 + entire system. If the sysctl option is enabled, a sysctl option with
53911 + name "audit_group" is created.
53912 +
53913 +config GRKERNSEC_AUDIT_GID
53914 + int "GID for auditing"
53915 + depends on GRKERNSEC_AUDIT_GROUP
53916 + default 1007
53917 +
53918 +config GRKERNSEC_EXECLOG
53919 + bool "Exec logging"
53920 + help
53921 + If you say Y here, all execve() calls will be logged (since the
53922 + other exec*() calls are frontends to execve(), all execution
53923 + will be logged). Useful for shell-servers that like to keep track
53924 + of their users. If the sysctl option is enabled, a sysctl option with
53925 + name "exec_logging" is created.
53926 + WARNING: This option when enabled will produce a LOT of logs, especially
53927 + on an active system.
53928 +
53929 +config GRKERNSEC_RESLOG
53930 + bool "Resource logging"
53931 + default y if GRKERNSEC_CONFIG_AUTO
53932 + help
53933 + If you say Y here, all attempts to overstep resource limits will
53934 + be logged with the resource name, the requested size, and the current
53935 + limit. It is highly recommended that you say Y here. If the sysctl
53936 + option is enabled, a sysctl option with name "resource_logging" is
53937 + created. If the RBAC system is enabled, the sysctl value is ignored.
53938 +
53939 +config GRKERNSEC_CHROOT_EXECLOG
53940 + bool "Log execs within chroot"
53941 + help
53942 + If you say Y here, all executions inside a chroot jail will be logged
53943 + to syslog. This can cause a large amount of logs if certain
53944 + applications (eg. djb's daemontools) are installed on the system, and
53945 + is therefore left as an option. If the sysctl option is enabled, a
53946 + sysctl option with name "chroot_execlog" is created.
53947 +
53948 +config GRKERNSEC_AUDIT_PTRACE
53949 + bool "Ptrace logging"
53950 + help
53951 + If you say Y here, all attempts to attach to a process via ptrace
53952 + will be logged. If the sysctl option is enabled, a sysctl option
53953 + with name "audit_ptrace" is created.
53954 +
53955 +config GRKERNSEC_AUDIT_CHDIR
53956 + bool "Chdir logging"
53957 + help
53958 + If you say Y here, all chdir() calls will be logged. If the sysctl
53959 + option is enabled, a sysctl option with name "audit_chdir" is created.
53960 +
53961 +config GRKERNSEC_AUDIT_MOUNT
53962 + bool "(Un)Mount logging"
53963 + help
53964 + If you say Y here, all mounts and unmounts will be logged. If the
53965 + sysctl option is enabled, a sysctl option with name "audit_mount" is
53966 + created.
53967 +
53968 +config GRKERNSEC_SIGNAL
53969 + bool "Signal logging"
53970 + default y if GRKERNSEC_CONFIG_AUTO
53971 + help
53972 + If you say Y here, certain important signals will be logged, such as
53973 +	  SIGSEGV, which will as a result inform you of when an error in a program
53974 + occurred, which in some cases could mean a possible exploit attempt.
53975 + If the sysctl option is enabled, a sysctl option with name
53976 + "signal_logging" is created.
53977 +
53978 +config GRKERNSEC_FORKFAIL
53979 + bool "Fork failure logging"
53980 + help
53981 + If you say Y here, all failed fork() attempts will be logged.
53982 + This could suggest a fork bomb, or someone attempting to overstep
53983 + their process limit. If the sysctl option is enabled, a sysctl option
53984 + with name "forkfail_logging" is created.
53985 +
53986 +config GRKERNSEC_TIME
53987 + bool "Time change logging"
53988 + default y if GRKERNSEC_CONFIG_AUTO
53989 + help
53990 + If you say Y here, any changes of the system clock will be logged.
53991 + If the sysctl option is enabled, a sysctl option with name
53992 + "timechange_logging" is created.
53993 +
53994 +config GRKERNSEC_PROC_IPADDR
53995 + bool "/proc/<pid>/ipaddr support"
53996 + default y if GRKERNSEC_CONFIG_AUTO
53997 + help
53998 + If you say Y here, a new entry will be added to each /proc/<pid>
53999 + directory that contains the IP address of the person using the task.
54000 + The IP is carried across local TCP and AF_UNIX stream sockets.
54001 + This information can be useful for IDS/IPSes to perform remote response
54002 + to a local attack. The entry is readable by only the owner of the
54003 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
54004 + the RBAC system), and thus does not create privacy concerns.
54005 +
54006 +config GRKERNSEC_RWXMAP_LOG
54007 + bool 'Denied RWX mmap/mprotect logging'
54008 + default y if GRKERNSEC_CONFIG_AUTO
54009 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
54010 + help
54011 + If you say Y here, calls to mmap() and mprotect() with explicit
54012 + usage of PROT_WRITE and PROT_EXEC together will be logged when
54013 + denied by the PAX_MPROTECT feature. If the sysctl option is
54014 + enabled, a sysctl option with name "rwxmap_logging" is created.
54015 +
54016 +config GRKERNSEC_AUDIT_TEXTREL
54017 + bool 'ELF text relocations logging (READ HELP)'
54018 + depends on PAX_MPROTECT
54019 + help
54020 + If you say Y here, text relocations will be logged with the filename
54021 + of the offending library or binary. The purpose of the feature is
54022 + to help Linux distribution developers get rid of libraries and
54023 + binaries that need text relocations which hinder the future progress
54024 + of PaX. Only Linux distribution developers should say Y here, and
54025 + never on a production machine, as this option creates an information
54026 + leak that could aid an attacker in defeating the randomization of
54027 + a single memory region. If the sysctl option is enabled, a sysctl
54028 + option with name "audit_textrel" is created.
54029 +
54030 +endmenu
54031 +
54032 +menu "Executable Protections"
54033 +depends on GRKERNSEC
54034 +
54035 +config GRKERNSEC_DMESG
54036 + bool "Dmesg(8) restriction"
54037 + default y if GRKERNSEC_CONFIG_AUTO
54038 + help
54039 + If you say Y here, non-root users will not be able to use dmesg(8)
54040 + to view the contents of the kernel's circular log buffer.
54041 + The kernel's log buffer often contains kernel addresses and other
54042 + identifying information useful to an attacker in fingerprinting a
54043 + system for a targeted exploit.
54044 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
54045 + created.
54046 +
54047 +config GRKERNSEC_HARDEN_PTRACE
54048 + bool "Deter ptrace-based process snooping"
54049 + default y if GRKERNSEC_CONFIG_AUTO
54050 + help
54051 + If you say Y here, TTY sniffers and other malicious monitoring
54052 + programs implemented through ptrace will be defeated. If you
54053 + have been using the RBAC system, this option has already been
54054 + enabled for several years for all users, with the ability to make
54055 + fine-grained exceptions.
54056 +
54057 + This option only affects the ability of non-root users to ptrace
54058 + processes that are not a descendent of the ptracing process.
54059 + This means that strace ./binary and gdb ./binary will still work,
54060 + but attaching to arbitrary processes will not. If the sysctl
54061 + option is enabled, a sysctl option with name "harden_ptrace" is
54062 + created.
54063 +
54064 +config GRKERNSEC_PTRACE_READEXEC
54065 + bool "Require read access to ptrace sensitive binaries"
54066 + default y if GRKERNSEC_CONFIG_AUTO
54067 + help
54068 + If you say Y here, unprivileged users will not be able to ptrace unreadable
54069 + binaries. This option is useful in environments that
54070 + remove the read bits (e.g. file mode 4711) from suid binaries to
54071 + prevent infoleaking of their contents. This option adds
54072 + consistency to the use of that file mode, as the binary could normally
54073 + be read out when run without privileges while ptracing.
54074 +
54075 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
54076 + is created.
54077 +
54078 +config GRKERNSEC_SETXID
54079 + bool "Enforce consistent multithreaded privileges"
54080 + default y if GRKERNSEC_CONFIG_AUTO
54081 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
54082 + help
54083 + If you say Y here, a change from a root uid to a non-root uid
54084 + in a multithreaded application will cause the resulting uids,
54085 + gids, supplementary groups, and capabilities in that thread
54086 + to be propagated to the other threads of the process. In most
54087 + cases this is unnecessary, as glibc will emulate this behavior
54088 + on behalf of the application. Other libcs do not act in the
54089 + same way, allowing the other threads of the process to continue
54090 + running with root privileges. If the sysctl option is enabled,
54091 + a sysctl option with name "consistent_setxid" is created.
54092 +
54093 +config GRKERNSEC_TPE
54094 + bool "Trusted Path Execution (TPE)"
54095 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
54096 + help
54097 + If you say Y here, you will be able to choose a gid to add to the
54098 + supplementary groups of users you want to mark as "untrusted."
54099 + These users will not be able to execute any files that are not in
54100 + root-owned directories writable only by root. If the sysctl option
54101 + is enabled, a sysctl option with name "tpe" is created.
54102 +
54103 +config GRKERNSEC_TPE_ALL
54104 + bool "Partially restrict all non-root users"
54105 + depends on GRKERNSEC_TPE
54106 + help
54107 + If you say Y here, all non-root users will be covered under
54108 + a weaker TPE restriction. This is separate from, and in addition to,
54109 + the main TPE options that you have selected elsewhere. Thus, if a
54110 + "trusted" GID is chosen, this restriction applies to even that GID.
54111 + Under this restriction, all non-root users will only be allowed to
54112 + execute files in directories they own that are not group or
54113 + world-writable, or in directories owned by root and writable only by
54114 + root. If the sysctl option is enabled, a sysctl option with name
54115 + "tpe_restrict_all" is created.
54116 +
54117 +config GRKERNSEC_TPE_INVERT
54118 + bool "Invert GID option"
54119 + depends on GRKERNSEC_TPE
54120 + help
54121 + If you say Y here, the group you specify in the TPE configuration will
54122 + decide what group TPE restrictions will be *disabled* for. This
54123 + option is useful if you want TPE restrictions to be applied to most
54124 + users on the system. If the sysctl option is enabled, a sysctl option
54125 + with name "tpe_invert" is created. Unlike other sysctl options, this
54126 + entry will default to on for backward-compatibility.
54127 +
54128 +config GRKERNSEC_TPE_GID
54129 + int
54130 + default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
54131 + default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
54132 +
54133 +config GRKERNSEC_TPE_UNTRUSTED_GID
54134 + int "GID for TPE-untrusted users"
54135 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
54136 + default 1005
54137 + help
54138 + Setting this GID determines what group TPE restrictions will be
54139 + *enabled* for. If the sysctl option is enabled, a sysctl option
54140 + with name "tpe_gid" is created.
54141 +
54142 +config GRKERNSEC_TPE_TRUSTED_GID
54143 + int "GID for TPE-trusted users"
54144 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
54145 + default 1005
54146 + help
54147 + Setting this GID determines what group TPE restrictions will be
54148 + *disabled* for. If the sysctl option is enabled, a sysctl option
54149 + with name "tpe_gid" is created.
54150 +
54151 +endmenu
54152 +menu "Network Protections"
54153 +depends on GRKERNSEC
54154 +
54155 +config GRKERNSEC_RANDNET
54156 + bool "Larger entropy pools"
54157 + default y if GRKERNSEC_CONFIG_AUTO
54158 + help
54159 + If you say Y here, the entropy pools used for many features of Linux
54160 + and grsecurity will be doubled in size. Since several grsecurity
54161 + features use additional randomness, it is recommended that you say Y
54162 + here. Saying Y here has a similar effect as modifying
54163 + /proc/sys/kernel/random/poolsize.
54164 +
54165 +config GRKERNSEC_BLACKHOLE
54166 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
54167 + default y if GRKERNSEC_CONFIG_AUTO
54168 + depends on NET
54169 + help
54170 + If you say Y here, neither TCP resets nor ICMP
54171 + destination-unreachable packets will be sent in response to packets
54172 + sent to ports for which no associated listening process exists.
54173 + This feature supports both IPV4 and IPV6 and exempts the
54174 + loopback interface from blackholing. Enabling this feature
54175 + makes a host more resilient to DoS attacks and reduces network
54176 + visibility against scanners.
54177 +
54178 + The blackhole feature as-implemented is equivalent to the FreeBSD
54179 + blackhole feature, as it prevents RST responses to all packets, not
54180 + just SYNs. Under most application behavior this causes no
54181 + problems, but applications (like haproxy) may not close certain
54182 + connections in a way that cleanly terminates them on the remote
54183 + end, leaving the remote host in LAST_ACK state. Because of this
54184 + side-effect and to prevent intentional LAST_ACK DoSes, this
54185 + feature also adds automatic mitigation against such attacks.
54186 + The mitigation drastically reduces the amount of time a socket
54187 + can spend in LAST_ACK state. If you're using haproxy and not
54188 + all servers it connects to have this option enabled, consider
54189 + disabling this feature on the haproxy host.
54190 +
54191 + If the sysctl option is enabled, two sysctl options with names
54192 + "ip_blackhole" and "lastack_retries" will be created.
54193 + While "ip_blackhole" takes the standard zero/non-zero on/off
54194 + toggle, "lastack_retries" uses the same kinds of values as
54195 + "tcp_retries1" and "tcp_retries2". The default value of 4
54196 + prevents a socket from lasting more than 45 seconds in LAST_ACK
54197 + state.
54198 +
54199 +config GRKERNSEC_NO_SIMULT_CONNECT
54200 + bool "Disable TCP Simultaneous Connect"
54201 + default y if GRKERNSEC_CONFIG_AUTO
54202 + depends on NET
54203 + help
54204 + If you say Y here, a feature by Willy Tarreau will be enabled that
54205 + removes a weakness in Linux's strict implementation of TCP that
54206 + allows two clients to connect to each other without either entering
54207 + a listening state. The weakness allows an attacker to easily prevent
54208 + a client from connecting to a known server provided the source port
54209 + for the connection is guessed correctly.
54210 +
54211 + As the weakness could be used to prevent an antivirus or IPS from
54212 + fetching updates, or prevent an SSL gateway from fetching a CRL,
54213 + it should be eliminated by enabling this option. Though Linux is
54214 + one of few operating systems supporting simultaneous connect, it
54215 + has no legitimate use in practice and is rarely supported by firewalls.
54216 +
54217 +config GRKERNSEC_SOCKET
54218 + bool "Socket restrictions"
54219 + depends on NET
54220 + help
54221 + If you say Y here, you will be able to choose from several options.
54222 + If you assign a GID on your system and add it to the supplementary
54223 + groups of users you want to restrict socket access to, this patch
54224 + will perform up to three things, based on the option(s) you choose.
54225 +
54226 +config GRKERNSEC_SOCKET_ALL
54227 + bool "Deny any sockets to group"
54228 + depends on GRKERNSEC_SOCKET
54229 + help
54230 + If you say Y here, you will be able to choose a GID of whose users will
54231 + be unable to connect to other hosts from your machine or run server
54232 + applications from your machine. If the sysctl option is enabled, a
54233 + sysctl option with name "socket_all" is created.
54234 +
54235 +config GRKERNSEC_SOCKET_ALL_GID
54236 + int "GID to deny all sockets for"
54237 + depends on GRKERNSEC_SOCKET_ALL
54238 + default 1004
54239 + help
54240 + Here you can choose the GID to disable socket access for. Remember to
54241 + add the users you want socket access disabled for to the GID
54242 + specified here. If the sysctl option is enabled, a sysctl option
54243 + with name "socket_all_gid" is created.
54244 +
54245 +config GRKERNSEC_SOCKET_CLIENT
54246 + bool "Deny client sockets to group"
54247 + depends on GRKERNSEC_SOCKET
54248 + help
54249 + If you say Y here, you will be able to choose a GID of whose users will
54250 + be unable to connect to other hosts from your machine, but will be
54251 + able to run servers. If this option is enabled, all users in the group
54252 + you specify will have to use passive mode when initiating ftp transfers
54253 + from the shell on your machine. If the sysctl option is enabled, a
54254 + sysctl option with name "socket_client" is created.
54255 +
54256 +config GRKERNSEC_SOCKET_CLIENT_GID
54257 + int "GID to deny client sockets for"
54258 + depends on GRKERNSEC_SOCKET_CLIENT
54259 + default 1003
54260 + help
54261 + Here you can choose the GID to disable client socket access for.
54262 + Remember to add the users you want client socket access disabled for to
54263 + the GID specified here. If the sysctl option is enabled, a sysctl
54264 + option with name "socket_client_gid" is created.
54265 +
54266 +config GRKERNSEC_SOCKET_SERVER
54267 + bool "Deny server sockets to group"
54268 + depends on GRKERNSEC_SOCKET
54269 + help
54270 + If you say Y here, you will be able to choose a GID of whose users will
54271 + be unable to run server applications from your machine. If the sysctl
54272 + option is enabled, a sysctl option with name "socket_server" is created.
54273 +
54274 +config GRKERNSEC_SOCKET_SERVER_GID
54275 + int "GID to deny server sockets for"
54276 + depends on GRKERNSEC_SOCKET_SERVER
54277 + default 1002
54278 + help
54279 + Here you can choose the GID to disable server socket access for.
54280 + Remember to add the users you want server socket access disabled for to
54281 + the GID specified here. If the sysctl option is enabled, a sysctl
54282 + option with name "socket_server_gid" is created.
54283 +
54284 +endmenu
54285 +menu "Sysctl Support"
54286 +depends on GRKERNSEC && SYSCTL
54287 +
54288 +config GRKERNSEC_SYSCTL
54289 + bool "Sysctl support"
54290 + default y if GRKERNSEC_CONFIG_AUTO
54291 + help
54292 + If you say Y here, you will be able to change the options that
54293 + grsecurity runs with at bootup, without having to recompile your
54294 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
54295 + to enable (1) or disable (0) various features. All the sysctl entries
54296 + are mutable until the "grsec_lock" entry is set to a non-zero value.
54297 + All features enabled in the kernel configuration are disabled at boot
54298 + if you do not say Y to the "Turn on features by default" option.
54299 + All options should be set at startup, and the grsec_lock entry should
54300 + be set to a non-zero value after all the options are set.
54301 + *THIS IS EXTREMELY IMPORTANT*
54302 +
54303 +config GRKERNSEC_SYSCTL_DISTRO
54304 + bool "Extra sysctl support for distro makers (READ HELP)"
54305 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
54306 + help
54307 + If you say Y here, additional sysctl options will be created
54308 + for features that affect processes running as root. Therefore,
54309 + it is critical when using this option that the grsec_lock entry be
54310 + enabled after boot. Only distros with prebuilt kernel packages
54311 + with this option enabled that can ensure grsec_lock is enabled
54312 + after boot should use this option.
54313 + *Failure to set grsec_lock after boot makes all grsec features
54314 + this option covers useless*
54315 +
54316 + Currently this option creates the following sysctl entries:
54317 + "Disable Privileged I/O": "disable_priv_io"
54318 +
54319 +config GRKERNSEC_SYSCTL_ON
54320 + bool "Turn on features by default"
54321 + default y if GRKERNSEC_CONFIG_AUTO
54322 + depends on GRKERNSEC_SYSCTL
54323 + help
54324 + If you say Y here, instead of having all features enabled in the
54325 + kernel configuration disabled at boot time, the features will be
54326 + enabled at boot time. It is recommended you say Y here unless
54327 + there is some reason you would want all sysctl-tunable features to
54328 + be disabled by default. As mentioned elsewhere, it is important
54329 + to enable the grsec_lock entry once you have finished modifying
54330 + the sysctl entries.
54331 +
54332 +endmenu
54333 +menu "Logging Options"
54334 +depends on GRKERNSEC
54335 +
54336 +config GRKERNSEC_FLOODTIME
54337 + int "Seconds in between log messages (minimum)"
54338 + default 10
54339 + help
54340 + This option allows you to enforce the number of seconds between
54341 + grsecurity log messages. The default should be suitable for most
54342 + people, however, if you choose to change it, choose a value small enough
54343 + to allow informative logs to be produced, but large enough to
54344 + prevent flooding.
54345 +
54346 +config GRKERNSEC_FLOODBURST
54347 + int "Number of messages in a burst (maximum)"
54348 + default 6
54349 + help
54350 + This option allows you to choose the maximum number of messages allowed
54351 + within the flood time interval you chose in a separate option. The
54352 + default should be suitable for most people, however if you find that
54353 + many of your logs are being interpreted as flooding, you may want to
54354 + raise this value.
54355 +
54356 +endmenu
54357 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
54358 new file mode 100644
54359 index 0000000..1b9afa9
54360 --- /dev/null
54361 +++ b/grsecurity/Makefile
54362 @@ -0,0 +1,38 @@
54363 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
54364 +# during 2001-2009 it has been completely redesigned by Brad Spengler
54365 +# into an RBAC system
54366 +#
54367 +# All code in this directory and various hooks inserted throughout the kernel
54368 +# are copyright Brad Spengler - Open Source Security, Inc., and released
54369 +# under the GPL v2 or higher
54370 +
54371 +KBUILD_CFLAGS += -Werror
54372 +
54373 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
54374 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
54375 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
54376 +
54377 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
54378 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
54379 + gracl_learn.o grsec_log.o
54380 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
54381 +
54382 +ifdef CONFIG_NET
54383 +obj-y += grsec_sock.o
54384 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
54385 +endif
54386 +
54387 +ifndef CONFIG_GRKERNSEC
54388 +obj-y += grsec_disabled.o
54389 +endif
54390 +
54391 +ifdef CONFIG_GRKERNSEC_HIDESYM
54392 +extra-y := grsec_hidesym.o
54393 +$(obj)/grsec_hidesym.o:
54394 + @-chmod -f 500 /boot
54395 + @-chmod -f 500 /lib/modules
54396 + @-chmod -f 500 /lib64/modules
54397 + @-chmod -f 500 /lib32/modules
54398 + @-chmod -f 700 .
54399 + @echo ' grsec: protected kernel image paths'
54400 +endif
54401 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
54402 new file mode 100644
54403 index 0000000..6b7b8f7
54404 --- /dev/null
54405 +++ b/grsecurity/gracl.c
54406 @@ -0,0 +1,4067 @@
54407 +#include <linux/kernel.h>
54408 +#include <linux/module.h>
54409 +#include <linux/sched.h>
54410 +#include <linux/mm.h>
54411 +#include <linux/file.h>
54412 +#include <linux/fs.h>
54413 +#include <linux/namei.h>
54414 +#include <linux/mount.h>
54415 +#include <linux/tty.h>
54416 +#include <linux/proc_fs.h>
54417 +#include <linux/lglock.h>
54418 +#include <linux/slab.h>
54419 +#include <linux/vmalloc.h>
54420 +#include <linux/types.h>
54421 +#include <linux/sysctl.h>
54422 +#include <linux/netdevice.h>
54423 +#include <linux/ptrace.h>
54424 +#include <linux/gracl.h>
54425 +#include <linux/gralloc.h>
54426 +#include <linux/security.h>
54427 +#include <linux/grinternal.h>
54428 +#include <linux/pid_namespace.h>
54429 +#include <linux/stop_machine.h>
54430 +#include <linux/fdtable.h>
54431 +#include <linux/percpu.h>
54432 +#include <linux/lglock.h>
54433 +#include <linux/hugetlb.h>
54434 +#include "../fs/mount.h"
54435 +
54436 +#include <asm/uaccess.h>
54437 +#include <asm/errno.h>
54438 +#include <asm/mman.h>
54439 +
54440 +extern struct lglock vfsmount_lock;
54441 +
54442 +static struct acl_role_db acl_role_set;
54443 +static struct name_db name_set;
54444 +static struct inodev_db inodev_set;
54445 +
54446 +/* for keeping track of userspace pointers used for subjects, so we
54447 + can share references in the kernel as well
54448 +*/
54449 +
54450 +static struct path real_root;
54451 +
54452 +static struct acl_subj_map_db subj_map_set;
54453 +
54454 +static struct acl_role_label *default_role;
54455 +
54456 +static struct acl_role_label *role_list;
54457 +
54458 +static u16 acl_sp_role_value;
54459 +
54460 +extern char *gr_shared_page[4];
54461 +static DEFINE_MUTEX(gr_dev_mutex);
54462 +DEFINE_RWLOCK(gr_inode_lock);
54463 +
54464 +struct gr_arg *gr_usermode;
54465 +
54466 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
54467 +
54468 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
54469 +extern void gr_clear_learn_entries(void);
54470 +
54471 +unsigned char *gr_system_salt;
54472 +unsigned char *gr_system_sum;
54473 +
54474 +static struct sprole_pw **acl_special_roles = NULL;
54475 +static __u16 num_sprole_pws = 0;
54476 +
54477 +static struct acl_role_label *kernel_role = NULL;
54478 +
54479 +static unsigned int gr_auth_attempts = 0;
54480 +static unsigned long gr_auth_expires = 0UL;
54481 +
54482 +#ifdef CONFIG_NET
54483 +extern struct vfsmount *sock_mnt;
54484 +#endif
54485 +
54486 +extern struct vfsmount *pipe_mnt;
54487 +extern struct vfsmount *shm_mnt;
54488 +
54489 +#ifdef CONFIG_HUGETLBFS
54490 +extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
54491 +#endif
54492 +
54493 +static struct acl_object_label *fakefs_obj_rw;
54494 +static struct acl_object_label *fakefs_obj_rwx;
54495 +
54496 +extern int gr_init_uidset(void);
54497 +extern void gr_free_uidset(void);
54498 +extern void gr_remove_uid(uid_t uid);
54499 +extern int gr_find_uid(uid_t uid);
54500 +
54501 +__inline__ int
54502 +gr_acl_is_enabled(void)
54503 +{
54504 + return (gr_status & GR_READY);
54505 +}
54506 +
54507 +#ifdef CONFIG_BTRFS_FS
54508 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
54509 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
54510 +#endif
54511 +
54512 +static inline dev_t __get_dev(const struct dentry *dentry)
54513 +{
54514 +#ifdef CONFIG_BTRFS_FS
54515 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
54516 + return get_btrfs_dev_from_inode(dentry->d_inode);
54517 + else
54518 +#endif
54519 + return dentry->d_inode->i_sb->s_dev;
54520 +}
54521 +
54522 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54523 +{
54524 + return __get_dev(dentry);
54525 +}
54526 +
54527 +static char gr_task_roletype_to_char(struct task_struct *task)
54528 +{
54529 + switch (task->role->roletype &
54530 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
54531 + GR_ROLE_SPECIAL)) {
54532 + case GR_ROLE_DEFAULT:
54533 + return 'D';
54534 + case GR_ROLE_USER:
54535 + return 'U';
54536 + case GR_ROLE_GROUP:
54537 + return 'G';
54538 + case GR_ROLE_SPECIAL:
54539 + return 'S';
54540 + }
54541 +
54542 + return 'X';
54543 +}
54544 +
54545 +char gr_roletype_to_char(void)
54546 +{
54547 + return gr_task_roletype_to_char(current);
54548 +}
54549 +
54550 +__inline__ int
54551 +gr_acl_tpe_check(void)
54552 +{
54553 + if (unlikely(!(gr_status & GR_READY)))
54554 + return 0;
54555 + if (current->role->roletype & GR_ROLE_TPE)
54556 + return 1;
54557 + else
54558 + return 0;
54559 +}
54560 +
54561 +int
54562 +gr_handle_rawio(const struct inode *inode)
54563 +{
54564 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54565 + if (inode && S_ISBLK(inode->i_mode) &&
54566 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54567 + !capable(CAP_SYS_RAWIO))
54568 + return 1;
54569 +#endif
54570 + return 0;
54571 +}
54572 +
54573 +static int
54574 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
54575 +{
54576 + if (likely(lena != lenb))
54577 + return 0;
54578 +
54579 + return !memcmp(a, b, lena);
54580 +}
54581 +
54582 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
54583 +{
54584 + *buflen -= namelen;
54585 + if (*buflen < 0)
54586 + return -ENAMETOOLONG;
54587 + *buffer -= namelen;
54588 + memcpy(*buffer, str, namelen);
54589 + return 0;
54590 +}
54591 +
54592 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
54593 +{
54594 + return prepend(buffer, buflen, name->name, name->len);
54595 +}
54596 +
54597 +static int prepend_path(const struct path *path, struct path *root,
54598 + char **buffer, int *buflen)
54599 +{
54600 + struct dentry *dentry = path->dentry;
54601 + struct vfsmount *vfsmnt = path->mnt;
54602 + struct mount *mnt = real_mount(vfsmnt);
54603 + bool slash = false;
54604 + int error = 0;
54605 +
54606 + while (dentry != root->dentry || vfsmnt != root->mnt) {
54607 + struct dentry * parent;
54608 +
54609 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
54610 + /* Global root? */
54611 + if (!mnt_has_parent(mnt)) {
54612 + goto out;
54613 + }
54614 + dentry = mnt->mnt_mountpoint;
54615 + mnt = mnt->mnt_parent;
54616 + vfsmnt = &mnt->mnt;
54617 + continue;
54618 + }
54619 + parent = dentry->d_parent;
54620 + prefetch(parent);
54621 + spin_lock(&dentry->d_lock);
54622 + error = prepend_name(buffer, buflen, &dentry->d_name);
54623 + spin_unlock(&dentry->d_lock);
54624 + if (!error)
54625 + error = prepend(buffer, buflen, "/", 1);
54626 + if (error)
54627 + break;
54628 +
54629 + slash = true;
54630 + dentry = parent;
54631 + }
54632 +
54633 +out:
54634 + if (!error && !slash)
54635 + error = prepend(buffer, buflen, "/", 1);
54636 +
54637 + return error;
54638 +}
54639 +
54640 +/* this must be called with vfsmount_lock and rename_lock held */
54641 +
54642 +static char *__our_d_path(const struct path *path, struct path *root,
54643 + char *buf, int buflen)
54644 +{
54645 + char *res = buf + buflen;
54646 + int error;
54647 +
54648 + prepend(&res, &buflen, "\0", 1);
54649 + error = prepend_path(path, root, &res, &buflen);
54650 + if (error)
54651 + return ERR_PTR(error);
54652 +
54653 + return res;
54654 +}
54655 +
54656 +static char *
54657 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
54658 +{
54659 + char *retval;
54660 +
54661 + retval = __our_d_path(path, root, buf, buflen);
54662 + if (unlikely(IS_ERR(retval)))
54663 + retval = strcpy(buf, "<path too long>");
54664 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
54665 + retval[1] = '\0';
54666 +
54667 + return retval;
54668 +}
54669 +
54670 +static char *
54671 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
54672 + char *buf, int buflen)
54673 +{
54674 + struct path path;
54675 + char *res;
54676 +
54677 + path.dentry = (struct dentry *)dentry;
54678 + path.mnt = (struct vfsmount *)vfsmnt;
54679 +
54680 + /* we can use real_root.dentry, real_root.mnt, because this is only called
54681 + by the RBAC system */
54682 + res = gen_full_path(&path, &real_root, buf, buflen);
54683 +
54684 + return res;
54685 +}
54686 +
54687 +static char *
54688 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
54689 + char *buf, int buflen)
54690 +{
54691 + char *res;
54692 + struct path path;
54693 + struct path root;
54694 + struct task_struct *reaper = init_pid_ns.child_reaper;
54695 +
54696 + path.dentry = (struct dentry *)dentry;
54697 + path.mnt = (struct vfsmount *)vfsmnt;
54698 +
54699 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
54700 + get_fs_root(reaper->fs, &root);
54701 +
54702 + write_seqlock(&rename_lock);
54703 + br_read_lock(&vfsmount_lock);
54704 + res = gen_full_path(&path, &root, buf, buflen);
54705 + br_read_unlock(&vfsmount_lock);
54706 + write_sequnlock(&rename_lock);
54707 +
54708 + path_put(&root);
54709 + return res;
54710 +}
54711 +
54712 +static char *
54713 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
54714 +{
54715 + char *ret;
54716 + write_seqlock(&rename_lock);
54717 + br_read_lock(&vfsmount_lock);
54718 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
54719 + PAGE_SIZE);
54720 + br_read_unlock(&vfsmount_lock);
54721 + write_sequnlock(&rename_lock);
54722 + return ret;
54723 +}
54724 +
54725 +static char *
54726 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
54727 +{
54728 + char *ret;
54729 + char *buf;
54730 + int buflen;
54731 +
54732 + write_seqlock(&rename_lock);
54733 + br_read_lock(&vfsmount_lock);
54734 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
54735 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
54736 + buflen = (int)(ret - buf);
54737 + if (buflen >= 5)
54738 + prepend(&ret, &buflen, "/proc", 5);
54739 + else
54740 + ret = strcpy(buf, "<path too long>");
54741 + br_read_unlock(&vfsmount_lock);
54742 + write_sequnlock(&rename_lock);
54743 + return ret;
54744 +}
54745 +
54746 +char *
54747 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
54748 +{
54749 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
54750 + PAGE_SIZE);
54751 +}
54752 +
54753 +char *
54754 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
54755 +{
54756 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54757 + PAGE_SIZE);
54758 +}
54759 +
54760 +char *
54761 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
54762 +{
54763 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
54764 + PAGE_SIZE);
54765 +}
54766 +
54767 +char *
54768 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
54769 +{
54770 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
54771 + PAGE_SIZE);
54772 +}
54773 +
54774 +char *
54775 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
54776 +{
54777 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
54778 + PAGE_SIZE);
54779 +}
54780 +
54781 +__inline__ __u32
54782 +to_gr_audit(const __u32 reqmode)
54783 +{
54784 + /* masks off auditable permission flags, then shifts them to create
54785 + auditing flags, and adds the special case of append auditing if
54786 + we're requesting write */
54787 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
54788 +}
54789 +
54790 +struct acl_subject_label *
54791 +lookup_subject_map(const struct acl_subject_label *userp)
54792 +{
54793 + unsigned int index = gr_shash(userp, subj_map_set.s_size);
54794 + struct subject_map *match;
54795 +
54796 + match = subj_map_set.s_hash[index];
54797 +
54798 + while (match && match->user != userp)
54799 + match = match->next;
54800 +
54801 + if (match != NULL)
54802 + return match->kernel;
54803 + else
54804 + return NULL;
54805 +}
54806 +
54807 +static void
54808 +insert_subj_map_entry(struct subject_map *subjmap)
54809 +{
54810 + unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
54811 + struct subject_map **curr;
54812 +
54813 + subjmap->prev = NULL;
54814 +
54815 + curr = &subj_map_set.s_hash[index];
54816 + if (*curr != NULL)
54817 + (*curr)->prev = subjmap;
54818 +
54819 + subjmap->next = *curr;
54820 + *curr = subjmap;
54821 +
54822 + return;
54823 +}
54824 +
54825 +static struct acl_role_label *
54826 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
54827 + const gid_t gid)
54828 +{
54829 + unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
54830 + struct acl_role_label *match;
54831 + struct role_allowed_ip *ipp;
54832 + unsigned int x;
54833 + u32 curr_ip = task->signal->curr_ip;
54834 +
54835 + task->signal->saved_ip = curr_ip;
54836 +
54837 + match = acl_role_set.r_hash[index];
54838 +
54839 + while (match) {
54840 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
54841 + for (x = 0; x < match->domain_child_num; x++) {
54842 + if (match->domain_children[x] == uid)
54843 + goto found;
54844 + }
54845 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
54846 + break;
54847 + match = match->next;
54848 + }
54849 +found:
54850 + if (match == NULL) {
54851 + try_group:
54852 + index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
54853 + match = acl_role_set.r_hash[index];
54854 +
54855 + while (match) {
54856 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
54857 + for (x = 0; x < match->domain_child_num; x++) {
54858 + if (match->domain_children[x] == gid)
54859 + goto found2;
54860 + }
54861 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
54862 + break;
54863 + match = match->next;
54864 + }
54865 +found2:
54866 + if (match == NULL)
54867 + match = default_role;
54868 + if (match->allowed_ips == NULL)
54869 + return match;
54870 + else {
54871 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
54872 + if (likely
54873 + ((ntohl(curr_ip) & ipp->netmask) ==
54874 + (ntohl(ipp->addr) & ipp->netmask)))
54875 + return match;
54876 + }
54877 + match = default_role;
54878 + }
54879 + } else if (match->allowed_ips == NULL) {
54880 + return match;
54881 + } else {
54882 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
54883 + if (likely
54884 + ((ntohl(curr_ip) & ipp->netmask) ==
54885 + (ntohl(ipp->addr) & ipp->netmask)))
54886 + return match;
54887 + }
54888 + goto try_group;
54889 + }
54890 +
54891 + return match;
54892 +}
54893 +
54894 +struct acl_subject_label *
54895 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
54896 + const struct acl_role_label *role)
54897 +{
54898 + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
54899 + struct acl_subject_label *match;
54900 +
54901 + match = role->subj_hash[index];
54902 +
54903 + while (match && (match->inode != ino || match->device != dev ||
54904 + (match->mode & GR_DELETED))) {
54905 + match = match->next;
54906 + }
54907 +
54908 + if (match && !(match->mode & GR_DELETED))
54909 + return match;
54910 + else
54911 + return NULL;
54912 +}
54913 +
54914 +struct acl_subject_label *
54915 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
54916 + const struct acl_role_label *role)
54917 +{
54918 + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
54919 + struct acl_subject_label *match;
54920 +
54921 + match = role->subj_hash[index];
54922 +
54923 + while (match && (match->inode != ino || match->device != dev ||
54924 + !(match->mode & GR_DELETED))) {
54925 + match = match->next;
54926 + }
54927 +
54928 + if (match && (match->mode & GR_DELETED))
54929 + return match;
54930 + else
54931 + return NULL;
54932 +}
54933 +
54934 +static struct acl_object_label *
54935 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
54936 + const struct acl_subject_label *subj)
54937 +{
54938 + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
54939 + struct acl_object_label *match;
54940 +
54941 + match = subj->obj_hash[index];
54942 +
54943 + while (match && (match->inode != ino || match->device != dev ||
54944 + (match->mode & GR_DELETED))) {
54945 + match = match->next;
54946 + }
54947 +
54948 + if (match && !(match->mode & GR_DELETED))
54949 + return match;
54950 + else
54951 + return NULL;
54952 +}
54953 +
54954 +static struct acl_object_label *
54955 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
54956 + const struct acl_subject_label *subj)
54957 +{
54958 + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
54959 + struct acl_object_label *match;
54960 +
54961 + match = subj->obj_hash[index];
54962 +
54963 + while (match && (match->inode != ino || match->device != dev ||
54964 + !(match->mode & GR_DELETED))) {
54965 + match = match->next;
54966 + }
54967 +
54968 + if (match && (match->mode & GR_DELETED))
54969 + return match;
54970 +
54971 + match = subj->obj_hash[index];
54972 +
54973 + while (match && (match->inode != ino || match->device != dev ||
54974 + (match->mode & GR_DELETED))) {
54975 + match = match->next;
54976 + }
54977 +
54978 + if (match && !(match->mode & GR_DELETED))
54979 + return match;
54980 + else
54981 + return NULL;
54982 +}
54983 +
54984 +static struct name_entry *
54985 +lookup_name_entry(const char *name)
54986 +{
54987 + unsigned int len = strlen(name);
54988 + unsigned int key = full_name_hash(name, len);
54989 + unsigned int index = key % name_set.n_size;
54990 + struct name_entry *match;
54991 +
54992 + match = name_set.n_hash[index];
54993 +
54994 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
54995 + match = match->next;
54996 +
54997 + return match;
54998 +}
54999 +
55000 +static struct name_entry *
55001 +lookup_name_entry_create(const char *name)
55002 +{
55003 + unsigned int len = strlen(name);
55004 + unsigned int key = full_name_hash(name, len);
55005 + unsigned int index = key % name_set.n_size;
55006 + struct name_entry *match;
55007 +
55008 + match = name_set.n_hash[index];
55009 +
55010 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
55011 + !match->deleted))
55012 + match = match->next;
55013 +
55014 + if (match && match->deleted)
55015 + return match;
55016 +
55017 + match = name_set.n_hash[index];
55018 +
55019 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
55020 + match->deleted))
55021 + match = match->next;
55022 +
55023 + if (match && !match->deleted)
55024 + return match;
55025 + else
55026 + return NULL;
55027 +}
55028 +
55029 +static struct inodev_entry *
55030 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
55031 +{
55032 + unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
55033 + struct inodev_entry *match;
55034 +
55035 + match = inodev_set.i_hash[index];
55036 +
55037 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
55038 + match = match->next;
55039 +
55040 + return match;
55041 +}
55042 +
55043 +static void
55044 +insert_inodev_entry(struct inodev_entry *entry)
55045 +{
55046 + unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
55047 + inodev_set.i_size);
55048 + struct inodev_entry **curr;
55049 +
55050 + entry->prev = NULL;
55051 +
55052 + curr = &inodev_set.i_hash[index];
55053 + if (*curr != NULL)
55054 + (*curr)->prev = entry;
55055 +
55056 + entry->next = *curr;
55057 + *curr = entry;
55058 +
55059 + return;
55060 +}
55061 +
55062 +static void
55063 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
55064 +{
55065 + unsigned int index =
55066 + gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
55067 + struct acl_role_label **curr;
55068 + struct acl_role_label *tmp, *tmp2;
55069 +
55070 + curr = &acl_role_set.r_hash[index];
55071 +
55072 + /* simple case, slot is empty, just set it to our role */
55073 + if (*curr == NULL) {
55074 + *curr = role;
55075 + } else {
55076 + /* example:
55077 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
55078 + 2 -> 3
55079 + */
55080 + /* first check to see if we can already be reached via this slot */
55081 + tmp = *curr;
55082 + while (tmp && tmp != role)
55083 + tmp = tmp->next;
55084 + if (tmp == role) {
55085 + /* we don't need to add ourselves to this slot's chain */
55086 + return;
55087 + }
55088 + /* we need to add ourselves to this chain, two cases */
55089 + if (role->next == NULL) {
55090 + /* simple case, append the current chain to our role */
55091 + role->next = *curr;
55092 + *curr = role;
55093 + } else {
55094 + /* 1 -> 2 -> 3 -> 4
55095 + 2 -> 3 -> 4
55096 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
55097 + */
55098 + /* trickier case: walk our role's chain until we find
55099 + the role for the start of the current slot's chain */
55100 + tmp = role;
55101 + tmp2 = *curr;
55102 + while (tmp->next && tmp->next != tmp2)
55103 + tmp = tmp->next;
55104 + if (tmp->next == tmp2) {
55105 + /* from example above, we found 3, so just
55106 + replace this slot's chain with ours */
55107 + *curr = role;
55108 + } else {
55109 + /* we didn't find a subset of our role's chain
55110 + in the current slot's chain, so append their
55111 + chain to ours, and set us as the first role in
55112 + the slot's chain
55113 +
55114 + we could fold this case with the case above,
55115 + but making it explicit for clarity
55116 + */
55117 + tmp->next = tmp2;
55118 + *curr = role;
55119 + }
55120 + }
55121 + }
55122 +
55123 + return;
55124 +}
55125 +
55126 +static void
55127 +insert_acl_role_label(struct acl_role_label *role)
55128 +{
55129 + int i;
55130 +
55131 + if (role_list == NULL) {
55132 + role_list = role;
55133 + role->prev = NULL;
55134 + } else {
55135 + role->prev = role_list;
55136 + role_list = role;
55137 + }
55138 +
55139 + /* used for hash chains */
55140 + role->next = NULL;
55141 +
55142 + if (role->roletype & GR_ROLE_DOMAIN) {
55143 + for (i = 0; i < role->domain_child_num; i++)
55144 + __insert_acl_role_label(role, role->domain_children[i]);
55145 + } else
55146 + __insert_acl_role_label(role, role->uidgid);
55147 +}
55148 +
55149 +static int
55150 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
55151 +{
55152 + struct name_entry **curr, *nentry;
55153 + struct inodev_entry *ientry;
55154 + unsigned int len = strlen(name);
55155 + unsigned int key = full_name_hash(name, len);
55156 + unsigned int index = key % name_set.n_size;
55157 +
55158 + curr = &name_set.n_hash[index];
55159 +
55160 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
55161 + curr = &((*curr)->next);
55162 +
55163 + if (*curr != NULL)
55164 + return 1;
55165 +
55166 + nentry = acl_alloc(sizeof (struct name_entry));
55167 + if (nentry == NULL)
55168 + return 0;
55169 + ientry = acl_alloc(sizeof (struct inodev_entry));
55170 + if (ientry == NULL)
55171 + return 0;
55172 + ientry->nentry = nentry;
55173 +
55174 + nentry->key = key;
55175 + nentry->name = name;
55176 + nentry->inode = inode;
55177 + nentry->device = device;
55178 + nentry->len = len;
55179 + nentry->deleted = deleted;
55180 +
55181 + nentry->prev = NULL;
55182 + curr = &name_set.n_hash[index];
55183 + if (*curr != NULL)
55184 + (*curr)->prev = nentry;
55185 + nentry->next = *curr;
55186 + *curr = nentry;
55187 +
55188 + /* insert us into the table searchable by inode/dev */
55189 + insert_inodev_entry(ientry);
55190 +
55191 + return 1;
55192 +}
55193 +
55194 +static void
55195 +insert_acl_obj_label(struct acl_object_label *obj,
55196 + struct acl_subject_label *subj)
55197 +{
55198 + unsigned int index =
55199 + gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
55200 + struct acl_object_label **curr;
55201 +
55202 +
55203 + obj->prev = NULL;
55204 +
55205 + curr = &subj->obj_hash[index];
55206 + if (*curr != NULL)
55207 + (*curr)->prev = obj;
55208 +
55209 + obj->next = *curr;
55210 + *curr = obj;
55211 +
55212 + return;
55213 +}
55214 +
55215 +static void
55216 +insert_acl_subj_label(struct acl_subject_label *obj,
55217 + struct acl_role_label *role)
55218 +{
55219 + unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
55220 + struct acl_subject_label **curr;
55221 +
55222 + obj->prev = NULL;
55223 +
55224 + curr = &role->subj_hash[index];
55225 + if (*curr != NULL)
55226 + (*curr)->prev = obj;
55227 +
55228 + obj->next = *curr;
55229 + *curr = obj;
55230 +
55231 + return;
55232 +}
55233 +
55234 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
55235 +
55236 +static void *
55237 +create_table(__u32 * len, int elementsize)
55238 +{
55239 + unsigned int table_sizes[] = {
55240 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
55241 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
55242 + 4194301, 8388593, 16777213, 33554393, 67108859
55243 + };
55244 + void *newtable = NULL;
55245 + unsigned int pwr = 0;
55246 +
55247 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
55248 + table_sizes[pwr] <= *len)
55249 + pwr++;
55250 +
55251 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
55252 + return newtable;
55253 +
55254 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
55255 + newtable =
55256 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
55257 + else
55258 + newtable = vmalloc(table_sizes[pwr] * elementsize);
55259 +
55260 + *len = table_sizes[pwr];
55261 +
55262 + return newtable;
55263 +}
55264 +
55265 +static int
55266 +init_variables(const struct gr_arg *arg)
55267 +{
55268 + struct task_struct *reaper = init_pid_ns.child_reaper;
55269 + unsigned int stacksize;
55270 +
55271 + subj_map_set.s_size = arg->role_db.num_subjects;
55272 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
55273 + name_set.n_size = arg->role_db.num_objects;
55274 + inodev_set.i_size = arg->role_db.num_objects;
55275 +
55276 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
55277 + !name_set.n_size || !inodev_set.i_size)
55278 + return 1;
55279 +
55280 + if (!gr_init_uidset())
55281 + return 1;
55282 +
55283 + /* set up the stack that holds allocation info */
55284 +
55285 + stacksize = arg->role_db.num_pointers + 5;
55286 +
55287 + if (!acl_alloc_stack_init(stacksize))
55288 + return 1;
55289 +
55290 + /* grab reference for the real root dentry and vfsmount */
55291 + get_fs_root(reaper->fs, &real_root);
55292 +
55293 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
55294 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
55295 +#endif
55296 +
55297 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
55298 + if (fakefs_obj_rw == NULL)
55299 + return 1;
55300 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
55301 +
55302 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
55303 + if (fakefs_obj_rwx == NULL)
55304 + return 1;
55305 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
55306 +
55307 + subj_map_set.s_hash =
55308 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
55309 + acl_role_set.r_hash =
55310 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
55311 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
55312 + inodev_set.i_hash =
55313 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
55314 +
55315 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
55316 + !name_set.n_hash || !inodev_set.i_hash)
55317 + return 1;
55318 +
55319 + memset(subj_map_set.s_hash, 0,
55320 + sizeof(struct subject_map *) * subj_map_set.s_size);
55321 + memset(acl_role_set.r_hash, 0,
55322 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
55323 + memset(name_set.n_hash, 0,
55324 + sizeof (struct name_entry *) * name_set.n_size);
55325 + memset(inodev_set.i_hash, 0,
55326 + sizeof (struct inodev_entry *) * inodev_set.i_size);
55327 +
55328 + return 0;
55329 +}
55330 +
55331 +/* free information not needed after startup
55332 + currently contains user->kernel pointer mappings for subjects
55333 +*/
55334 +
55335 +static void
55336 +free_init_variables(void)
55337 +{
55338 + __u32 i;
55339 +
55340 + if (subj_map_set.s_hash) {
55341 + for (i = 0; i < subj_map_set.s_size; i++) {
55342 + if (subj_map_set.s_hash[i]) {
55343 + kfree(subj_map_set.s_hash[i]);
55344 + subj_map_set.s_hash[i] = NULL;
55345 + }
55346 + }
55347 +
55348 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
55349 + PAGE_SIZE)
55350 + kfree(subj_map_set.s_hash);
55351 + else
55352 + vfree(subj_map_set.s_hash);
55353 + }
55354 +
55355 + return;
55356 +}
55357 +
55358 +static void
55359 +free_variables(void)
55360 +{
55361 + struct acl_subject_label *s;
55362 + struct acl_role_label *r;
55363 + struct task_struct *task, *task2;
55364 + unsigned int x;
55365 +
55366 + gr_clear_learn_entries();
55367 +
55368 + read_lock(&tasklist_lock);
55369 + do_each_thread(task2, task) {
55370 + task->acl_sp_role = 0;
55371 + task->acl_role_id = 0;
55372 + task->acl = NULL;
55373 + task->role = NULL;
55374 + } while_each_thread(task2, task);
55375 + read_unlock(&tasklist_lock);
55376 +
55377 + /* release the reference to the real root dentry and vfsmount */
55378 + path_put(&real_root);
55379 + memset(&real_root, 0, sizeof(real_root));
55380 +
55381 + /* free all object hash tables */
55382 +
55383 + FOR_EACH_ROLE_START(r)
55384 + if (r->subj_hash == NULL)
55385 + goto next_role;
55386 + FOR_EACH_SUBJECT_START(r, s, x)
55387 + if (s->obj_hash == NULL)
55388 + break;
55389 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
55390 + kfree(s->obj_hash);
55391 + else
55392 + vfree(s->obj_hash);
55393 + FOR_EACH_SUBJECT_END(s, x)
55394 + FOR_EACH_NESTED_SUBJECT_START(r, s)
55395 + if (s->obj_hash == NULL)
55396 + break;
55397 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
55398 + kfree(s->obj_hash);
55399 + else
55400 + vfree(s->obj_hash);
55401 + FOR_EACH_NESTED_SUBJECT_END(s)
55402 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
55403 + kfree(r->subj_hash);
55404 + else
55405 + vfree(r->subj_hash);
55406 + r->subj_hash = NULL;
55407 +next_role:
55408 + FOR_EACH_ROLE_END(r)
55409 +
55410 + acl_free_all();
55411 +
55412 + if (acl_role_set.r_hash) {
55413 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
55414 + PAGE_SIZE)
55415 + kfree(acl_role_set.r_hash);
55416 + else
55417 + vfree(acl_role_set.r_hash);
55418 + }
55419 + if (name_set.n_hash) {
55420 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
55421 + PAGE_SIZE)
55422 + kfree(name_set.n_hash);
55423 + else
55424 + vfree(name_set.n_hash);
55425 + }
55426 +
55427 + if (inodev_set.i_hash) {
55428 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
55429 + PAGE_SIZE)
55430 + kfree(inodev_set.i_hash);
55431 + else
55432 + vfree(inodev_set.i_hash);
55433 + }
55434 +
55435 + gr_free_uidset();
55436 +
55437 + memset(&name_set, 0, sizeof (struct name_db));
55438 + memset(&inodev_set, 0, sizeof (struct inodev_db));
55439 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
55440 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
55441 +
55442 + default_role = NULL;
55443 + kernel_role = NULL;
55444 + role_list = NULL;
55445 +
55446 + return;
55447 +}
55448 +
55449 +static __u32
55450 +count_user_objs(struct acl_object_label *userp)
55451 +{
55452 + struct acl_object_label o_tmp;
55453 + __u32 num = 0;
55454 +
55455 + while (userp) {
55456 + if (copy_from_user(&o_tmp, userp,
55457 + sizeof (struct acl_object_label)))
55458 + break;
55459 +
55460 + userp = o_tmp.prev;
55461 + num++;
55462 + }
55463 +
55464 + return num;
55465 +}
55466 +
55467 +static struct acl_subject_label *
55468 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
55469 +
55470 +static int
55471 +copy_user_glob(struct acl_object_label *obj)
55472 +{
55473 + struct acl_object_label *g_tmp, **guser;
55474 + unsigned int len;
55475 + char *tmp;
55476 +
55477 + if (obj->globbed == NULL)
55478 + return 0;
55479 +
55480 + guser = &obj->globbed;
55481 + while (*guser) {
55482 + g_tmp = (struct acl_object_label *)
55483 + acl_alloc(sizeof (struct acl_object_label));
55484 + if (g_tmp == NULL)
55485 + return -ENOMEM;
55486 +
55487 + if (copy_from_user(g_tmp, *guser,
55488 + sizeof (struct acl_object_label)))
55489 + return -EFAULT;
55490 +
55491 + len = strnlen_user(g_tmp->filename, PATH_MAX);
55492 +
55493 + if (!len || len >= PATH_MAX)
55494 + return -EINVAL;
55495 +
55496 + if ((tmp = (char *) acl_alloc(len)) == NULL)
55497 + return -ENOMEM;
55498 +
55499 + if (copy_from_user(tmp, g_tmp->filename, len))
55500 + return -EFAULT;
55501 + tmp[len-1] = '\0';
55502 + g_tmp->filename = tmp;
55503 +
55504 + *guser = g_tmp;
55505 + guser = &(g_tmp->next);
55506 + }
55507 +
55508 + return 0;
55509 +}
55510 +
55511 +static int
55512 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
55513 + struct acl_role_label *role)
55514 +{
55515 + struct acl_object_label *o_tmp;
55516 + unsigned int len;
55517 + int ret;
55518 + char *tmp;
55519 +
55520 + while (userp) {
55521 + if ((o_tmp = (struct acl_object_label *)
55522 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
55523 + return -ENOMEM;
55524 +
55525 + if (copy_from_user(o_tmp, userp,
55526 + sizeof (struct acl_object_label)))
55527 + return -EFAULT;
55528 +
55529 + userp = o_tmp->prev;
55530 +
55531 + len = strnlen_user(o_tmp->filename, PATH_MAX);
55532 +
55533 + if (!len || len >= PATH_MAX)
55534 + return -EINVAL;
55535 +
55536 + if ((tmp = (char *) acl_alloc(len)) == NULL)
55537 + return -ENOMEM;
55538 +
55539 + if (copy_from_user(tmp, o_tmp->filename, len))
55540 + return -EFAULT;
55541 + tmp[len-1] = '\0';
55542 + o_tmp->filename = tmp;
55543 +
55544 + insert_acl_obj_label(o_tmp, subj);
55545 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
55546 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
55547 + return -ENOMEM;
55548 +
55549 + ret = copy_user_glob(o_tmp);
55550 + if (ret)
55551 + return ret;
55552 +
55553 + if (o_tmp->nested) {
55554 + int already_copied;
55555 +
55556 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
55557 + if (IS_ERR(o_tmp->nested))
55558 + return PTR_ERR(o_tmp->nested);
55559 +
55560 + /* insert into nested subject list if we haven't copied this one yet
55561 + to prevent duplicate entries */
55562 + if (!already_copied) {
55563 + o_tmp->nested->next = role->hash->first;
55564 + role->hash->first = o_tmp->nested;
55565 + }
55566 + }
55567 + }
55568 +
55569 + return 0;
55570 +}
55571 +
55572 +static __u32
55573 +count_user_subjs(struct acl_subject_label *userp)
55574 +{
55575 + struct acl_subject_label s_tmp;
55576 + __u32 num = 0;
55577 +
55578 + while (userp) {
55579 + if (copy_from_user(&s_tmp, userp,
55580 + sizeof (struct acl_subject_label)))
55581 + break;
55582 +
55583 + userp = s_tmp.prev;
55584 + }
55585 +
55586 + return num;
55587 +}
55588 +
55589 +static int
55590 +copy_user_allowedips(struct acl_role_label *rolep)
55591 +{
55592 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
55593 +
55594 + ruserip = rolep->allowed_ips;
55595 +
55596 + while (ruserip) {
55597 + rlast = rtmp;
55598 +
55599 + if ((rtmp = (struct role_allowed_ip *)
55600 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
55601 + return -ENOMEM;
55602 +
55603 + if (copy_from_user(rtmp, ruserip,
55604 + sizeof (struct role_allowed_ip)))
55605 + return -EFAULT;
55606 +
55607 + ruserip = rtmp->prev;
55608 +
55609 + if (!rlast) {
55610 + rtmp->prev = NULL;
55611 + rolep->allowed_ips = rtmp;
55612 + } else {
55613 + rlast->next = rtmp;
55614 + rtmp->prev = rlast;
55615 + }
55616 +
55617 + if (!ruserip)
55618 + rtmp->next = NULL;
55619 + }
55620 +
55621 + return 0;
55622 +}
55623 +
55624 +static int
55625 +copy_user_transitions(struct acl_role_label *rolep)
55626 +{
55627 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
55628 +
55629 + unsigned int len;
55630 + char *tmp;
55631 +
55632 + rusertp = rolep->transitions;
55633 +
55634 + while (rusertp) {
55635 + rlast = rtmp;
55636 +
55637 + if ((rtmp = (struct role_transition *)
55638 + acl_alloc(sizeof (struct role_transition))) == NULL)
55639 + return -ENOMEM;
55640 +
55641 + if (copy_from_user(rtmp, rusertp,
55642 + sizeof (struct role_transition)))
55643 + return -EFAULT;
55644 +
55645 + rusertp = rtmp->prev;
55646 +
55647 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
55648 +
55649 + if (!len || len >= GR_SPROLE_LEN)
55650 + return -EINVAL;
55651 +
55652 + if ((tmp = (char *) acl_alloc(len)) == NULL)
55653 + return -ENOMEM;
55654 +
55655 + if (copy_from_user(tmp, rtmp->rolename, len))
55656 + return -EFAULT;
55657 + tmp[len-1] = '\0';
55658 + rtmp->rolename = tmp;
55659 +
55660 + if (!rlast) {
55661 + rtmp->prev = NULL;
55662 + rolep->transitions = rtmp;
55663 + } else {
55664 + rlast->next = rtmp;
55665 + rtmp->prev = rlast;
55666 + }
55667 +
55668 + if (!rusertp)
55669 + rtmp->next = NULL;
55670 + }
55671 +
55672 + return 0;
55673 +}
55674 +
55675 +static struct acl_subject_label *
55676 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
55677 +{
55678 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
55679 + unsigned int len;
55680 + char *tmp;
55681 + __u32 num_objs;
55682 + struct acl_ip_label **i_tmp, *i_utmp2;
55683 + struct gr_hash_struct ghash;
55684 + struct subject_map *subjmap;
55685 + unsigned int i_num;
55686 + int err;
55687 +
55688 + if (already_copied != NULL)
55689 + *already_copied = 0;
55690 +
55691 + s_tmp = lookup_subject_map(userp);
55692 +
55693 + /* we've already copied this subject into the kernel, just return
55694 + the reference to it, and don't copy it over again
55695 + */
55696 + if (s_tmp) {
55697 + if (already_copied != NULL)
55698 + *already_copied = 1;
55699 + return(s_tmp);
55700 + }
55701 +
55702 + if ((s_tmp = (struct acl_subject_label *)
55703 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
55704 + return ERR_PTR(-ENOMEM);
55705 +
55706 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
55707 + if (subjmap == NULL)
55708 + return ERR_PTR(-ENOMEM);
55709 +
55710 + subjmap->user = userp;
55711 + subjmap->kernel = s_tmp;
55712 + insert_subj_map_entry(subjmap);
55713 +
55714 + if (copy_from_user(s_tmp, userp,
55715 + sizeof (struct acl_subject_label)))
55716 + return ERR_PTR(-EFAULT);
55717 +
55718 + len = strnlen_user(s_tmp->filename, PATH_MAX);
55719 +
55720 + if (!len || len >= PATH_MAX)
55721 + return ERR_PTR(-EINVAL);
55722 +
55723 + if ((tmp = (char *) acl_alloc(len)) == NULL)
55724 + return ERR_PTR(-ENOMEM);
55725 +
55726 + if (copy_from_user(tmp, s_tmp->filename, len))
55727 + return ERR_PTR(-EFAULT);
55728 + tmp[len-1] = '\0';
55729 + s_tmp->filename = tmp;
55730 +
55731 + if (!strcmp(s_tmp->filename, "/"))
55732 + role->root_label = s_tmp;
55733 +
55734 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
55735 + return ERR_PTR(-EFAULT);
55736 +
55737 + /* copy user and group transition tables */
55738 +
55739 + if (s_tmp->user_trans_num) {
55740 + uid_t *uidlist;
55741 +
55742 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
55743 + if (uidlist == NULL)
55744 + return ERR_PTR(-ENOMEM);
55745 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
55746 + return ERR_PTR(-EFAULT);
55747 +
55748 + s_tmp->user_transitions = uidlist;
55749 + }
55750 +
55751 + if (s_tmp->group_trans_num) {
55752 + gid_t *gidlist;
55753 +
55754 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
55755 + if (gidlist == NULL)
55756 + return ERR_PTR(-ENOMEM);
55757 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
55758 + return ERR_PTR(-EFAULT);
55759 +
55760 + s_tmp->group_transitions = gidlist;
55761 + }
55762 +
55763 + /* set up object hash table */
55764 + num_objs = count_user_objs(ghash.first);
55765 +
55766 + s_tmp->obj_hash_size = num_objs;
55767 + s_tmp->obj_hash =
55768 + (struct acl_object_label **)
55769 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
55770 +
55771 + if (!s_tmp->obj_hash)
55772 + return ERR_PTR(-ENOMEM);
55773 +
55774 + memset(s_tmp->obj_hash, 0,
55775 + s_tmp->obj_hash_size *
55776 + sizeof (struct acl_object_label *));
55777 +
55778 + /* add in objects */
55779 + err = copy_user_objs(ghash.first, s_tmp, role);
55780 +
55781 + if (err)
55782 + return ERR_PTR(err);
55783 +
55784 + /* set pointer for parent subject */
55785 + if (s_tmp->parent_subject) {
55786 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
55787 +
55788 + if (IS_ERR(s_tmp2))
55789 + return s_tmp2;
55790 +
55791 + s_tmp->parent_subject = s_tmp2;
55792 + }
55793 +
55794 + /* add in ip acls */
55795 +
55796 + if (!s_tmp->ip_num) {
55797 + s_tmp->ips = NULL;
55798 + goto insert;
55799 + }
55800 +
55801 + i_tmp =
55802 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
55803 + sizeof (struct acl_ip_label *));
55804 +
55805 + if (!i_tmp)
55806 + return ERR_PTR(-ENOMEM);
55807 +
55808 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
55809 + *(i_tmp + i_num) =
55810 + (struct acl_ip_label *)
55811 + acl_alloc(sizeof (struct acl_ip_label));
55812 + if (!*(i_tmp + i_num))
55813 + return ERR_PTR(-ENOMEM);
55814 +
55815 + if (copy_from_user
55816 + (&i_utmp2, s_tmp->ips + i_num,
55817 + sizeof (struct acl_ip_label *)))
55818 + return ERR_PTR(-EFAULT);
55819 +
55820 + if (copy_from_user
55821 + (*(i_tmp + i_num), i_utmp2,
55822 + sizeof (struct acl_ip_label)))
55823 + return ERR_PTR(-EFAULT);
55824 +
55825 + if ((*(i_tmp + i_num))->iface == NULL)
55826 + continue;
55827 +
55828 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
55829 + if (!len || len >= IFNAMSIZ)
55830 + return ERR_PTR(-EINVAL);
55831 + tmp = acl_alloc(len);
55832 + if (tmp == NULL)
55833 + return ERR_PTR(-ENOMEM);
55834 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
55835 + return ERR_PTR(-EFAULT);
55836 + (*(i_tmp + i_num))->iface = tmp;
55837 + }
55838 +
55839 + s_tmp->ips = i_tmp;
55840 +
55841 +insert:
55842 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
55843 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
55844 + return ERR_PTR(-ENOMEM);
55845 +
55846 + return s_tmp;
55847 +}
55848 +
55849 +static int
55850 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
55851 +{
55852 + struct acl_subject_label s_pre;
55853 + struct acl_subject_label * ret;
55854 + int err;
55855 +
55856 + while (userp) {
55857 + if (copy_from_user(&s_pre, userp,
55858 + sizeof (struct acl_subject_label)))
55859 + return -EFAULT;
55860 +
55861 + ret = do_copy_user_subj(userp, role, NULL);
55862 +
55863 + err = PTR_ERR(ret);
55864 + if (IS_ERR(ret))
55865 + return err;
55866 +
55867 + insert_acl_subj_label(ret, role);
55868 +
55869 + userp = s_pre.prev;
55870 + }
55871 +
55872 + return 0;
55873 +}
55874 +
55875 +static int
55876 +copy_user_acl(struct gr_arg *arg)
55877 +{
55878 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
55879 + struct acl_subject_label *subj_list;
55880 + struct sprole_pw *sptmp;
55881 + struct gr_hash_struct *ghash;
55882 + uid_t *domainlist;
55883 + unsigned int r_num;
55884 + unsigned int len;
55885 + char *tmp;
55886 + int err = 0;
55887 + __u16 i;
55888 + __u32 num_subjs;
55889 +
55890 + /* we need a default and kernel role */
55891 + if (arg->role_db.num_roles < 2)
55892 + return -EINVAL;
55893 +
55894 + /* copy special role authentication info from userspace */
55895 +
55896 + num_sprole_pws = arg->num_sprole_pws;
55897 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
55898 +
55899 + if (!acl_special_roles && num_sprole_pws)
55900 + return -ENOMEM;
55901 +
55902 + for (i = 0; i < num_sprole_pws; i++) {
55903 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
55904 + if (!sptmp)
55905 + return -ENOMEM;
55906 + if (copy_from_user(sptmp, arg->sprole_pws + i,
55907 + sizeof (struct sprole_pw)))
55908 + return -EFAULT;
55909 +
55910 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
55911 +
55912 + if (!len || len >= GR_SPROLE_LEN)
55913 + return -EINVAL;
55914 +
55915 + if ((tmp = (char *) acl_alloc(len)) == NULL)
55916 + return -ENOMEM;
55917 +
55918 + if (copy_from_user(tmp, sptmp->rolename, len))
55919 + return -EFAULT;
55920 +
55921 + tmp[len-1] = '\0';
55922 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
55923 + printk(KERN_ALERT "Copying special role %s\n", tmp);
55924 +#endif
55925 + sptmp->rolename = tmp;
55926 + acl_special_roles[i] = sptmp;
55927 + }
55928 +
55929 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
55930 +
55931 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
55932 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
55933 +
55934 + if (!r_tmp)
55935 + return -ENOMEM;
55936 +
55937 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
55938 + sizeof (struct acl_role_label *)))
55939 + return -EFAULT;
55940 +
55941 + if (copy_from_user(r_tmp, r_utmp2,
55942 + sizeof (struct acl_role_label)))
55943 + return -EFAULT;
55944 +
55945 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
55946 +
55947 + if (!len || len >= PATH_MAX)
55948 + return -EINVAL;
55949 +
55950 + if ((tmp = (char *) acl_alloc(len)) == NULL)
55951 + return -ENOMEM;
55952 +
55953 + if (copy_from_user(tmp, r_tmp->rolename, len))
55954 + return -EFAULT;
55955 +
55956 + tmp[len-1] = '\0';
55957 + r_tmp->rolename = tmp;
55958 +
55959 + if (!strcmp(r_tmp->rolename, "default")
55960 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
55961 + default_role = r_tmp;
55962 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
55963 + kernel_role = r_tmp;
55964 + }
55965 +
55966 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
55967 + return -ENOMEM;
55968 +
55969 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
55970 + return -EFAULT;
55971 +
55972 + r_tmp->hash = ghash;
55973 +
55974 + num_subjs = count_user_subjs(r_tmp->hash->first);
55975 +
55976 + r_tmp->subj_hash_size = num_subjs;
55977 + r_tmp->subj_hash =
55978 + (struct acl_subject_label **)
55979 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
55980 +
55981 + if (!r_tmp->subj_hash)
55982 + return -ENOMEM;
55983 +
55984 + err = copy_user_allowedips(r_tmp);
55985 + if (err)
55986 + return err;
55987 +
55988 + /* copy domain info */
55989 + if (r_tmp->domain_children != NULL) {
55990 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
55991 + if (domainlist == NULL)
55992 + return -ENOMEM;
55993 +
55994 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
55995 + return -EFAULT;
55996 +
55997 + r_tmp->domain_children = domainlist;
55998 + }
55999 +
56000 + err = copy_user_transitions(r_tmp);
56001 + if (err)
56002 + return err;
56003 +
56004 + memset(r_tmp->subj_hash, 0,
56005 + r_tmp->subj_hash_size *
56006 + sizeof (struct acl_subject_label *));
56007 +
56008 + /* acquire the list of subjects, then NULL out
56009 + the list prior to parsing the subjects for this role,
56010 + as during this parsing the list is replaced with a list
56011 + of *nested* subjects for the role
56012 + */
56013 + subj_list = r_tmp->hash->first;
56014 +
56015 + /* set nested subject list to null */
56016 + r_tmp->hash->first = NULL;
56017 +
56018 + err = copy_user_subjs(subj_list, r_tmp);
56019 +
56020 + if (err)
56021 + return err;
56022 +
56023 + insert_acl_role_label(r_tmp);
56024 + }
56025 +
56026 + if (default_role == NULL || kernel_role == NULL)
56027 + return -EINVAL;
56028 +
56029 + return err;
56030 +}
56031 +
56032 +static int
56033 +gracl_init(struct gr_arg *args)
56034 +{
56035 + int error = 0;
56036 +
56037 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
56038 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
56039 +
56040 + if (init_variables(args)) {
56041 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
56042 + error = -ENOMEM;
56043 + free_variables();
56044 + goto out;
56045 + }
56046 +
56047 + error = copy_user_acl(args);
56048 + free_init_variables();
56049 + if (error) {
56050 + free_variables();
56051 + goto out;
56052 + }
56053 +
56054 + if ((error = gr_set_acls(0))) {
56055 + free_variables();
56056 + goto out;
56057 + }
56058 +
56059 + pax_open_kernel();
56060 + gr_status |= GR_READY;
56061 + pax_close_kernel();
56062 +
56063 + out:
56064 + return error;
56065 +}
56066 +
56067 +/* derived from glibc fnmatch() 0: match, 1: no match*/
56068 +
56069 +static int
56070 +glob_match(const char *p, const char *n)
56071 +{
56072 + char c;
56073 +
56074 + while ((c = *p++) != '\0') {
56075 + switch (c) {
56076 + case '?':
56077 + if (*n == '\0')
56078 + return 1;
56079 + else if (*n == '/')
56080 + return 1;
56081 + break;
56082 + case '\\':
56083 + if (*n != c)
56084 + return 1;
56085 + break;
56086 + case '*':
56087 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
56088 + if (*n == '/')
56089 + return 1;
56090 + else if (c == '?') {
56091 + if (*n == '\0')
56092 + return 1;
56093 + else
56094 + ++n;
56095 + }
56096 + }
56097 + if (c == '\0') {
56098 + return 0;
56099 + } else {
56100 + const char *endp;
56101 +
56102 + if ((endp = strchr(n, '/')) == NULL)
56103 + endp = n + strlen(n);
56104 +
56105 + if (c == '[') {
56106 + for (--p; n < endp; ++n)
56107 + if (!glob_match(p, n))
56108 + return 0;
56109 + } else if (c == '/') {
56110 + while (*n != '\0' && *n != '/')
56111 + ++n;
56112 + if (*n == '/' && !glob_match(p, n + 1))
56113 + return 0;
56114 + } else {
56115 + for (--p; n < endp; ++n)
56116 + if (*n == c && !glob_match(p, n))
56117 + return 0;
56118 + }
56119 +
56120 + return 1;
56121 + }
56122 + case '[':
56123 + {
56124 + int not;
56125 + char cold;
56126 +
56127 + if (*n == '\0' || *n == '/')
56128 + return 1;
56129 +
56130 + not = (*p == '!' || *p == '^');
56131 + if (not)
56132 + ++p;
56133 +
56134 + c = *p++;
56135 + for (;;) {
56136 + unsigned char fn = (unsigned char)*n;
56137 +
56138 + if (c == '\0')
56139 + return 1;
56140 + else {
56141 + if (c == fn)
56142 + goto matched;
56143 + cold = c;
56144 + c = *p++;
56145 +
56146 + if (c == '-' && *p != ']') {
56147 + unsigned char cend = *p++;
56148 +
56149 + if (cend == '\0')
56150 + return 1;
56151 +
56152 + if (cold <= fn && fn <= cend)
56153 + goto matched;
56154 +
56155 + c = *p++;
56156 + }
56157 + }
56158 +
56159 + if (c == ']')
56160 + break;
56161 + }
56162 + if (!not)
56163 + return 1;
56164 + break;
56165 + matched:
56166 + while (c != ']') {
56167 + if (c == '\0')
56168 + return 1;
56169 +
56170 + c = *p++;
56171 + }
56172 + if (not)
56173 + return 1;
56174 + }
56175 + break;
56176 + default:
56177 + if (c != *n)
56178 + return 1;
56179 + }
56180 +
56181 + ++n;
56182 + }
56183 +
56184 + if (*n == '\0')
56185 + return 0;
56186 +
56187 + if (*n == '/')
56188 + return 0;
56189 +
56190 + return 1;
56191 +}
56192 +
56193 +static struct acl_object_label *
56194 +chk_glob_label(struct acl_object_label *globbed,
56195 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
56196 +{
56197 + struct acl_object_label *tmp;
56198 +
56199 + if (*path == NULL)
56200 + *path = gr_to_filename_nolock(dentry, mnt);
56201 +
56202 + tmp = globbed;
56203 +
56204 + while (tmp) {
56205 + if (!glob_match(tmp->filename, *path))
56206 + return tmp;
56207 + tmp = tmp->next;
56208 + }
56209 +
56210 + return NULL;
56211 +}
56212 +
56213 +static struct acl_object_label *
56214 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
56215 + const ino_t curr_ino, const dev_t curr_dev,
56216 + const struct acl_subject_label *subj, char **path, const int checkglob)
56217 +{
56218 + struct acl_subject_label *tmpsubj;
56219 + struct acl_object_label *retval;
56220 + struct acl_object_label *retval2;
56221 +
56222 + tmpsubj = (struct acl_subject_label *) subj;
56223 + read_lock(&gr_inode_lock);
56224 + do {
56225 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
56226 + if (retval) {
56227 + if (checkglob && retval->globbed) {
56228 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
56229 + if (retval2)
56230 + retval = retval2;
56231 + }
56232 + break;
56233 + }
56234 + } while ((tmpsubj = tmpsubj->parent_subject));
56235 + read_unlock(&gr_inode_lock);
56236 +
56237 + return retval;
56238 +}
56239 +
56240 +static __inline__ struct acl_object_label *
56241 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
56242 + struct dentry *curr_dentry,
56243 + const struct acl_subject_label *subj, char **path, const int checkglob)
56244 +{
56245 + int newglob = checkglob;
56246 + ino_t inode;
56247 + dev_t device;
56248 +
56249 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
56250 + as we don't want a / * rule to match instead of the / object
56251 + don't do this for create lookups that call this function though, since they're looking up
56252 + on the parent and thus need globbing checks on all paths
56253 + */
56254 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
56255 + newglob = GR_NO_GLOB;
56256 +
56257 + spin_lock(&curr_dentry->d_lock);
56258 + inode = curr_dentry->d_inode->i_ino;
56259 + device = __get_dev(curr_dentry);
56260 + spin_unlock(&curr_dentry->d_lock);
56261 +
56262 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
56263 +}
56264 +
56265 +#ifdef CONFIG_HUGETLBFS
56266 +static inline bool
56267 +is_hugetlbfs_mnt(const struct vfsmount *mnt)
56268 +{
56269 + int i;
56270 + for (i = 0; i < HUGE_MAX_HSTATE; i++) {
56271 + if (unlikely(hugetlbfs_vfsmount[i] == mnt))
56272 + return true;
56273 + }
56274 +
56275 + return false;
56276 +}
56277 +#endif
56278 +
56279 +static struct acl_object_label *
56280 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
56281 + const struct acl_subject_label *subj, char *path, const int checkglob)
56282 +{
56283 + struct dentry *dentry = (struct dentry *) l_dentry;
56284 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
56285 + struct mount *real_mnt = real_mount(mnt);
56286 + struct acl_object_label *retval;
56287 + struct dentry *parent;
56288 +
56289 + write_seqlock(&rename_lock);
56290 + br_read_lock(&vfsmount_lock);
56291 +
56292 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
56293 +#ifdef CONFIG_NET
56294 + mnt == sock_mnt ||
56295 +#endif
56296 +#ifdef CONFIG_HUGETLBFS
56297 + (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
56298 +#endif
56299 + /* ignore Eric Biederman */
56300 + IS_PRIVATE(l_dentry->d_inode))) {
56301 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
56302 + goto out;
56303 + }
56304 +
56305 + for (;;) {
56306 + if (dentry == real_root.dentry && mnt == real_root.mnt)
56307 + break;
56308 +
56309 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
56310 + if (!mnt_has_parent(real_mnt))
56311 + break;
56312 +
56313 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
56314 + if (retval != NULL)
56315 + goto out;
56316 +
56317 + dentry = real_mnt->mnt_mountpoint;
56318 + real_mnt = real_mnt->mnt_parent;
56319 + mnt = &real_mnt->mnt;
56320 + continue;
56321 + }
56322 +
56323 + parent = dentry->d_parent;
56324 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
56325 + if (retval != NULL)
56326 + goto out;
56327 +
56328 + dentry = parent;
56329 + }
56330 +
56331 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
56332 +
56333 + /* real_root is pinned so we don't have to hold a reference */
56334 + if (retval == NULL)
56335 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
56336 +out:
56337 + br_read_unlock(&vfsmount_lock);
56338 + write_sequnlock(&rename_lock);
56339 +
56340 + BUG_ON(retval == NULL);
56341 +
56342 + return retval;
56343 +}
56344 +
56345 +static __inline__ struct acl_object_label *
56346 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
56347 + const struct acl_subject_label *subj)
56348 +{
56349 + char *path = NULL;
56350 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
56351 +}
56352 +
56353 +static __inline__ struct acl_object_label *
56354 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
56355 + const struct acl_subject_label *subj)
56356 +{
56357 + char *path = NULL;
56358 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
56359 +}
56360 +
56361 +static __inline__ struct acl_object_label *
56362 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
56363 + const struct acl_subject_label *subj, char *path)
56364 +{
56365 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
56366 +}
56367 +
56368 +static struct acl_subject_label *
56369 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
56370 + const struct acl_role_label *role)
56371 +{
56372 + struct dentry *dentry = (struct dentry *) l_dentry;
56373 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
56374 + struct mount *real_mnt = real_mount(mnt);
56375 + struct acl_subject_label *retval;
56376 + struct dentry *parent;
56377 +
56378 + write_seqlock(&rename_lock);
56379 + br_read_lock(&vfsmount_lock);
56380 +
56381 + for (;;) {
56382 + if (dentry == real_root.dentry && mnt == real_root.mnt)
56383 + break;
56384 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
56385 + if (!mnt_has_parent(real_mnt))
56386 + break;
56387 +
56388 + spin_lock(&dentry->d_lock);
56389 + read_lock(&gr_inode_lock);
56390 + retval =
56391 + lookup_acl_subj_label(dentry->d_inode->i_ino,
56392 + __get_dev(dentry), role);
56393 + read_unlock(&gr_inode_lock);
56394 + spin_unlock(&dentry->d_lock);
56395 + if (retval != NULL)
56396 + goto out;
56397 +
56398 + dentry = real_mnt->mnt_mountpoint;
56399 + real_mnt = real_mnt->mnt_parent;
56400 + mnt = &real_mnt->mnt;
56401 + continue;
56402 + }
56403 +
56404 + spin_lock(&dentry->d_lock);
56405 + read_lock(&gr_inode_lock);
56406 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
56407 + __get_dev(dentry), role);
56408 + read_unlock(&gr_inode_lock);
56409 + parent = dentry->d_parent;
56410 + spin_unlock(&dentry->d_lock);
56411 +
56412 + if (retval != NULL)
56413 + goto out;
56414 +
56415 + dentry = parent;
56416 + }
56417 +
56418 + spin_lock(&dentry->d_lock);
56419 + read_lock(&gr_inode_lock);
56420 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
56421 + __get_dev(dentry), role);
56422 + read_unlock(&gr_inode_lock);
56423 + spin_unlock(&dentry->d_lock);
56424 +
56425 + if (unlikely(retval == NULL)) {
56426 + /* real_root is pinned, we don't need to hold a reference */
56427 + read_lock(&gr_inode_lock);
56428 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
56429 + __get_dev(real_root.dentry), role);
56430 + read_unlock(&gr_inode_lock);
56431 + }
56432 +out:
56433 + br_read_unlock(&vfsmount_lock);
56434 + write_sequnlock(&rename_lock);
56435 +
56436 + BUG_ON(retval == NULL);
56437 +
56438 + return retval;
56439 +}
56440 +
56441 +static void
56442 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
56443 +{
56444 + struct task_struct *task = current;
56445 + const struct cred *cred = current_cred();
56446 +
56447 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
56448 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
56449 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
56450 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
56451 +
56452 + return;
56453 +}
56454 +
56455 +static void
56456 +gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
56457 +{
56458 + struct task_struct *task = current;
56459 + const struct cred *cred = current_cred();
56460 +
56461 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
56462 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
56463 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
56464 + 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
56465 +
56466 + return;
56467 +}
56468 +
56469 +static void
56470 +gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
56471 +{
56472 + struct task_struct *task = current;
56473 + const struct cred *cred = current_cred();
56474 +
56475 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
56476 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
56477 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
56478 + 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
56479 +
56480 + return;
56481 +}
56482 +
56483 +__u32
56484 +gr_search_file(const struct dentry * dentry, const __u32 mode,
56485 + const struct vfsmount * mnt)
56486 +{
56487 + __u32 retval = mode;
56488 + struct acl_subject_label *curracl;
56489 + struct acl_object_label *currobj;
56490 +
56491 + if (unlikely(!(gr_status & GR_READY)))
56492 + return (mode & ~GR_AUDITS);
56493 +
56494 + curracl = current->acl;
56495 +
56496 + currobj = chk_obj_label(dentry, mnt, curracl);
56497 + retval = currobj->mode & mode;
56498 +
56499 + /* if we're opening a specified transfer file for writing
56500 + (e.g. /dev/initctl), then transfer our role to init
56501 + */
56502 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
56503 + current->role->roletype & GR_ROLE_PERSIST)) {
56504 + struct task_struct *task = init_pid_ns.child_reaper;
56505 +
56506 + if (task->role != current->role) {
56507 + task->acl_sp_role = 0;
56508 + task->acl_role_id = current->acl_role_id;
56509 + task->role = current->role;
56510 + rcu_read_lock();
56511 + read_lock(&grsec_exec_file_lock);
56512 + gr_apply_subject_to_task(task);
56513 + read_unlock(&grsec_exec_file_lock);
56514 + rcu_read_unlock();
56515 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
56516 + }
56517 + }
56518 +
56519 + if (unlikely
56520 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
56521 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
56522 + __u32 new_mode = mode;
56523 +
56524 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
56525 +
56526 + retval = new_mode;
56527 +
56528 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
56529 + new_mode |= GR_INHERIT;
56530 +
56531 + if (!(mode & GR_NOLEARN))
56532 + gr_log_learn(dentry, mnt, new_mode);
56533 + }
56534 +
56535 + return retval;
56536 +}
56537 +
56538 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
56539 + const struct dentry *parent,
56540 + const struct vfsmount *mnt)
56541 +{
56542 + struct name_entry *match;
56543 + struct acl_object_label *matchpo;
56544 + struct acl_subject_label *curracl;
56545 + char *path;
56546 +
56547 + if (unlikely(!(gr_status & GR_READY)))
56548 + return NULL;
56549 +
56550 + preempt_disable();
56551 + path = gr_to_filename_rbac(new_dentry, mnt);
56552 + match = lookup_name_entry_create(path);
56553 +
56554 + curracl = current->acl;
56555 +
56556 + if (match) {
56557 + read_lock(&gr_inode_lock);
56558 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
56559 + read_unlock(&gr_inode_lock);
56560 +
56561 + if (matchpo) {
56562 + preempt_enable();
56563 + return matchpo;
56564 + }
56565 + }
56566 +
56567 + // lookup parent
56568 +
56569 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
56570 +
56571 + preempt_enable();
56572 + return matchpo;
56573 +}
56574 +
56575 +__u32
56576 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
56577 + const struct vfsmount * mnt, const __u32 mode)
56578 +{
56579 + struct acl_object_label *matchpo;
56580 + __u32 retval;
56581 +
56582 + if (unlikely(!(gr_status & GR_READY)))
56583 + return (mode & ~GR_AUDITS);
56584 +
56585 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
56586 +
56587 + retval = matchpo->mode & mode;
56588 +
56589 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
56590 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
56591 + __u32 new_mode = mode;
56592 +
56593 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
56594 +
56595 + gr_log_learn(new_dentry, mnt, new_mode);
56596 + return new_mode;
56597 + }
56598 +
56599 + return retval;
56600 +}
56601 +
56602 +__u32
56603 +gr_check_link(const struct dentry * new_dentry,
56604 + const struct dentry * parent_dentry,
56605 + const struct vfsmount * parent_mnt,
56606 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
56607 +{
56608 + struct acl_object_label *obj;
56609 + __u32 oldmode, newmode;
56610 + __u32 needmode;
56611 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
56612 + GR_DELETE | GR_INHERIT;
56613 +
56614 + if (unlikely(!(gr_status & GR_READY)))
56615 + return (GR_CREATE | GR_LINK);
56616 +
56617 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
56618 + oldmode = obj->mode;
56619 +
56620 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
56621 + newmode = obj->mode;
56622 +
56623 + needmode = newmode & checkmodes;
56624 +
56625 + // old name for hardlink must have at least the permissions of the new name
56626 + if ((oldmode & needmode) != needmode)
56627 + goto bad;
56628 +
56629 + // if old name had restrictions/auditing, make sure the new name does as well
56630 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
56631 +
56632 + // don't allow hardlinking of suid/sgid/fcapped files without permission
56633 + if (is_privileged_binary(old_dentry))
56634 + needmode |= GR_SETID;
56635 +
56636 + if ((newmode & needmode) != needmode)
56637 + goto bad;
56638 +
56639 + // enforce minimum permissions
56640 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
56641 + return newmode;
56642 +bad:
56643 + needmode = oldmode;
56644 + if (is_privileged_binary(old_dentry))
56645 + needmode |= GR_SETID;
56646 +
56647 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
56648 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
56649 + return (GR_CREATE | GR_LINK);
56650 + } else if (newmode & GR_SUPPRESS)
56651 + return GR_SUPPRESS;
56652 + else
56653 + return 0;
56654 +}
56655 +
56656 +int
56657 +gr_check_hidden_task(const struct task_struct *task)
56658 +{
56659 + if (unlikely(!(gr_status & GR_READY)))
56660 + return 0;
56661 +
56662 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
56663 + return 1;
56664 +
56665 + return 0;
56666 +}
56667 +
56668 +int
56669 +gr_check_protected_task(const struct task_struct *task)
56670 +{
56671 + if (unlikely(!(gr_status & GR_READY) || !task))
56672 + return 0;
56673 +
56674 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
56675 + task->acl != current->acl)
56676 + return 1;
56677 +
56678 + return 0;
56679 +}
56680 +
56681 +int
56682 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56683 +{
56684 + struct task_struct *p;
56685 + int ret = 0;
56686 +
56687 + if (unlikely(!(gr_status & GR_READY) || !pid))
56688 + return ret;
56689 +
56690 + read_lock(&tasklist_lock);
56691 + do_each_pid_task(pid, type, p) {
56692 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
56693 + p->acl != current->acl) {
56694 + ret = 1;
56695 + goto out;
56696 + }
56697 + } while_each_pid_task(pid, type, p);
56698 +out:
56699 + read_unlock(&tasklist_lock);
56700 +
56701 + return ret;
56702 +}
56703 +
56704 +void
56705 +gr_copy_label(struct task_struct *tsk)
56706 +{
56707 + tsk->signal->used_accept = 0;
56708 + tsk->acl_sp_role = 0;
56709 + tsk->acl_role_id = current->acl_role_id;
56710 + tsk->acl = current->acl;
56711 + tsk->role = current->role;
56712 + tsk->signal->curr_ip = current->signal->curr_ip;
56713 + tsk->signal->saved_ip = current->signal->saved_ip;
56714 + if (current->exec_file)
56715 + get_file(current->exec_file);
56716 + tsk->exec_file = current->exec_file;
56717 + tsk->is_writable = current->is_writable;
56718 + if (unlikely(current->signal->used_accept)) {
56719 + current->signal->curr_ip = 0;
56720 + current->signal->saved_ip = 0;
56721 + }
56722 +
56723 + return;
56724 +}
56725 +
56726 +static void
56727 +gr_set_proc_res(struct task_struct *task)
56728 +{
56729 + struct acl_subject_label *proc;
56730 + unsigned short i;
56731 +
56732 + proc = task->acl;
56733 +
56734 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
56735 + return;
56736 +
56737 + for (i = 0; i < RLIM_NLIMITS; i++) {
56738 + if (!(proc->resmask & (1 << i)))
56739 + continue;
56740 +
56741 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
56742 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
56743 + }
56744 +
56745 + return;
56746 +}
56747 +
56748 +extern int __gr_process_user_ban(struct user_struct *user);
56749 +
56750 +int
56751 +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
56752 +{
56753 + unsigned int i;
56754 + __u16 num;
56755 + uid_t *uidlist;
56756 + uid_t curuid;
56757 + int realok = 0;
56758 + int effectiveok = 0;
56759 + int fsok = 0;
56760 + uid_t globalreal, globaleffective, globalfs;
56761 +
56762 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56763 + struct user_struct *user;
56764 +
56765 + if (!uid_valid(real))
56766 + goto skipit;
56767 +
56768 + /* find user based on global namespace */
56769 +
56770 + globalreal = GR_GLOBAL_UID(real);
56771 +
56772 + user = find_user(make_kuid(&init_user_ns, globalreal));
56773 + if (user == NULL)
56774 + goto skipit;
56775 +
56776 + if (__gr_process_user_ban(user)) {
56777 + /* for find_user */
56778 + free_uid(user);
56779 + return 1;
56780 + }
56781 +
56782 + /* for find_user */
56783 + free_uid(user);
56784 +
56785 +skipit:
56786 +#endif
56787 +
56788 + if (unlikely(!(gr_status & GR_READY)))
56789 + return 0;
56790 +
56791 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
56792 + gr_log_learn_uid_change(real, effective, fs);
56793 +
56794 + num = current->acl->user_trans_num;
56795 + uidlist = current->acl->user_transitions;
56796 +
56797 + if (uidlist == NULL)
56798 + return 0;
56799 +
56800 + if (!uid_valid(real)) {
56801 + realok = 1;
56802 + globalreal = (uid_t)-1;
56803 + } else {
56804 + globalreal = GR_GLOBAL_UID(real);
56805 + }
56806 + if (!uid_valid(effective)) {
56807 + effectiveok = 1;
56808 + globaleffective = (uid_t)-1;
56809 + } else {
56810 + globaleffective = GR_GLOBAL_UID(effective);
56811 + }
56812 + if (!uid_valid(fs)) {
56813 + fsok = 1;
56814 + globalfs = (uid_t)-1;
56815 + } else {
56816 + globalfs = GR_GLOBAL_UID(fs);
56817 + }
56818 +
56819 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
56820 + for (i = 0; i < num; i++) {
56821 + curuid = uidlist[i];
56822 + if (globalreal == curuid)
56823 + realok = 1;
56824 + if (globaleffective == curuid)
56825 + effectiveok = 1;
56826 + if (globalfs == curuid)
56827 + fsok = 1;
56828 + }
56829 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
56830 + for (i = 0; i < num; i++) {
56831 + curuid = uidlist[i];
56832 + if (globalreal == curuid)
56833 + break;
56834 + if (globaleffective == curuid)
56835 + break;
56836 + if (globalfs == curuid)
56837 + break;
56838 + }
56839 + /* not in deny list */
56840 + if (i == num) {
56841 + realok = 1;
56842 + effectiveok = 1;
56843 + fsok = 1;
56844 + }
56845 + }
56846 +
56847 + if (realok && effectiveok && fsok)
56848 + return 0;
56849 + else {
56850 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
56851 + return 1;
56852 + }
56853 +}
56854 +
56855 +int
56856 +gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
56857 +{
56858 + unsigned int i;
56859 + __u16 num;
56860 + gid_t *gidlist;
56861 + gid_t curgid;
56862 + int realok = 0;
56863 + int effectiveok = 0;
56864 + int fsok = 0;
56865 + gid_t globalreal, globaleffective, globalfs;
56866 +
56867 + if (unlikely(!(gr_status & GR_READY)))
56868 + return 0;
56869 +
56870 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
56871 + gr_log_learn_gid_change(real, effective, fs);
56872 +
56873 + num = current->acl->group_trans_num;
56874 + gidlist = current->acl->group_transitions;
56875 +
56876 + if (gidlist == NULL)
56877 + return 0;
56878 +
56879 + if (!gid_valid(real)) {
56880 + realok = 1;
56881 + globalreal = (gid_t)-1;
56882 + } else {
56883 + globalreal = GR_GLOBAL_GID(real);
56884 + }
56885 + if (!gid_valid(effective)) {
56886 + effectiveok = 1;
56887 + globaleffective = (gid_t)-1;
56888 + } else {
56889 + globaleffective = GR_GLOBAL_GID(effective);
56890 + }
56891 + if (!gid_valid(fs)) {
56892 + fsok = 1;
56893 + globalfs = (gid_t)-1;
56894 + } else {
56895 + globalfs = GR_GLOBAL_GID(fs);
56896 + }
56897 +
56898 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
56899 + for (i = 0; i < num; i++) {
56900 + curgid = gidlist[i];
56901 + if (globalreal == curgid)
56902 + realok = 1;
56903 + if (globaleffective == curgid)
56904 + effectiveok = 1;
56905 + if (globalfs == curgid)
56906 + fsok = 1;
56907 + }
56908 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
56909 + for (i = 0; i < num; i++) {
56910 + curgid = gidlist[i];
56911 + if (globalreal == curgid)
56912 + break;
56913 + if (globaleffective == curgid)
56914 + break;
56915 + if (globalfs == curgid)
56916 + break;
56917 + }
56918 + /* not in deny list */
56919 + if (i == num) {
56920 + realok = 1;
56921 + effectiveok = 1;
56922 + fsok = 1;
56923 + }
56924 + }
56925 +
56926 + if (realok && effectiveok && fsok)
56927 + return 0;
56928 + else {
56929 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
56930 + return 1;
56931 + }
56932 +}
56933 +
56934 +extern int gr_acl_is_capable(const int cap);
56935 +
56936 +void
56937 +gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
56938 +{
56939 + struct acl_role_label *role = task->role;
56940 + struct acl_subject_label *subj = NULL;
56941 + struct acl_object_label *obj;
56942 + struct file *filp;
56943 + uid_t uid;
56944 + gid_t gid;
56945 +
56946 + if (unlikely(!(gr_status & GR_READY)))
56947 + return;
56948 +
56949 + uid = GR_GLOBAL_UID(kuid);
56950 + gid = GR_GLOBAL_GID(kgid);
56951 +
56952 + filp = task->exec_file;
56953 +
56954 + /* kernel process, we'll give them the kernel role */
56955 + if (unlikely(!filp)) {
56956 + task->role = kernel_role;
56957 + task->acl = kernel_role->root_label;
56958 + return;
56959 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
56960 + role = lookup_acl_role_label(task, uid, gid);
56961 +
56962 + /* don't change the role if we're not a privileged process */
56963 + if (role && task->role != role &&
56964 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
56965 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
56966 + return;
56967 +
56968 + /* perform subject lookup in possibly new role
56969 + we can use this result below in the case where role == task->role
56970 + */
56971 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
56972 +
56973 + /* if we changed uid/gid, but result in the same role
56974 + and are using inheritance, don't lose the inherited subject
56975 + if current subject is other than what normal lookup
56976 + would result in, we arrived via inheritance, don't
56977 + lose subject
56978 + */
56979 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
56980 + (subj == task->acl)))
56981 + task->acl = subj;
56982 +
56983 + task->role = role;
56984 +
56985 + task->is_writable = 0;
56986 +
56987 + /* ignore additional mmap checks for processes that are writable
56988 + by the default ACL */
56989 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56990 + if (unlikely(obj->mode & GR_WRITE))
56991 + task->is_writable = 1;
56992 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
56993 + if (unlikely(obj->mode & GR_WRITE))
56994 + task->is_writable = 1;
56995 +
56996 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56997 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
56998 +#endif
56999 +
57000 + gr_set_proc_res(task);
57001 +
57002 + return;
57003 +}
57004 +
57005 +int
57006 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57007 + const int unsafe_flags)
57008 +{
57009 + struct task_struct *task = current;
57010 + struct acl_subject_label *newacl;
57011 + struct acl_object_label *obj;
57012 + __u32 retmode;
57013 +
57014 + if (unlikely(!(gr_status & GR_READY)))
57015 + return 0;
57016 +
57017 + newacl = chk_subj_label(dentry, mnt, task->role);
57018 +
57019 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
57020 + did an exec
57021 + */
57022 + rcu_read_lock();
57023 + read_lock(&tasklist_lock);
57024 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
57025 + (task->parent->acl->mode & GR_POVERRIDE))) {
57026 + read_unlock(&tasklist_lock);
57027 + rcu_read_unlock();
57028 + goto skip_check;
57029 + }
57030 + read_unlock(&tasklist_lock);
57031 + rcu_read_unlock();
57032 +
57033 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
57034 + !(task->role->roletype & GR_ROLE_GOD) &&
57035 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
57036 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
57037 + if (unsafe_flags & LSM_UNSAFE_SHARE)
57038 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
57039 + else
57040 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
57041 + return -EACCES;
57042 + }
57043 +
57044 +skip_check:
57045 +
57046 + obj = chk_obj_label(dentry, mnt, task->acl);
57047 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
57048 +
57049 + if (!(task->acl->mode & GR_INHERITLEARN) &&
57050 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
57051 + if (obj->nested)
57052 + task->acl = obj->nested;
57053 + else
57054 + task->acl = newacl;
57055 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
57056 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
57057 +
57058 + task->is_writable = 0;
57059 +
57060 + /* ignore additional mmap checks for processes that are writable
57061 + by the default ACL */
57062 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
57063 + if (unlikely(obj->mode & GR_WRITE))
57064 + task->is_writable = 1;
57065 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
57066 + if (unlikely(obj->mode & GR_WRITE))
57067 + task->is_writable = 1;
57068 +
57069 + gr_set_proc_res(task);
57070 +
57071 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57072 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
57073 +#endif
57074 + return 0;
57075 +}
57076 +
57077 +/* always called with valid inodev ptr */
57078 +static void
57079 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
57080 +{
57081 + struct acl_object_label *matchpo;
57082 + struct acl_subject_label *matchps;
57083 + struct acl_subject_label *subj;
57084 + struct acl_role_label *role;
57085 + unsigned int x;
57086 +
57087 + FOR_EACH_ROLE_START(role)
57088 + FOR_EACH_SUBJECT_START(role, subj, x)
57089 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
57090 + matchpo->mode |= GR_DELETED;
57091 + FOR_EACH_SUBJECT_END(subj,x)
57092 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
57093 + /* nested subjects aren't in the role's subj_hash table */
57094 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
57095 + matchpo->mode |= GR_DELETED;
57096 + FOR_EACH_NESTED_SUBJECT_END(subj)
57097 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
57098 + matchps->mode |= GR_DELETED;
57099 + FOR_EACH_ROLE_END(role)
57100 +
57101 + inodev->nentry->deleted = 1;
57102 +
57103 + return;
57104 +}
57105 +
57106 +void
57107 +gr_handle_delete(const ino_t ino, const dev_t dev)
57108 +{
57109 + struct inodev_entry *inodev;
57110 +
57111 + if (unlikely(!(gr_status & GR_READY)))
57112 + return;
57113 +
57114 + write_lock(&gr_inode_lock);
57115 + inodev = lookup_inodev_entry(ino, dev);
57116 + if (inodev != NULL)
57117 + do_handle_delete(inodev, ino, dev);
57118 + write_unlock(&gr_inode_lock);
57119 +
57120 + return;
57121 +}
57122 +
57123 +static void
57124 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
57125 + const ino_t newinode, const dev_t newdevice,
57126 + struct acl_subject_label *subj)
57127 +{
57128 + unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
57129 + struct acl_object_label *match;
57130 +
57131 + match = subj->obj_hash[index];
57132 +
57133 + while (match && (match->inode != oldinode ||
57134 + match->device != olddevice ||
57135 + !(match->mode & GR_DELETED)))
57136 + match = match->next;
57137 +
57138 + if (match && (match->inode == oldinode)
57139 + && (match->device == olddevice)
57140 + && (match->mode & GR_DELETED)) {
57141 + if (match->prev == NULL) {
57142 + subj->obj_hash[index] = match->next;
57143 + if (match->next != NULL)
57144 + match->next->prev = NULL;
57145 + } else {
57146 + match->prev->next = match->next;
57147 + if (match->next != NULL)
57148 + match->next->prev = match->prev;
57149 + }
57150 + match->prev = NULL;
57151 + match->next = NULL;
57152 + match->inode = newinode;
57153 + match->device = newdevice;
57154 + match->mode &= ~GR_DELETED;
57155 +
57156 + insert_acl_obj_label(match, subj);
57157 + }
57158 +
57159 + return;
57160 +}
57161 +
57162 +static void
57163 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
57164 + const ino_t newinode, const dev_t newdevice,
57165 + struct acl_role_label *role)
57166 +{
57167 + unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
57168 + struct acl_subject_label *match;
57169 +
57170 + match = role->subj_hash[index];
57171 +
57172 + while (match && (match->inode != oldinode ||
57173 + match->device != olddevice ||
57174 + !(match->mode & GR_DELETED)))
57175 + match = match->next;
57176 +
57177 + if (match && (match->inode == oldinode)
57178 + && (match->device == olddevice)
57179 + && (match->mode & GR_DELETED)) {
57180 + if (match->prev == NULL) {
57181 + role->subj_hash[index] = match->next;
57182 + if (match->next != NULL)
57183 + match->next->prev = NULL;
57184 + } else {
57185 + match->prev->next = match->next;
57186 + if (match->next != NULL)
57187 + match->next->prev = match->prev;
57188 + }
57189 + match->prev = NULL;
57190 + match->next = NULL;
57191 + match->inode = newinode;
57192 + match->device = newdevice;
57193 + match->mode &= ~GR_DELETED;
57194 +
57195 + insert_acl_subj_label(match, role);
57196 + }
57197 +
57198 + return;
57199 +}
57200 +
57201 +static void
57202 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
57203 + const ino_t newinode, const dev_t newdevice)
57204 +{
57205 + unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
57206 + struct inodev_entry *match;
57207 +
57208 + match = inodev_set.i_hash[index];
57209 +
57210 + while (match && (match->nentry->inode != oldinode ||
57211 + match->nentry->device != olddevice || !match->nentry->deleted))
57212 + match = match->next;
57213 +
57214 + if (match && (match->nentry->inode == oldinode)
57215 + && (match->nentry->device == olddevice) &&
57216 + match->nentry->deleted) {
57217 + if (match->prev == NULL) {
57218 + inodev_set.i_hash[index] = match->next;
57219 + if (match->next != NULL)
57220 + match->next->prev = NULL;
57221 + } else {
57222 + match->prev->next = match->next;
57223 + if (match->next != NULL)
57224 + match->next->prev = match->prev;
57225 + }
57226 + match->prev = NULL;
57227 + match->next = NULL;
57228 + match->nentry->inode = newinode;
57229 + match->nentry->device = newdevice;
57230 + match->nentry->deleted = 0;
57231 +
57232 + insert_inodev_entry(match);
57233 + }
57234 +
57235 + return;
57236 +}
57237 +
57238 +static void
57239 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
57240 +{
57241 + struct acl_subject_label *subj;
57242 + struct acl_role_label *role;
57243 + unsigned int x;
57244 +
57245 + FOR_EACH_ROLE_START(role)
57246 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
57247 +
57248 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
57249 + if ((subj->inode == ino) && (subj->device == dev)) {
57250 + subj->inode = ino;
57251 + subj->device = dev;
57252 + }
57253 + /* nested subjects aren't in the role's subj_hash table */
57254 + update_acl_obj_label(matchn->inode, matchn->device,
57255 + ino, dev, subj);
57256 + FOR_EACH_NESTED_SUBJECT_END(subj)
57257 + FOR_EACH_SUBJECT_START(role, subj, x)
57258 + update_acl_obj_label(matchn->inode, matchn->device,
57259 + ino, dev, subj);
57260 + FOR_EACH_SUBJECT_END(subj,x)
57261 + FOR_EACH_ROLE_END(role)
57262 +
57263 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
57264 +
57265 + return;
57266 +}
57267 +
57268 +static void
57269 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
57270 + const struct vfsmount *mnt)
57271 +{
57272 + ino_t ino = dentry->d_inode->i_ino;
57273 + dev_t dev = __get_dev(dentry);
57274 +
57275 + __do_handle_create(matchn, ino, dev);
57276 +
57277 + return;
57278 +}
57279 +
57280 +void
57281 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57282 +{
57283 + struct name_entry *matchn;
57284 +
57285 + if (unlikely(!(gr_status & GR_READY)))
57286 + return;
57287 +
57288 + preempt_disable();
57289 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
57290 +
57291 + if (unlikely((unsigned long)matchn)) {
57292 + write_lock(&gr_inode_lock);
57293 + do_handle_create(matchn, dentry, mnt);
57294 + write_unlock(&gr_inode_lock);
57295 + }
57296 + preempt_enable();
57297 +
57298 + return;
57299 +}
57300 +
57301 +void
57302 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
57303 +{
57304 + struct name_entry *matchn;
57305 +
57306 + if (unlikely(!(gr_status & GR_READY)))
57307 + return;
57308 +
57309 + preempt_disable();
57310 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
57311 +
57312 + if (unlikely((unsigned long)matchn)) {
57313 + write_lock(&gr_inode_lock);
57314 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
57315 + write_unlock(&gr_inode_lock);
57316 + }
57317 + preempt_enable();
57318 +
57319 + return;
57320 +}
57321 +
57322 +void
57323 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57324 + struct dentry *old_dentry,
57325 + struct dentry *new_dentry,
57326 + struct vfsmount *mnt, const __u8 replace)
57327 +{
57328 + struct name_entry *matchn;
57329 + struct inodev_entry *inodev;
57330 + struct inode *inode = new_dentry->d_inode;
57331 + ino_t old_ino = old_dentry->d_inode->i_ino;
57332 + dev_t old_dev = __get_dev(old_dentry);
57333 +
57334 + /* vfs_rename swaps the name and parent link for old_dentry and
57335 + new_dentry
57336 + at this point, old_dentry has the new name, parent link, and inode
57337 + for the renamed file
57338 + if a file is being replaced by a rename, new_dentry has the inode
57339 + and name for the replaced file
57340 + */
57341 +
57342 + if (unlikely(!(gr_status & GR_READY)))
57343 + return;
57344 +
57345 + preempt_disable();
57346 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
57347 +
57348 + /* we wouldn't have to check d_inode if it weren't for
57349 + NFS silly-renaming
57350 + */
57351 +
57352 + write_lock(&gr_inode_lock);
57353 + if (unlikely(replace && inode)) {
57354 + ino_t new_ino = inode->i_ino;
57355 + dev_t new_dev = __get_dev(new_dentry);
57356 +
57357 + inodev = lookup_inodev_entry(new_ino, new_dev);
57358 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
57359 + do_handle_delete(inodev, new_ino, new_dev);
57360 + }
57361 +
57362 + inodev = lookup_inodev_entry(old_ino, old_dev);
57363 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
57364 + do_handle_delete(inodev, old_ino, old_dev);
57365 +
57366 + if (unlikely((unsigned long)matchn))
57367 + do_handle_create(matchn, old_dentry, mnt);
57368 +
57369 + write_unlock(&gr_inode_lock);
57370 + preempt_enable();
57371 +
57372 + return;
57373 +}
57374 +
57375 +static int
57376 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
57377 + unsigned char **sum)
57378 +{
57379 + struct acl_role_label *r;
57380 + struct role_allowed_ip *ipp;
57381 + struct role_transition *trans;
57382 + unsigned int i;
57383 + int found = 0;
57384 + u32 curr_ip = current->signal->curr_ip;
57385 +
57386 + current->signal->saved_ip = curr_ip;
57387 +
57388 + /* check transition table */
57389 +
57390 + for (trans = current->role->transitions; trans; trans = trans->next) {
57391 + if (!strcmp(rolename, trans->rolename)) {
57392 + found = 1;
57393 + break;
57394 + }
57395 + }
57396 +
57397 + if (!found)
57398 + return 0;
57399 +
57400 + /* handle special roles that do not require authentication
57401 + and check ip */
57402 +
57403 + FOR_EACH_ROLE_START(r)
57404 + if (!strcmp(rolename, r->rolename) &&
57405 + (r->roletype & GR_ROLE_SPECIAL)) {
57406 + found = 0;
57407 + if (r->allowed_ips != NULL) {
57408 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
57409 + if ((ntohl(curr_ip) & ipp->netmask) ==
57410 + (ntohl(ipp->addr) & ipp->netmask))
57411 + found = 1;
57412 + }
57413 + } else
57414 + found = 2;
57415 + if (!found)
57416 + return 0;
57417 +
57418 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
57419 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
57420 + *salt = NULL;
57421 + *sum = NULL;
57422 + return 1;
57423 + }
57424 + }
57425 + FOR_EACH_ROLE_END(r)
57426 +
57427 + for (i = 0; i < num_sprole_pws; i++) {
57428 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
57429 + *salt = acl_special_roles[i]->salt;
57430 + *sum = acl_special_roles[i]->sum;
57431 + return 1;
57432 + }
57433 + }
57434 +
57435 + return 0;
57436 +}
57437 +
57438 +static void
57439 +assign_special_role(char *rolename)
57440 +{
57441 + struct acl_object_label *obj;
57442 + struct acl_role_label *r;
57443 + struct acl_role_label *assigned = NULL;
57444 + struct task_struct *tsk;
57445 + struct file *filp;
57446 +
57447 + FOR_EACH_ROLE_START(r)
57448 + if (!strcmp(rolename, r->rolename) &&
57449 + (r->roletype & GR_ROLE_SPECIAL)) {
57450 + assigned = r;
57451 + break;
57452 + }
57453 + FOR_EACH_ROLE_END(r)
57454 +
57455 + if (!assigned)
57456 + return;
57457 +
57458 + read_lock(&tasklist_lock);
57459 + read_lock(&grsec_exec_file_lock);
57460 +
57461 + tsk = current->real_parent;
57462 + if (tsk == NULL)
57463 + goto out_unlock;
57464 +
57465 + filp = tsk->exec_file;
57466 + if (filp == NULL)
57467 + goto out_unlock;
57468 +
57469 + tsk->is_writable = 0;
57470 +
57471 + tsk->acl_sp_role = 1;
57472 + tsk->acl_role_id = ++acl_sp_role_value;
57473 + tsk->role = assigned;
57474 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
57475 +
57476 + /* ignore additional mmap checks for processes that are writable
57477 + by the default ACL */
57478 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
57479 + if (unlikely(obj->mode & GR_WRITE))
57480 + tsk->is_writable = 1;
57481 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
57482 + if (unlikely(obj->mode & GR_WRITE))
57483 + tsk->is_writable = 1;
57484 +
57485 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57486 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
57487 +#endif
57488 +
57489 +out_unlock:
57490 + read_unlock(&grsec_exec_file_lock);
57491 + read_unlock(&tasklist_lock);
57492 + return;
57493 +}
57494 +
57495 +int gr_check_secure_terminal(struct task_struct *task)
57496 +{
57497 + struct task_struct *p, *p2, *p3;
57498 + struct files_struct *files;
57499 + struct fdtable *fdt;
57500 + struct file *our_file = NULL, *file;
57501 + int i;
57502 +
57503 + if (task->signal->tty == NULL)
57504 + return 1;
57505 +
57506 + files = get_files_struct(task);
57507 + if (files != NULL) {
57508 + rcu_read_lock();
57509 + fdt = files_fdtable(files);
57510 + for (i=0; i < fdt->max_fds; i++) {
57511 + file = fcheck_files(files, i);
57512 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
57513 + get_file(file);
57514 + our_file = file;
57515 + }
57516 + }
57517 + rcu_read_unlock();
57518 + put_files_struct(files);
57519 + }
57520 +
57521 + if (our_file == NULL)
57522 + return 1;
57523 +
57524 + read_lock(&tasklist_lock);
57525 + do_each_thread(p2, p) {
57526 + files = get_files_struct(p);
57527 + if (files == NULL ||
57528 + (p->signal && p->signal->tty == task->signal->tty)) {
57529 + if (files != NULL)
57530 + put_files_struct(files);
57531 + continue;
57532 + }
57533 + rcu_read_lock();
57534 + fdt = files_fdtable(files);
57535 + for (i=0; i < fdt->max_fds; i++) {
57536 + file = fcheck_files(files, i);
57537 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
57538 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
57539 + p3 = task;
57540 + while (p3->pid > 0) {
57541 + if (p3 == p)
57542 + break;
57543 + p3 = p3->real_parent;
57544 + }
57545 + if (p3 == p)
57546 + break;
57547 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
57548 + gr_handle_alertkill(p);
57549 + rcu_read_unlock();
57550 + put_files_struct(files);
57551 + read_unlock(&tasklist_lock);
57552 + fput(our_file);
57553 + return 0;
57554 + }
57555 + }
57556 + rcu_read_unlock();
57557 + put_files_struct(files);
57558 + } while_each_thread(p2, p);
57559 + read_unlock(&tasklist_lock);
57560 +
57561 + fput(our_file);
57562 + return 1;
57563 +}
57564 +
57565 +static int gr_rbac_disable(void *unused)
57566 +{
57567 + pax_open_kernel();
57568 + gr_status &= ~GR_READY;
57569 + pax_close_kernel();
57570 +
57571 + return 0;
57572 +}
57573 +
57574 +ssize_t
57575 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
57576 +{
57577 + struct gr_arg_wrapper uwrap;
57578 + unsigned char *sprole_salt = NULL;
57579 + unsigned char *sprole_sum = NULL;
57580 + int error = sizeof (struct gr_arg_wrapper);
57581 + int error2 = 0;
57582 +
57583 + mutex_lock(&gr_dev_mutex);
57584 +
57585 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
57586 + error = -EPERM;
57587 + goto out;
57588 + }
57589 +
57590 + if (count != sizeof (struct gr_arg_wrapper)) {
57591 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
57592 + error = -EINVAL;
57593 + goto out;
57594 + }
57595 +
57596 +
57597 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
57598 + gr_auth_expires = 0;
57599 + gr_auth_attempts = 0;
57600 + }
57601 +
57602 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
57603 + error = -EFAULT;
57604 + goto out;
57605 + }
57606 +
57607 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
57608 + error = -EINVAL;
57609 + goto out;
57610 + }
57611 +
57612 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
57613 + error = -EFAULT;
57614 + goto out;
57615 + }
57616 +
57617 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
57618 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
57619 + time_after(gr_auth_expires, get_seconds())) {
57620 + error = -EBUSY;
57621 + goto out;
57622 + }
57623 +
57624 + /* if non-root trying to do anything other than use a special role,
57625 + do not attempt authentication, do not count towards authentication
57626 + locking
57627 + */
57628 +
57629 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
57630 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
57631 + !uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
57632 + error = -EPERM;
57633 + goto out;
57634 + }
57635 +
57636 + /* ensure pw and special role name are null terminated */
57637 +
57638 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
57639 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
57640 +
57641 + /* Okay.
57642 + * We have our enough of the argument structure..(we have yet
57643 + * to copy_from_user the tables themselves) . Copy the tables
57644 + * only if we need them, i.e. for loading operations. */
57645 +
57646 + switch (gr_usermode->mode) {
57647 + case GR_STATUS:
57648 + if (gr_status & GR_READY) {
57649 + error = 1;
57650 + if (!gr_check_secure_terminal(current))
57651 + error = 3;
57652 + } else
57653 + error = 2;
57654 + goto out;
57655 + case GR_SHUTDOWN:
57656 + if ((gr_status & GR_READY)
57657 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
57658 + stop_machine(gr_rbac_disable, NULL, NULL);
57659 + free_variables();
57660 + memset(gr_usermode, 0, sizeof (struct gr_arg));
57661 + memset(gr_system_salt, 0, GR_SALT_LEN);
57662 + memset(gr_system_sum, 0, GR_SHA_LEN);
57663 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
57664 + } else if (gr_status & GR_READY) {
57665 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
57666 + error = -EPERM;
57667 + } else {
57668 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
57669 + error = -EAGAIN;
57670 + }
57671 + break;
57672 + case GR_ENABLE:
57673 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
57674 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
57675 + else {
57676 + if (gr_status & GR_READY)
57677 + error = -EAGAIN;
57678 + else
57679 + error = error2;
57680 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
57681 + }
57682 + break;
57683 + case GR_RELOAD:
57684 + if (!(gr_status & GR_READY)) {
57685 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
57686 + error = -EAGAIN;
57687 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
57688 + stop_machine(gr_rbac_disable, NULL, NULL);
57689 + free_variables();
57690 + error2 = gracl_init(gr_usermode);
57691 + if (!error2)
57692 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
57693 + else {
57694 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
57695 + error = error2;
57696 + }
57697 + } else {
57698 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
57699 + error = -EPERM;
57700 + }
57701 + break;
57702 + case GR_SEGVMOD:
57703 + if (unlikely(!(gr_status & GR_READY))) {
57704 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
57705 + error = -EAGAIN;
57706 + break;
57707 + }
57708 +
57709 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
57710 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
57711 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
57712 + struct acl_subject_label *segvacl;
57713 + segvacl =
57714 + lookup_acl_subj_label(gr_usermode->segv_inode,
57715 + gr_usermode->segv_device,
57716 + current->role);
57717 + if (segvacl) {
57718 + segvacl->crashes = 0;
57719 + segvacl->expires = 0;
57720 + }
57721 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
57722 + gr_remove_uid(gr_usermode->segv_uid);
57723 + }
57724 + } else {
57725 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
57726 + error = -EPERM;
57727 + }
57728 + break;
57729 + case GR_SPROLE:
57730 + case GR_SPROLEPAM:
57731 + if (unlikely(!(gr_status & GR_READY))) {
57732 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
57733 + error = -EAGAIN;
57734 + break;
57735 + }
57736 +
57737 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
57738 + current->role->expires = 0;
57739 + current->role->auth_attempts = 0;
57740 + }
57741 +
57742 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
57743 + time_after(current->role->expires, get_seconds())) {
57744 + error = -EBUSY;
57745 + goto out;
57746 + }
57747 +
57748 + if (lookup_special_role_auth
57749 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
57750 + && ((!sprole_salt && !sprole_sum)
57751 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
57752 + char *p = "";
57753 + assign_special_role(gr_usermode->sp_role);
57754 + read_lock(&tasklist_lock);
57755 + if (current->real_parent)
57756 + p = current->real_parent->role->rolename;
57757 + read_unlock(&tasklist_lock);
57758 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
57759 + p, acl_sp_role_value);
57760 + } else {
57761 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
57762 + error = -EPERM;
57763 + if(!(current->role->auth_attempts++))
57764 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
57765 +
57766 + goto out;
57767 + }
57768 + break;
57769 + case GR_UNSPROLE:
57770 + if (unlikely(!(gr_status & GR_READY))) {
57771 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
57772 + error = -EAGAIN;
57773 + break;
57774 + }
57775 +
57776 + if (current->role->roletype & GR_ROLE_SPECIAL) {
57777 + char *p = "";
57778 + int i = 0;
57779 +
57780 + read_lock(&tasklist_lock);
57781 + if (current->real_parent) {
57782 + p = current->real_parent->role->rolename;
57783 + i = current->real_parent->acl_role_id;
57784 + }
57785 + read_unlock(&tasklist_lock);
57786 +
57787 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
57788 + gr_set_acls(1);
57789 + } else {
57790 + error = -EPERM;
57791 + goto out;
57792 + }
57793 + break;
57794 + default:
57795 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
57796 + error = -EINVAL;
57797 + break;
57798 + }
57799 +
57800 + if (error != -EPERM)
57801 + goto out;
57802 +
57803 + if(!(gr_auth_attempts++))
57804 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
57805 +
57806 + out:
57807 + mutex_unlock(&gr_dev_mutex);
57808 + return error;
57809 +}
57810 +
57811 +/* must be called with
57812 + rcu_read_lock();
57813 + read_lock(&tasklist_lock);
57814 + read_lock(&grsec_exec_file_lock);
57815 +*/
57816 +int gr_apply_subject_to_task(struct task_struct *task)
57817 +{
57818 + struct acl_object_label *obj;
57819 + char *tmpname;
57820 + struct acl_subject_label *tmpsubj;
57821 + struct file *filp;
57822 + struct name_entry *nmatch;
57823 +
57824 + filp = task->exec_file;
57825 + if (filp == NULL)
57826 + return 0;
57827 +
57828 + /* the following is to apply the correct subject
57829 + on binaries running when the RBAC system
57830 + is enabled, when the binaries have been
57831 + replaced or deleted since their execution
57832 + -----
57833 + when the RBAC system starts, the inode/dev
57834 + from exec_file will be one the RBAC system
57835 + is unaware of. It only knows the inode/dev
57836 + of the present file on disk, or the absence
57837 + of it.
57838 + */
57839 + preempt_disable();
57840 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
57841 +
57842 + nmatch = lookup_name_entry(tmpname);
57843 + preempt_enable();
57844 + tmpsubj = NULL;
57845 + if (nmatch) {
57846 + if (nmatch->deleted)
57847 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
57848 + else
57849 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
57850 + if (tmpsubj != NULL)
57851 + task->acl = tmpsubj;
57852 + }
57853 + if (tmpsubj == NULL)
57854 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
57855 + task->role);
57856 + if (task->acl) {
57857 + task->is_writable = 0;
57858 + /* ignore additional mmap checks for processes that are writable
57859 + by the default ACL */
57860 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
57861 + if (unlikely(obj->mode & GR_WRITE))
57862 + task->is_writable = 1;
57863 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
57864 + if (unlikely(obj->mode & GR_WRITE))
57865 + task->is_writable = 1;
57866 +
57867 + gr_set_proc_res(task);
57868 +
57869 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57870 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
57871 +#endif
57872 + } else {
57873 + return 1;
57874 + }
57875 +
57876 + return 0;
57877 +}
57878 +
57879 +int
57880 +gr_set_acls(const int type)
57881 +{
57882 + struct task_struct *task, *task2;
57883 + struct acl_role_label *role = current->role;
57884 + __u16 acl_role_id = current->acl_role_id;
57885 + const struct cred *cred;
57886 + int ret;
57887 +
57888 + rcu_read_lock();
57889 + read_lock(&tasklist_lock);
57890 + read_lock(&grsec_exec_file_lock);
57891 + do_each_thread(task2, task) {
57892 + /* check to see if we're called from the exit handler,
57893 + if so, only replace ACLs that have inherited the admin
57894 + ACL */
57895 +
57896 + if (type && (task->role != role ||
57897 + task->acl_role_id != acl_role_id))
57898 + continue;
57899 +
57900 + task->acl_role_id = 0;
57901 + task->acl_sp_role = 0;
57902 +
57903 + if (task->exec_file) {
57904 + cred = __task_cred(task);
57905 + task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
57906 + ret = gr_apply_subject_to_task(task);
57907 + if (ret) {
57908 + read_unlock(&grsec_exec_file_lock);
57909 + read_unlock(&tasklist_lock);
57910 + rcu_read_unlock();
57911 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
57912 + return ret;
57913 + }
57914 + } else {
57915 + // it's a kernel process
57916 + task->role = kernel_role;
57917 + task->acl = kernel_role->root_label;
57918 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
57919 + task->acl->mode &= ~GR_PROCFIND;
57920 +#endif
57921 + }
57922 + } while_each_thread(task2, task);
57923 + read_unlock(&grsec_exec_file_lock);
57924 + read_unlock(&tasklist_lock);
57925 + rcu_read_unlock();
57926 +
57927 + return 0;
57928 +}
57929 +
57930 +#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
57931 +static const unsigned long res_learn_bumps[GR_NLIMITS] = {
57932 + [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
57933 + [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
57934 + [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
57935 + [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
57936 + [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
57937 + [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
57938 + [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
57939 + [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
57940 + [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
57941 + [RLIMIT_AS] = GR_RLIM_AS_BUMP,
57942 + [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
57943 + [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
57944 + [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
57945 + [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
57946 + [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
57947 + [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
57948 +};
57949 +
57950 +void
57951 +gr_learn_resource(const struct task_struct *task,
57952 + const int res, const unsigned long wanted, const int gt)
57953 +{
57954 + struct acl_subject_label *acl;
57955 + const struct cred *cred;
57956 +
57957 + if (unlikely((gr_status & GR_READY) &&
57958 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
57959 + goto skip_reslog;
57960 +
57961 + gr_log_resource(task, res, wanted, gt);
57962 +skip_reslog:
57963 +
57964 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
57965 + return;
57966 +
57967 + acl = task->acl;
57968 +
57969 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
57970 + !(acl->resmask & (1 << (unsigned short) res))))
57971 + return;
57972 +
57973 + if (wanted >= acl->res[res].rlim_cur) {
57974 + unsigned long res_add;
57975 +
57976 + res_add = wanted + res_learn_bumps[res];
57977 +
57978 + acl->res[res].rlim_cur = res_add;
57979 +
57980 + if (wanted > acl->res[res].rlim_max)
57981 + acl->res[res].rlim_max = res_add;
57982 +
57983 + /* only log the subject filename, since resource logging is supported for
57984 + single-subject learning only */
57985 + rcu_read_lock();
57986 + cred = __task_cred(task);
57987 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
57988 + task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
57989 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
57990 + "", (unsigned long) res, &task->signal->saved_ip);
57991 + rcu_read_unlock();
57992 + }
57993 +
57994 + return;
57995 +}
57996 +EXPORT_SYMBOL(gr_learn_resource);
57997 +#endif
57998 +
57999 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
58000 +void
58001 +pax_set_initial_flags(struct linux_binprm *bprm)
58002 +{
58003 + struct task_struct *task = current;
58004 + struct acl_subject_label *proc;
58005 + unsigned long flags;
58006 +
58007 + if (unlikely(!(gr_status & GR_READY)))
58008 + return;
58009 +
58010 + flags = pax_get_flags(task);
58011 +
58012 + proc = task->acl;
58013 +
58014 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
58015 + flags &= ~MF_PAX_PAGEEXEC;
58016 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
58017 + flags &= ~MF_PAX_SEGMEXEC;
58018 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
58019 + flags &= ~MF_PAX_RANDMMAP;
58020 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
58021 + flags &= ~MF_PAX_EMUTRAMP;
58022 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
58023 + flags &= ~MF_PAX_MPROTECT;
58024 +
58025 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
58026 + flags |= MF_PAX_PAGEEXEC;
58027 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
58028 + flags |= MF_PAX_SEGMEXEC;
58029 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
58030 + flags |= MF_PAX_RANDMMAP;
58031 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
58032 + flags |= MF_PAX_EMUTRAMP;
58033 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
58034 + flags |= MF_PAX_MPROTECT;
58035 +
58036 + pax_set_flags(task, flags);
58037 +
58038 + return;
58039 +}
58040 +#endif
58041 +
58042 +int
58043 +gr_handle_proc_ptrace(struct task_struct *task)
58044 +{
58045 + struct file *filp;
58046 + struct task_struct *tmp = task;
58047 + struct task_struct *curtemp = current;
58048 + __u32 retmode;
58049 +
58050 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
58051 + if (unlikely(!(gr_status & GR_READY)))
58052 + return 0;
58053 +#endif
58054 +
58055 + read_lock(&tasklist_lock);
58056 + read_lock(&grsec_exec_file_lock);
58057 + filp = task->exec_file;
58058 +
58059 + while (tmp->pid > 0) {
58060 + if (tmp == curtemp)
58061 + break;
58062 + tmp = tmp->real_parent;
58063 + }
58064 +
58065 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
58066 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
58067 + read_unlock(&grsec_exec_file_lock);
58068 + read_unlock(&tasklist_lock);
58069 + return 1;
58070 + }
58071 +
58072 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58073 + if (!(gr_status & GR_READY)) {
58074 + read_unlock(&grsec_exec_file_lock);
58075 + read_unlock(&tasklist_lock);
58076 + return 0;
58077 + }
58078 +#endif
58079 +
58080 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
58081 + read_unlock(&grsec_exec_file_lock);
58082 + read_unlock(&tasklist_lock);
58083 +
58084 + if (retmode & GR_NOPTRACE)
58085 + return 1;
58086 +
58087 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
58088 + && (current->acl != task->acl || (current->acl != current->role->root_label
58089 + && current->pid != task->pid)))
58090 + return 1;
58091 +
58092 + return 0;
58093 +}
58094 +
58095 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
58096 +{
58097 + if (unlikely(!(gr_status & GR_READY)))
58098 + return;
58099 +
58100 + if (!(current->role->roletype & GR_ROLE_GOD))
58101 + return;
58102 +
58103 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
58104 + p->role->rolename, gr_task_roletype_to_char(p),
58105 + p->acl->filename);
58106 +}
58107 +
58108 +int
58109 +gr_handle_ptrace(struct task_struct *task, const long request)
58110 +{
58111 + struct task_struct *tmp = task;
58112 + struct task_struct *curtemp = current;
58113 + __u32 retmode;
58114 +
58115 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
58116 + if (unlikely(!(gr_status & GR_READY)))
58117 + return 0;
58118 +#endif
58119 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
58120 + read_lock(&tasklist_lock);
58121 + while (tmp->pid > 0) {
58122 + if (tmp == curtemp)
58123 + break;
58124 + tmp = tmp->real_parent;
58125 + }
58126 +
58127 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
58128 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
58129 + read_unlock(&tasklist_lock);
58130 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
58131 + return 1;
58132 + }
58133 + read_unlock(&tasklist_lock);
58134 + }
58135 +
58136 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58137 + if (!(gr_status & GR_READY))
58138 + return 0;
58139 +#endif
58140 +
58141 + read_lock(&grsec_exec_file_lock);
58142 + if (unlikely(!task->exec_file)) {
58143 + read_unlock(&grsec_exec_file_lock);
58144 + return 0;
58145 + }
58146 +
58147 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
58148 + read_unlock(&grsec_exec_file_lock);
58149 +
58150 + if (retmode & GR_NOPTRACE) {
58151 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
58152 + return 1;
58153 + }
58154 +
58155 + if (retmode & GR_PTRACERD) {
58156 + switch (request) {
58157 + case PTRACE_SEIZE:
58158 + case PTRACE_POKETEXT:
58159 + case PTRACE_POKEDATA:
58160 + case PTRACE_POKEUSR:
58161 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
58162 + case PTRACE_SETREGS:
58163 + case PTRACE_SETFPREGS:
58164 +#endif
58165 +#ifdef CONFIG_X86
58166 + case PTRACE_SETFPXREGS:
58167 +#endif
58168 +#ifdef CONFIG_ALTIVEC
58169 + case PTRACE_SETVRREGS:
58170 +#endif
58171 + return 1;
58172 + default:
58173 + return 0;
58174 + }
58175 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
58176 + !(current->role->roletype & GR_ROLE_GOD) &&
58177 + (current->acl != task->acl)) {
58178 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
58179 + return 1;
58180 + }
58181 +
58182 + return 0;
58183 +}
58184 +
58185 +static int is_writable_mmap(const struct file *filp)
58186 +{
58187 + struct task_struct *task = current;
58188 + struct acl_object_label *obj, *obj2;
58189 +
58190 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
58191 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
58192 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58193 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
58194 + task->role->root_label);
58195 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
58196 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
58197 + return 1;
58198 + }
58199 + }
58200 + return 0;
58201 +}
58202 +
58203 +int
58204 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
58205 +{
58206 + __u32 mode;
58207 +
58208 + if (unlikely(!file || !(prot & PROT_EXEC)))
58209 + return 1;
58210 +
58211 + if (is_writable_mmap(file))
58212 + return 0;
58213 +
58214 + mode =
58215 + gr_search_file(file->f_path.dentry,
58216 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
58217 + file->f_path.mnt);
58218 +
58219 + if (!gr_tpe_allow(file))
58220 + return 0;
58221 +
58222 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
58223 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
58224 + return 0;
58225 + } else if (unlikely(!(mode & GR_EXEC))) {
58226 + return 0;
58227 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
58228 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
58229 + return 1;
58230 + }
58231 +
58232 + return 1;
58233 +}
58234 +
58235 +int
58236 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
58237 +{
58238 + __u32 mode;
58239 +
58240 + if (unlikely(!file || !(prot & PROT_EXEC)))
58241 + return 1;
58242 +
58243 + if (is_writable_mmap(file))
58244 + return 0;
58245 +
58246 + mode =
58247 + gr_search_file(file->f_path.dentry,
58248 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
58249 + file->f_path.mnt);
58250 +
58251 + if (!gr_tpe_allow(file))
58252 + return 0;
58253 +
58254 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
58255 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
58256 + return 0;
58257 + } else if (unlikely(!(mode & GR_EXEC))) {
58258 + return 0;
58259 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
58260 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
58261 + return 1;
58262 + }
58263 +
58264 + return 1;
58265 +}
58266 +
58267 +void
58268 +gr_acl_handle_psacct(struct task_struct *task, const long code)
58269 +{
58270 + unsigned long runtime;
58271 + unsigned long cputime;
58272 + unsigned int wday, cday;
58273 + __u8 whr, chr;
58274 + __u8 wmin, cmin;
58275 + __u8 wsec, csec;
58276 + struct timespec timeval;
58277 +
58278 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
58279 + !(task->acl->mode & GR_PROCACCT)))
58280 + return;
58281 +
58282 + do_posix_clock_monotonic_gettime(&timeval);
58283 + runtime = timeval.tv_sec - task->start_time.tv_sec;
58284 + wday = runtime / (3600 * 24);
58285 + runtime -= wday * (3600 * 24);
58286 + whr = runtime / 3600;
58287 + runtime -= whr * 3600;
58288 + wmin = runtime / 60;
58289 + runtime -= wmin * 60;
58290 + wsec = runtime;
58291 +
58292 + cputime = (task->utime + task->stime) / HZ;
58293 + cday = cputime / (3600 * 24);
58294 + cputime -= cday * (3600 * 24);
58295 + chr = cputime / 3600;
58296 + cputime -= chr * 3600;
58297 + cmin = cputime / 60;
58298 + cputime -= cmin * 60;
58299 + csec = cputime;
58300 +
58301 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
58302 +
58303 + return;
58304 +}
58305 +
58306 +void gr_set_kernel_label(struct task_struct *task)
58307 +{
58308 + if (gr_status & GR_READY) {
58309 + task->role = kernel_role;
58310 + task->acl = kernel_role->root_label;
58311 + }
58312 + return;
58313 +}
58314 +
58315 +#ifdef CONFIG_TASKSTATS
58316 +int gr_is_taskstats_denied(int pid)
58317 +{
58318 + struct task_struct *task;
58319 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58320 + const struct cred *cred;
58321 +#endif
58322 + int ret = 0;
58323 +
58324 + /* restrict taskstats viewing to un-chrooted root users
58325 + who have the 'view' subject flag if the RBAC system is enabled
58326 + */
58327 +
58328 + rcu_read_lock();
58329 + read_lock(&tasklist_lock);
58330 + task = find_task_by_vpid(pid);
58331 + if (task) {
58332 +#ifdef CONFIG_GRKERNSEC_CHROOT
58333 + if (proc_is_chrooted(task))
58334 + ret = -EACCES;
58335 +#endif
58336 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58337 + cred = __task_cred(task);
58338 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58339 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID))
58340 + ret = -EACCES;
58341 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58342 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && !groups_search(cred->group_info, grsec_proc_gid))
58343 + ret = -EACCES;
58344 +#endif
58345 +#endif
58346 + if (gr_status & GR_READY) {
58347 + if (!(task->acl->mode & GR_VIEW))
58348 + ret = -EACCES;
58349 + }
58350 + } else
58351 + ret = -ENOENT;
58352 +
58353 + read_unlock(&tasklist_lock);
58354 + rcu_read_unlock();
58355 +
58356 + return ret;
58357 +}
58358 +#endif
58359 +
58360 +/* AUXV entries are filled via a descendant of search_binary_handler
58361 + after we've already applied the subject for the target
58362 +*/
58363 +int gr_acl_enable_at_secure(void)
58364 +{
58365 + if (unlikely(!(gr_status & GR_READY)))
58366 + return 0;
58367 +
58368 + if (current->acl->mode & GR_ATSECURE)
58369 + return 1;
58370 +
58371 + return 0;
58372 +}
58373 +
58374 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
58375 +{
58376 + struct task_struct *task = current;
58377 + struct dentry *dentry = file->f_path.dentry;
58378 + struct vfsmount *mnt = file->f_path.mnt;
58379 + struct acl_object_label *obj, *tmp;
58380 + struct acl_subject_label *subj;
58381 + unsigned int bufsize;
58382 + int is_not_root;
58383 + char *path;
58384 + dev_t dev = __get_dev(dentry);
58385 +
58386 + if (unlikely(!(gr_status & GR_READY)))
58387 + return 1;
58388 +
58389 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58390 + return 1;
58391 +
58392 + /* ignore Eric Biederman */
58393 + if (IS_PRIVATE(dentry->d_inode))
58394 + return 1;
58395 +
58396 + subj = task->acl;
58397 + read_lock(&gr_inode_lock);
58398 + do {
58399 + obj = lookup_acl_obj_label(ino, dev, subj);
58400 + if (obj != NULL) {
58401 + read_unlock(&gr_inode_lock);
58402 + return (obj->mode & GR_FIND) ? 1 : 0;
58403 + }
58404 + } while ((subj = subj->parent_subject));
58405 + read_unlock(&gr_inode_lock);
58406 +
58407 + /* this is purely an optimization since we're looking for an object
58408 + for the directory we're doing a readdir on
58409 + if it's possible for any globbed object to match the entry we're
58410 + filling into the directory, then the object we find here will be
58411 + an anchor point with attached globbed objects
58412 + */
58413 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
58414 + if (obj->globbed == NULL)
58415 + return (obj->mode & GR_FIND) ? 1 : 0;
58416 +
58417 + is_not_root = ((obj->filename[0] == '/') &&
58418 + (obj->filename[1] == '\0')) ? 0 : 1;
58419 + bufsize = PAGE_SIZE - namelen - is_not_root;
58420 +
58421 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
58422 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
58423 + return 1;
58424 +
58425 + preempt_disable();
58426 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
58427 + bufsize);
58428 +
58429 + bufsize = strlen(path);
58430 +
58431 + /* if base is "/", don't append an additional slash */
58432 + if (is_not_root)
58433 + *(path + bufsize) = '/';
58434 + memcpy(path + bufsize + is_not_root, name, namelen);
58435 + *(path + bufsize + namelen + is_not_root) = '\0';
58436 +
58437 + tmp = obj->globbed;
58438 + while (tmp) {
58439 + if (!glob_match(tmp->filename, path)) {
58440 + preempt_enable();
58441 + return (tmp->mode & GR_FIND) ? 1 : 0;
58442 + }
58443 + tmp = tmp->next;
58444 + }
58445 + preempt_enable();
58446 + return (obj->mode & GR_FIND) ? 1 : 0;
58447 +}
58448 +
58449 +void gr_put_exec_file(struct task_struct *task)
58450 +{
58451 + struct file *filp;
58452 +
58453 + write_lock(&grsec_exec_file_lock);
58454 + filp = task->exec_file;
58455 + task->exec_file = NULL;
58456 + write_unlock(&grsec_exec_file_lock);
58457 +
58458 + if (filp)
58459 + fput(filp);
58460 +
58461 + return;
58462 +}
58463 +
58464 +
58465 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
58466 +EXPORT_SYMBOL(gr_acl_is_enabled);
58467 +#endif
58468 +EXPORT_SYMBOL(gr_set_kernel_label);
58469 +#ifdef CONFIG_SECURITY
58470 +EXPORT_SYMBOL(gr_check_user_change);
58471 +EXPORT_SYMBOL(gr_check_group_change);
58472 +#endif
58473 +
58474 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
58475 new file mode 100644
58476 index 0000000..34fefda
58477 --- /dev/null
58478 +++ b/grsecurity/gracl_alloc.c
58479 @@ -0,0 +1,105 @@
58480 +#include <linux/kernel.h>
58481 +#include <linux/mm.h>
58482 +#include <linux/slab.h>
58483 +#include <linux/vmalloc.h>
58484 +#include <linux/gracl.h>
58485 +#include <linux/grsecurity.h>
58486 +
58487 +static unsigned long alloc_stack_next = 1;
58488 +static unsigned long alloc_stack_size = 1;
58489 +static void **alloc_stack;
58490 +
58491 +static __inline__ int
58492 +alloc_pop(void)
58493 +{
58494 + if (alloc_stack_next == 1)
58495 + return 0;
58496 +
58497 + kfree(alloc_stack[alloc_stack_next - 2]);
58498 +
58499 + alloc_stack_next--;
58500 +
58501 + return 1;
58502 +}
58503 +
58504 +static __inline__ int
58505 +alloc_push(void *buf)
58506 +{
58507 + if (alloc_stack_next >= alloc_stack_size)
58508 + return 1;
58509 +
58510 + alloc_stack[alloc_stack_next - 1] = buf;
58511 +
58512 + alloc_stack_next++;
58513 +
58514 + return 0;
58515 +}
58516 +
58517 +void *
58518 +acl_alloc(unsigned long len)
58519 +{
58520 + void *ret = NULL;
58521 +
58522 + if (!len || len > PAGE_SIZE)
58523 + goto out;
58524 +
58525 + ret = kmalloc(len, GFP_KERNEL);
58526 +
58527 + if (ret) {
58528 + if (alloc_push(ret)) {
58529 + kfree(ret);
58530 + ret = NULL;
58531 + }
58532 + }
58533 +
58534 +out:
58535 + return ret;
58536 +}
58537 +
58538 +void *
58539 +acl_alloc_num(unsigned long num, unsigned long len)
58540 +{
58541 + if (!len || (num > (PAGE_SIZE / len)))
58542 + return NULL;
58543 +
58544 + return acl_alloc(num * len);
58545 +}
58546 +
58547 +void
58548 +acl_free_all(void)
58549 +{
58550 + if (gr_acl_is_enabled() || !alloc_stack)
58551 + return;
58552 +
58553 + while (alloc_pop()) ;
58554 +
58555 + if (alloc_stack) {
58556 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
58557 + kfree(alloc_stack);
58558 + else
58559 + vfree(alloc_stack);
58560 + }
58561 +
58562 + alloc_stack = NULL;
58563 + alloc_stack_size = 1;
58564 + alloc_stack_next = 1;
58565 +
58566 + return;
58567 +}
58568 +
58569 +int
58570 +acl_alloc_stack_init(unsigned long size)
58571 +{
58572 + if ((size * sizeof (void *)) <= PAGE_SIZE)
58573 + alloc_stack =
58574 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
58575 + else
58576 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
58577 +
58578 + alloc_stack_size = size;
58579 +
58580 + if (!alloc_stack)
58581 + return 0;
58582 + else
58583 + return 1;
58584 +}
58585 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
58586 new file mode 100644
58587 index 0000000..bdd51ea
58588 --- /dev/null
58589 +++ b/grsecurity/gracl_cap.c
58590 @@ -0,0 +1,110 @@
58591 +#include <linux/kernel.h>
58592 +#include <linux/module.h>
58593 +#include <linux/sched.h>
58594 +#include <linux/gracl.h>
58595 +#include <linux/grsecurity.h>
58596 +#include <linux/grinternal.h>
58597 +
58598 +extern const char *captab_log[];
58599 +extern int captab_log_entries;
58600 +
58601 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
58602 +{
58603 + struct acl_subject_label *curracl;
58604 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
58605 + kernel_cap_t cap_audit = __cap_empty_set;
58606 +
58607 + if (!gr_acl_is_enabled())
58608 + return 1;
58609 +
58610 + curracl = task->acl;
58611 +
58612 + cap_drop = curracl->cap_lower;
58613 + cap_mask = curracl->cap_mask;
58614 + cap_audit = curracl->cap_invert_audit;
58615 +
58616 + while ((curracl = curracl->parent_subject)) {
58617 + /* if the cap isn't specified in the current computed mask but is specified in the
58618 + current level subject, and is lowered in the current level subject, then add
58619 + it to the set of dropped capabilities
58620 + otherwise, add the current level subject's mask to the current computed mask
58621 + */
58622 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
58623 + cap_raise(cap_mask, cap);
58624 + if (cap_raised(curracl->cap_lower, cap))
58625 + cap_raise(cap_drop, cap);
58626 + if (cap_raised(curracl->cap_invert_audit, cap))
58627 + cap_raise(cap_audit, cap);
58628 + }
58629 + }
58630 +
58631 + if (!cap_raised(cap_drop, cap)) {
58632 + if (cap_raised(cap_audit, cap))
58633 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
58634 + return 1;
58635 + }
58636 +
58637 + curracl = task->acl;
58638 +
58639 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
58640 + && cap_raised(cred->cap_effective, cap)) {
58641 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
58642 + task->role->roletype, GR_GLOBAL_UID(cred->uid),
58643 + GR_GLOBAL_GID(cred->gid), task->exec_file ?
58644 + gr_to_filename(task->exec_file->f_path.dentry,
58645 + task->exec_file->f_path.mnt) : curracl->filename,
58646 + curracl->filename, 0UL,
58647 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
58648 + return 1;
58649 + }
58650 +
58651 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
58652 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
58653 +
58654 + return 0;
58655 +}
58656 +
58657 +int
58658 +gr_acl_is_capable(const int cap)
58659 +{
58660 + return gr_task_acl_is_capable(current, current_cred(), cap);
58661 +}
58662 +
58663 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
58664 +{
58665 + struct acl_subject_label *curracl;
58666 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
58667 +
58668 + if (!gr_acl_is_enabled())
58669 + return 1;
58670 +
58671 + curracl = task->acl;
58672 +
58673 + cap_drop = curracl->cap_lower;
58674 + cap_mask = curracl->cap_mask;
58675 +
58676 + while ((curracl = curracl->parent_subject)) {
58677 + /* if the cap isn't specified in the current computed mask but is specified in the
58678 + current level subject, and is lowered in the current level subject, then add
58679 + it to the set of dropped capabilities
58680 + otherwise, add the current level subject's mask to the current computed mask
58681 + */
58682 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
58683 + cap_raise(cap_mask, cap);
58684 + if (cap_raised(curracl->cap_lower, cap))
58685 + cap_raise(cap_drop, cap);
58686 + }
58687 + }
58688 +
58689 + if (!cap_raised(cap_drop, cap))
58690 + return 1;
58691 +
58692 + return 0;
58693 +}
58694 +
58695 +int
58696 +gr_acl_is_capable_nolog(const int cap)
58697 +{
58698 + return gr_task_acl_is_capable_nolog(current, cap);
58699 +}
58700 +
58701 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
58702 new file mode 100644
58703 index 0000000..a340c17
58704 --- /dev/null
58705 +++ b/grsecurity/gracl_fs.c
58706 @@ -0,0 +1,431 @@
58707 +#include <linux/kernel.h>
58708 +#include <linux/sched.h>
58709 +#include <linux/types.h>
58710 +#include <linux/fs.h>
58711 +#include <linux/file.h>
58712 +#include <linux/stat.h>
58713 +#include <linux/grsecurity.h>
58714 +#include <linux/grinternal.h>
58715 +#include <linux/gracl.h>
58716 +
58717 +umode_t
58718 +gr_acl_umask(void)
58719 +{
58720 + if (unlikely(!gr_acl_is_enabled()))
58721 + return 0;
58722 +
58723 + return current->role->umask;
58724 +}
58725 +
58726 +__u32
58727 +gr_acl_handle_hidden_file(const struct dentry * dentry,
58728 + const struct vfsmount * mnt)
58729 +{
58730 + __u32 mode;
58731 +
58732 + if (unlikely(!dentry->d_inode))
58733 + return GR_FIND;
58734 +
58735 + mode =
58736 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
58737 +
58738 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
58739 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
58740 + return mode;
58741 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
58742 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
58743 + return 0;
58744 + } else if (unlikely(!(mode & GR_FIND)))
58745 + return 0;
58746 +
58747 + return GR_FIND;
58748 +}
58749 +
58750 +__u32
58751 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
58752 + int acc_mode)
58753 +{
58754 + __u32 reqmode = GR_FIND;
58755 + __u32 mode;
58756 +
58757 + if (unlikely(!dentry->d_inode))
58758 + return reqmode;
58759 +
58760 + if (acc_mode & MAY_APPEND)
58761 + reqmode |= GR_APPEND;
58762 + else if (acc_mode & MAY_WRITE)
58763 + reqmode |= GR_WRITE;
58764 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
58765 + reqmode |= GR_READ;
58766 +
58767 + mode =
58768 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
58769 + mnt);
58770 +
58771 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
58772 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
58773 + reqmode & GR_READ ? " reading" : "",
58774 + reqmode & GR_WRITE ? " writing" : reqmode &
58775 + GR_APPEND ? " appending" : "");
58776 + return reqmode;
58777 + } else
58778 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
58779 + {
58780 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
58781 + reqmode & GR_READ ? " reading" : "",
58782 + reqmode & GR_WRITE ? " writing" : reqmode &
58783 + GR_APPEND ? " appending" : "");
58784 + return 0;
58785 + } else if (unlikely((mode & reqmode) != reqmode))
58786 + return 0;
58787 +
58788 + return reqmode;
58789 +}
58790 +
58791 +__u32
58792 +gr_acl_handle_creat(const struct dentry * dentry,
58793 + const struct dentry * p_dentry,
58794 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
58795 + const int imode)
58796 +{
58797 + __u32 reqmode = GR_WRITE | GR_CREATE;
58798 + __u32 mode;
58799 +
58800 + if (acc_mode & MAY_APPEND)
58801 + reqmode |= GR_APPEND;
58802 + // if a directory was required or the directory already exists, then
58803 + // don't count this open as a read
58804 + if ((acc_mode & MAY_READ) &&
58805 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
58806 + reqmode |= GR_READ;
58807 + if ((open_flags & O_CREAT) &&
58808 + ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
58809 + reqmode |= GR_SETID;
58810 +
58811 + mode =
58812 + gr_check_create(dentry, p_dentry, p_mnt,
58813 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
58814 +
58815 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
58816 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
58817 + reqmode & GR_READ ? " reading" : "",
58818 + reqmode & GR_WRITE ? " writing" : reqmode &
58819 + GR_APPEND ? " appending" : "");
58820 + return reqmode;
58821 + } else
58822 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
58823 + {
58824 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
58825 + reqmode & GR_READ ? " reading" : "",
58826 + reqmode & GR_WRITE ? " writing" : reqmode &
58827 + GR_APPEND ? " appending" : "");
58828 + return 0;
58829 + } else if (unlikely((mode & reqmode) != reqmode))
58830 + return 0;
58831 +
58832 + return reqmode;
58833 +}
58834 +
58835 +__u32
58836 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
58837 + const int fmode)
58838 +{
58839 + __u32 mode, reqmode = GR_FIND;
58840 +
58841 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
58842 + reqmode |= GR_EXEC;
58843 + if (fmode & S_IWOTH)
58844 + reqmode |= GR_WRITE;
58845 + if (fmode & S_IROTH)
58846 + reqmode |= GR_READ;
58847 +
58848 + mode =
58849 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
58850 + mnt);
58851 +
58852 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
58853 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
58854 + reqmode & GR_READ ? " reading" : "",
58855 + reqmode & GR_WRITE ? " writing" : "",
58856 + reqmode & GR_EXEC ? " executing" : "");
58857 + return reqmode;
58858 + } else
58859 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
58860 + {
58861 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
58862 + reqmode & GR_READ ? " reading" : "",
58863 + reqmode & GR_WRITE ? " writing" : "",
58864 + reqmode & GR_EXEC ? " executing" : "");
58865 + return 0;
58866 + } else if (unlikely((mode & reqmode) != reqmode))
58867 + return 0;
58868 +
58869 + return reqmode;
58870 +}
58871 +
58872 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
58873 +{
58874 + __u32 mode;
58875 +
58876 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
58877 +
58878 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
58879 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
58880 + return mode;
58881 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
58882 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
58883 + return 0;
58884 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
58885 + return 0;
58886 +
58887 + return (reqmode);
58888 +}
58889 +
58890 +__u32
58891 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
58892 +{
58893 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
58894 +}
58895 +
58896 +__u32
58897 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
58898 +{
58899 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
58900 +}
58901 +
58902 +__u32
58903 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
58904 +{
58905 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
58906 +}
58907 +
58908 +__u32
58909 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
58910 +{
58911 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
58912 +}
58913 +
58914 +__u32
58915 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
58916 + umode_t *modeptr)
58917 +{
58918 + umode_t mode;
58919 +
58920 + *modeptr &= ~gr_acl_umask();
58921 + mode = *modeptr;
58922 +
58923 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
58924 + return 1;
58925 +
58926 + if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
58927 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
58928 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
58929 + GR_CHMOD_ACL_MSG);
58930 + } else {
58931 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
58932 + }
58933 +}
58934 +
58935 +__u32
58936 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
58937 +{
58938 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
58939 +}
58940 +
58941 +__u32
58942 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
58943 +{
58944 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
58945 +}
58946 +
58947 +__u32
58948 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
58949 +{
58950 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
58951 +}
58952 +
58953 +__u32
58954 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
58955 +{
58956 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
58957 + GR_UNIXCONNECT_ACL_MSG);
58958 +}
58959 +
58960 +/* hardlinks require at minimum create and link permission,
58961 + any additional privilege required is based on the
58962 + privilege of the file being linked to
58963 +*/
58964 +__u32
58965 +gr_acl_handle_link(const struct dentry * new_dentry,
58966 + const struct dentry * parent_dentry,
58967 + const struct vfsmount * parent_mnt,
58968 + const struct dentry * old_dentry,
58969 + const struct vfsmount * old_mnt, const struct filename *to)
58970 +{
58971 + __u32 mode;
58972 + __u32 needmode = GR_CREATE | GR_LINK;
58973 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
58974 +
58975 + mode =
58976 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
58977 + old_mnt);
58978 +
58979 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
58980 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
58981 + return mode;
58982 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
58983 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
58984 + return 0;
58985 + } else if (unlikely((mode & needmode) != needmode))
58986 + return 0;
58987 +
58988 + return 1;
58989 +}
58990 +
58991 +__u32
58992 +gr_acl_handle_symlink(const struct dentry * new_dentry,
58993 + const struct dentry * parent_dentry,
58994 + const struct vfsmount * parent_mnt, const struct filename *from)
58995 +{
58996 + __u32 needmode = GR_WRITE | GR_CREATE;
58997 + __u32 mode;
58998 +
58999 + mode =
59000 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
59001 + GR_CREATE | GR_AUDIT_CREATE |
59002 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
59003 +
59004 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
59005 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
59006 + return mode;
59007 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
59008 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
59009 + return 0;
59010 + } else if (unlikely((mode & needmode) != needmode))
59011 + return 0;
59012 +
59013 + return (GR_WRITE | GR_CREATE);
59014 +}
59015 +
59016 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
59017 +{
59018 + __u32 mode;
59019 +
59020 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
59021 +
59022 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
59023 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
59024 + return mode;
59025 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
59026 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
59027 + return 0;
59028 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
59029 + return 0;
59030 +
59031 + return (reqmode);
59032 +}
59033 +
59034 +__u32
59035 +gr_acl_handle_mknod(const struct dentry * new_dentry,
59036 + const struct dentry * parent_dentry,
59037 + const struct vfsmount * parent_mnt,
59038 + const int mode)
59039 +{
59040 + __u32 reqmode = GR_WRITE | GR_CREATE;
59041 + if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
59042 + reqmode |= GR_SETID;
59043 +
59044 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
59045 + reqmode, GR_MKNOD_ACL_MSG);
59046 +}
59047 +
59048 +__u32
59049 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
59050 + const struct dentry *parent_dentry,
59051 + const struct vfsmount *parent_mnt)
59052 +{
59053 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
59054 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
59055 +}
59056 +
59057 +#define RENAME_CHECK_SUCCESS(old, new) \
59058 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
59059 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
59060 +
59061 +int
59062 +gr_acl_handle_rename(struct dentry *new_dentry,
59063 + struct dentry *parent_dentry,
59064 + const struct vfsmount *parent_mnt,
59065 + struct dentry *old_dentry,
59066 + struct inode *old_parent_inode,
59067 + struct vfsmount *old_mnt, const struct filename *newname)
59068 +{
59069 + __u32 comp1, comp2;
59070 + int error = 0;
59071 +
59072 + if (unlikely(!gr_acl_is_enabled()))
59073 + return 0;
59074 +
59075 + if (!new_dentry->d_inode) {
59076 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
59077 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
59078 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
59079 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
59080 + GR_DELETE | GR_AUDIT_DELETE |
59081 + GR_AUDIT_READ | GR_AUDIT_WRITE |
59082 + GR_SUPPRESS, old_mnt);
59083 + } else {
59084 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
59085 + GR_CREATE | GR_DELETE |
59086 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
59087 + GR_AUDIT_READ | GR_AUDIT_WRITE |
59088 + GR_SUPPRESS, parent_mnt);
59089 + comp2 =
59090 + gr_search_file(old_dentry,
59091 + GR_READ | GR_WRITE | GR_AUDIT_READ |
59092 + GR_DELETE | GR_AUDIT_DELETE |
59093 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
59094 + }
59095 +
59096 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
59097 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
59098 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
59099 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
59100 + && !(comp2 & GR_SUPPRESS)) {
59101 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
59102 + error = -EACCES;
59103 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
59104 + error = -EACCES;
59105 +
59106 + return error;
59107 +}
59108 +
59109 +void
59110 +gr_acl_handle_exit(void)
59111 +{
59112 + u16 id;
59113 + char *rolename;
59114 +
59115 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
59116 + !(current->role->roletype & GR_ROLE_PERSIST))) {
59117 + id = current->acl_role_id;
59118 + rolename = current->role->rolename;
59119 + gr_set_acls(1);
59120 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
59121 + }
59122 +
59123 + gr_put_exec_file(current);
59124 + return;
59125 +}
59126 +
59127 +int
59128 +gr_acl_handle_procpidmem(const struct task_struct *task)
59129 +{
59130 + if (unlikely(!gr_acl_is_enabled()))
59131 + return 0;
59132 +
59133 + if (task != current && task->acl->mode & GR_PROTPROCFD)
59134 + return -EACCES;
59135 +
59136 + return 0;
59137 +}
59138 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
59139 new file mode 100644
59140 index 0000000..4699807
59141 --- /dev/null
59142 +++ b/grsecurity/gracl_ip.c
59143 @@ -0,0 +1,384 @@
59144 +#include <linux/kernel.h>
59145 +#include <asm/uaccess.h>
59146 +#include <asm/errno.h>
59147 +#include <net/sock.h>
59148 +#include <linux/file.h>
59149 +#include <linux/fs.h>
59150 +#include <linux/net.h>
59151 +#include <linux/in.h>
59152 +#include <linux/skbuff.h>
59153 +#include <linux/ip.h>
59154 +#include <linux/udp.h>
59155 +#include <linux/types.h>
59156 +#include <linux/sched.h>
59157 +#include <linux/netdevice.h>
59158 +#include <linux/inetdevice.h>
59159 +#include <linux/gracl.h>
59160 +#include <linux/grsecurity.h>
59161 +#include <linux/grinternal.h>
59162 +
59163 +#define GR_BIND 0x01
59164 +#define GR_CONNECT 0x02
59165 +#define GR_INVERT 0x04
59166 +#define GR_BINDOVERRIDE 0x08
59167 +#define GR_CONNECTOVERRIDE 0x10
59168 +#define GR_SOCK_FAMILY 0x20
59169 +
59170 +static const char * gr_protocols[IPPROTO_MAX] = {
59171 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
59172 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
59173 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
59174 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
59175 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
59176 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
59177 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
59178 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
59179 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
59180 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
59181 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
59182 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
59183 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
59184 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
59185 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
59186 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
59187 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
59188 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
59189 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
59190 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
59191 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
59192 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
59193 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
59194 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
59195 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
59196 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
59197 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
59198 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
59199 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
59200 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
59201 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
59202 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
59203 + };
59204 +
59205 +static const char * gr_socktypes[SOCK_MAX] = {
59206 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
59207 + "unknown:7", "unknown:8", "unknown:9", "packet"
59208 + };
59209 +
59210 +static const char * gr_sockfamilies[AF_MAX+1] = {
59211 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
59212 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
59213 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
59214 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
59215 + };
59216 +
59217 +const char *
59218 +gr_proto_to_name(unsigned char proto)
59219 +{
59220 + return gr_protocols[proto];
59221 +}
59222 +
59223 +const char *
59224 +gr_socktype_to_name(unsigned char type)
59225 +{
59226 + return gr_socktypes[type];
59227 +}
59228 +
59229 +const char *
59230 +gr_sockfamily_to_name(unsigned char family)
59231 +{
59232 + return gr_sockfamilies[family];
59233 +}
59234 +
59235 +int
59236 +gr_search_socket(const int domain, const int type, const int protocol)
59237 +{
59238 + struct acl_subject_label *curr;
59239 + const struct cred *cred = current_cred();
59240 +
59241 + if (unlikely(!gr_acl_is_enabled()))
59242 + goto exit;
59243 +
59244 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
59245 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
59246 + goto exit; // let the kernel handle it
59247 +
59248 + curr = current->acl;
59249 +
59250 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
59251 + /* the family is allowed, if this is PF_INET allow it only if
59252 + the extra sock type/protocol checks pass */
59253 + if (domain == PF_INET)
59254 + goto inet_check;
59255 + goto exit;
59256 + } else {
59257 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
59258 + __u32 fakeip = 0;
59259 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
59260 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
59261 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
59262 + gr_to_filename(current->exec_file->f_path.dentry,
59263 + current->exec_file->f_path.mnt) :
59264 + curr->filename, curr->filename,
59265 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
59266 + &current->signal->saved_ip);
59267 + goto exit;
59268 + }
59269 + goto exit_fail;
59270 + }
59271 +
59272 +inet_check:
59273 + /* the rest of this checking is for IPv4 only */
59274 + if (!curr->ips)
59275 + goto exit;
59276 +
59277 + if ((curr->ip_type & (1 << type)) &&
59278 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
59279 + goto exit;
59280 +
59281 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
59282 + /* we don't place acls on raw sockets , and sometimes
59283 + dgram/ip sockets are opened for ioctl and not
59284 + bind/connect, so we'll fake a bind learn log */
59285 + if (type == SOCK_RAW || type == SOCK_PACKET) {
59286 + __u32 fakeip = 0;
59287 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
59288 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
59289 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
59290 + gr_to_filename(current->exec_file->f_path.dentry,
59291 + current->exec_file->f_path.mnt) :
59292 + curr->filename, curr->filename,
59293 + &fakeip, 0, type,
59294 + protocol, GR_CONNECT, &current->signal->saved_ip);
59295 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
59296 + __u32 fakeip = 0;
59297 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
59298 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
59299 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
59300 + gr_to_filename(current->exec_file->f_path.dentry,
59301 + current->exec_file->f_path.mnt) :
59302 + curr->filename, curr->filename,
59303 + &fakeip, 0, type,
59304 + protocol, GR_BIND, &current->signal->saved_ip);
59305 + }
59306 + /* we'll log when they use connect or bind */
59307 + goto exit;
59308 + }
59309 +
59310 +exit_fail:
59311 + if (domain == PF_INET)
59312 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
59313 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
59314 + else
59315 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
59316 + gr_socktype_to_name(type), protocol);
59317 +
59318 + return 0;
59319 +exit:
59320 + return 1;
59321 +}
59322 +
59323 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
59324 +{
59325 + if ((ip->mode & mode) &&
59326 + (ip_port >= ip->low) &&
59327 + (ip_port <= ip->high) &&
59328 + ((ntohl(ip_addr) & our_netmask) ==
59329 + (ntohl(our_addr) & our_netmask))
59330 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
59331 + && (ip->type & (1 << type))) {
59332 + if (ip->mode & GR_INVERT)
59333 + return 2; // specifically denied
59334 + else
59335 + return 1; // allowed
59336 + }
59337 +
59338 + return 0; // not specifically allowed, may continue parsing
59339 +}
59340 +
59341 +static int
59342 +gr_search_connectbind(const int full_mode, struct sock *sk,
59343 + struct sockaddr_in *addr, const int type)
59344 +{
59345 + char iface[IFNAMSIZ] = {0};
59346 + struct acl_subject_label *curr;
59347 + struct acl_ip_label *ip;
59348 + struct inet_sock *isk;
59349 + struct net_device *dev;
59350 + struct in_device *idev;
59351 + unsigned long i;
59352 + int ret;
59353 + int mode = full_mode & (GR_BIND | GR_CONNECT);
59354 + __u32 ip_addr = 0;
59355 + __u32 our_addr;
59356 + __u32 our_netmask;
59357 + char *p;
59358 + __u16 ip_port = 0;
59359 + const struct cred *cred = current_cred();
59360 +
59361 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
59362 + return 0;
59363 +
59364 + curr = current->acl;
59365 + isk = inet_sk(sk);
59366 +
59367 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
59368 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
59369 + addr->sin_addr.s_addr = curr->inaddr_any_override;
59370 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
59371 + struct sockaddr_in saddr;
59372 + int err;
59373 +
59374 + saddr.sin_family = AF_INET;
59375 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
59376 + saddr.sin_port = isk->inet_sport;
59377 +
59378 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
59379 + if (err)
59380 + return err;
59381 +
59382 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
59383 + if (err)
59384 + return err;
59385 + }
59386 +
59387 + if (!curr->ips)
59388 + return 0;
59389 +
59390 + ip_addr = addr->sin_addr.s_addr;
59391 + ip_port = ntohs(addr->sin_port);
59392 +
59393 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
59394 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
59395 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
59396 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
59397 + gr_to_filename(current->exec_file->f_path.dentry,
59398 + current->exec_file->f_path.mnt) :
59399 + curr->filename, curr->filename,
59400 + &ip_addr, ip_port, type,
59401 + sk->sk_protocol, mode, &current->signal->saved_ip);
59402 + return 0;
59403 + }
59404 +
59405 + for (i = 0; i < curr->ip_num; i++) {
59406 + ip = *(curr->ips + i);
59407 + if (ip->iface != NULL) {
59408 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
59409 + p = strchr(iface, ':');
59410 + if (p != NULL)
59411 + *p = '\0';
59412 + dev = dev_get_by_name(sock_net(sk), iface);
59413 + if (dev == NULL)
59414 + continue;
59415 + idev = in_dev_get(dev);
59416 + if (idev == NULL) {
59417 + dev_put(dev);
59418 + continue;
59419 + }
59420 + rcu_read_lock();
59421 + for_ifa(idev) {
59422 + if (!strcmp(ip->iface, ifa->ifa_label)) {
59423 + our_addr = ifa->ifa_address;
59424 + our_netmask = 0xffffffff;
59425 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
59426 + if (ret == 1) {
59427 + rcu_read_unlock();
59428 + in_dev_put(idev);
59429 + dev_put(dev);
59430 + return 0;
59431 + } else if (ret == 2) {
59432 + rcu_read_unlock();
59433 + in_dev_put(idev);
59434 + dev_put(dev);
59435 + goto denied;
59436 + }
59437 + }
59438 + } endfor_ifa(idev);
59439 + rcu_read_unlock();
59440 + in_dev_put(idev);
59441 + dev_put(dev);
59442 + } else {
59443 + our_addr = ip->addr;
59444 + our_netmask = ip->netmask;
59445 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
59446 + if (ret == 1)
59447 + return 0;
59448 + else if (ret == 2)
59449 + goto denied;
59450 + }
59451 + }
59452 +
59453 +denied:
59454 + if (mode == GR_BIND)
59455 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
59456 + else if (mode == GR_CONNECT)
59457 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
59458 +
59459 + return -EACCES;
59460 +}
59461 +
59462 +int
59463 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
59464 +{
59465 + /* always allow disconnection of dgram sockets with connect */
59466 + if (addr->sin_family == AF_UNSPEC)
59467 + return 0;
59468 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
59469 +}
59470 +
59471 +int
59472 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
59473 +{
59474 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
59475 +}
59476 +
59477 +int gr_search_listen(struct socket *sock)
59478 +{
59479 + struct sock *sk = sock->sk;
59480 + struct sockaddr_in addr;
59481 +
59482 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
59483 + addr.sin_port = inet_sk(sk)->inet_sport;
59484 +
59485 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
59486 +}
59487 +
59488 +int gr_search_accept(struct socket *sock)
59489 +{
59490 + struct sock *sk = sock->sk;
59491 + struct sockaddr_in addr;
59492 +
59493 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
59494 + addr.sin_port = inet_sk(sk)->inet_sport;
59495 +
59496 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
59497 +}
59498 +
59499 +int
59500 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
59501 +{
59502 + if (addr)
59503 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
59504 + else {
59505 + struct sockaddr_in sin;
59506 + const struct inet_sock *inet = inet_sk(sk);
59507 +
59508 + sin.sin_addr.s_addr = inet->inet_daddr;
59509 + sin.sin_port = inet->inet_dport;
59510 +
59511 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
59512 + }
59513 +}
59514 +
59515 +int
59516 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
59517 +{
59518 + struct sockaddr_in sin;
59519 +
59520 + if (unlikely(skb->len < sizeof (struct udphdr)))
59521 + return 0; // skip this packet
59522 +
59523 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
59524 + sin.sin_port = udp_hdr(skb)->source;
59525 +
59526 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
59527 +}
59528 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
59529 new file mode 100644
59530 index 0000000..25f54ef
59531 --- /dev/null
59532 +++ b/grsecurity/gracl_learn.c
59533 @@ -0,0 +1,207 @@
59534 +#include <linux/kernel.h>
59535 +#include <linux/mm.h>
59536 +#include <linux/sched.h>
59537 +#include <linux/poll.h>
59538 +#include <linux/string.h>
59539 +#include <linux/file.h>
59540 +#include <linux/types.h>
59541 +#include <linux/vmalloc.h>
59542 +#include <linux/grinternal.h>
59543 +
59544 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
59545 + size_t count, loff_t *ppos);
59546 +extern int gr_acl_is_enabled(void);
59547 +
59548 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
59549 +static int gr_learn_attached;
59550 +
59551 +/* use a 512k buffer */
59552 +#define LEARN_BUFFER_SIZE (512 * 1024)
59553 +
59554 +static DEFINE_SPINLOCK(gr_learn_lock);
59555 +static DEFINE_MUTEX(gr_learn_user_mutex);
59556 +
59557 +/* we need to maintain two buffers, so that the kernel context of grlearn
59558 + uses a semaphore around the userspace copying, and the other kernel contexts
59559 + use a spinlock when copying into the buffer, since they cannot sleep
59560 +*/
59561 +static char *learn_buffer;
59562 +static char *learn_buffer_user;
59563 +static int learn_buffer_len;
59564 +static int learn_buffer_user_len;
59565 +
59566 +static ssize_t
59567 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
59568 +{
59569 + DECLARE_WAITQUEUE(wait, current);
59570 + ssize_t retval = 0;
59571 +
59572 + add_wait_queue(&learn_wait, &wait);
59573 + set_current_state(TASK_INTERRUPTIBLE);
59574 + do {
59575 + mutex_lock(&gr_learn_user_mutex);
59576 + spin_lock(&gr_learn_lock);
59577 + if (learn_buffer_len)
59578 + break;
59579 + spin_unlock(&gr_learn_lock);
59580 + mutex_unlock(&gr_learn_user_mutex);
59581 + if (file->f_flags & O_NONBLOCK) {
59582 + retval = -EAGAIN;
59583 + goto out;
59584 + }
59585 + if (signal_pending(current)) {
59586 + retval = -ERESTARTSYS;
59587 + goto out;
59588 + }
59589 +
59590 + schedule();
59591 + } while (1);
59592 +
59593 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
59594 + learn_buffer_user_len = learn_buffer_len;
59595 + retval = learn_buffer_len;
59596 + learn_buffer_len = 0;
59597 +
59598 + spin_unlock(&gr_learn_lock);
59599 +
59600 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
59601 + retval = -EFAULT;
59602 +
59603 + mutex_unlock(&gr_learn_user_mutex);
59604 +out:
59605 + set_current_state(TASK_RUNNING);
59606 + remove_wait_queue(&learn_wait, &wait);
59607 + return retval;
59608 +}
59609 +
59610 +static unsigned int
59611 +poll_learn(struct file * file, poll_table * wait)
59612 +{
59613 + poll_wait(file, &learn_wait, wait);
59614 +
59615 + if (learn_buffer_len)
59616 + return (POLLIN | POLLRDNORM);
59617 +
59618 + return 0;
59619 +}
59620 +
59621 +void
59622 +gr_clear_learn_entries(void)
59623 +{
59624 + char *tmp;
59625 +
59626 + mutex_lock(&gr_learn_user_mutex);
59627 + spin_lock(&gr_learn_lock);
59628 + tmp = learn_buffer;
59629 + learn_buffer = NULL;
59630 + spin_unlock(&gr_learn_lock);
59631 + if (tmp)
59632 + vfree(tmp);
59633 + if (learn_buffer_user != NULL) {
59634 + vfree(learn_buffer_user);
59635 + learn_buffer_user = NULL;
59636 + }
59637 + learn_buffer_len = 0;
59638 + mutex_unlock(&gr_learn_user_mutex);
59639 +
59640 + return;
59641 +}
59642 +
59643 +void
59644 +gr_add_learn_entry(const char *fmt, ...)
59645 +{
59646 + va_list args;
59647 + unsigned int len;
59648 +
59649 + if (!gr_learn_attached)
59650 + return;
59651 +
59652 + spin_lock(&gr_learn_lock);
59653 +
59654 + /* leave a gap at the end so we know when it's "full" but don't have to
59655 + compute the exact length of the string we're trying to append
59656 + */
59657 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
59658 + spin_unlock(&gr_learn_lock);
59659 + wake_up_interruptible(&learn_wait);
59660 + return;
59661 + }
59662 + if (learn_buffer == NULL) {
59663 + spin_unlock(&gr_learn_lock);
59664 + return;
59665 + }
59666 +
59667 + va_start(args, fmt);
59668 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
59669 + va_end(args);
59670 +
59671 + learn_buffer_len += len + 1;
59672 +
59673 + spin_unlock(&gr_learn_lock);
59674 + wake_up_interruptible(&learn_wait);
59675 +
59676 + return;
59677 +}
59678 +
59679 +static int
59680 +open_learn(struct inode *inode, struct file *file)
59681 +{
59682 + if (file->f_mode & FMODE_READ && gr_learn_attached)
59683 + return -EBUSY;
59684 + if (file->f_mode & FMODE_READ) {
59685 + int retval = 0;
59686 + mutex_lock(&gr_learn_user_mutex);
59687 + if (learn_buffer == NULL)
59688 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
59689 + if (learn_buffer_user == NULL)
59690 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
59691 + if (learn_buffer == NULL) {
59692 + retval = -ENOMEM;
59693 + goto out_error;
59694 + }
59695 + if (learn_buffer_user == NULL) {
59696 + retval = -ENOMEM;
59697 + goto out_error;
59698 + }
59699 + learn_buffer_len = 0;
59700 + learn_buffer_user_len = 0;
59701 + gr_learn_attached = 1;
59702 +out_error:
59703 + mutex_unlock(&gr_learn_user_mutex);
59704 + return retval;
59705 + }
59706 + return 0;
59707 +}
59708 +
59709 +static int
59710 +close_learn(struct inode *inode, struct file *file)
59711 +{
59712 + if (file->f_mode & FMODE_READ) {
59713 + char *tmp = NULL;
59714 + mutex_lock(&gr_learn_user_mutex);
59715 + spin_lock(&gr_learn_lock);
59716 + tmp = learn_buffer;
59717 + learn_buffer = NULL;
59718 + spin_unlock(&gr_learn_lock);
59719 + if (tmp)
59720 + vfree(tmp);
59721 + if (learn_buffer_user != NULL) {
59722 + vfree(learn_buffer_user);
59723 + learn_buffer_user = NULL;
59724 + }
59725 + learn_buffer_len = 0;
59726 + learn_buffer_user_len = 0;
59727 + gr_learn_attached = 0;
59728 + mutex_unlock(&gr_learn_user_mutex);
59729 + }
59730 +
59731 + return 0;
59732 +}
59733 +
59734 +const struct file_operations grsec_fops = {
59735 + .read = read_learn,
59736 + .write = write_grsec_handler,
59737 + .open = open_learn,
59738 + .release = close_learn,
59739 + .poll = poll_learn,
59740 +};
59741 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
59742 new file mode 100644
59743 index 0000000..39645c9
59744 --- /dev/null
59745 +++ b/grsecurity/gracl_res.c
59746 @@ -0,0 +1,68 @@
59747 +#include <linux/kernel.h>
59748 +#include <linux/sched.h>
59749 +#include <linux/gracl.h>
59750 +#include <linux/grinternal.h>
59751 +
59752 +static const char *restab_log[] = {
59753 + [RLIMIT_CPU] = "RLIMIT_CPU",
59754 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
59755 + [RLIMIT_DATA] = "RLIMIT_DATA",
59756 + [RLIMIT_STACK] = "RLIMIT_STACK",
59757 + [RLIMIT_CORE] = "RLIMIT_CORE",
59758 + [RLIMIT_RSS] = "RLIMIT_RSS",
59759 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
59760 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
59761 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
59762 + [RLIMIT_AS] = "RLIMIT_AS",
59763 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
59764 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
59765 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
59766 + [RLIMIT_NICE] = "RLIMIT_NICE",
59767 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
59768 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
59769 + [GR_CRASH_RES] = "RLIMIT_CRASH"
59770 +};
59771 +
59772 +void
59773 +gr_log_resource(const struct task_struct *task,
59774 + const int res, const unsigned long wanted, const int gt)
59775 +{
59776 + const struct cred *cred;
59777 + unsigned long rlim;
59778 +
59779 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
59780 + return;
59781 +
59782 + // not yet supported resource
59783 + if (unlikely(!restab_log[res]))
59784 + return;
59785 +
59786 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
59787 + rlim = task_rlimit_max(task, res);
59788 + else
59789 + rlim = task_rlimit(task, res);
59790 +
59791 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
59792 + return;
59793 +
59794 + rcu_read_lock();
59795 + cred = __task_cred(task);
59796 +
59797 + if (res == RLIMIT_NPROC &&
59798 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
59799 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
59800 + goto out_rcu_unlock;
59801 + else if (res == RLIMIT_MEMLOCK &&
59802 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
59803 + goto out_rcu_unlock;
59804 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
59805 + goto out_rcu_unlock;
59806 + rcu_read_unlock();
59807 +
59808 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
59809 +
59810 + return;
59811 +out_rcu_unlock:
59812 + rcu_read_unlock();
59813 + return;
59814 +}
59815 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
59816 new file mode 100644
59817 index 0000000..10398db
59818 --- /dev/null
59819 +++ b/grsecurity/gracl_segv.c
59820 @@ -0,0 +1,303 @@
59821 +#include <linux/kernel.h>
59822 +#include <linux/mm.h>
59823 +#include <asm/uaccess.h>
59824 +#include <asm/errno.h>
59825 +#include <asm/mman.h>
59826 +#include <net/sock.h>
59827 +#include <linux/file.h>
59828 +#include <linux/fs.h>
59829 +#include <linux/net.h>
59830 +#include <linux/in.h>
59831 +#include <linux/slab.h>
59832 +#include <linux/types.h>
59833 +#include <linux/sched.h>
59834 +#include <linux/timer.h>
59835 +#include <linux/gracl.h>
59836 +#include <linux/grsecurity.h>
59837 +#include <linux/grinternal.h>
59838 +
59839 +static struct crash_uid *uid_set;
59840 +static unsigned short uid_used;
59841 +static DEFINE_SPINLOCK(gr_uid_lock);
59842 +extern rwlock_t gr_inode_lock;
59843 +extern struct acl_subject_label *
59844 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
59845 + struct acl_role_label *role);
59846 +
59847 +#ifdef CONFIG_BTRFS_FS
59848 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
59849 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
59850 +#endif
59851 +
59852 +static inline dev_t __get_dev(const struct dentry *dentry)
59853 +{
59854 +#ifdef CONFIG_BTRFS_FS
59855 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
59856 + return get_btrfs_dev_from_inode(dentry->d_inode);
59857 + else
59858 +#endif
59859 + return dentry->d_inode->i_sb->s_dev;
59860 +}
59861 +
59862 +int
59863 +gr_init_uidset(void)
59864 +{
59865 + uid_set =
59866 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
59867 + uid_used = 0;
59868 +
59869 + return uid_set ? 1 : 0;
59870 +}
59871 +
59872 +void
59873 +gr_free_uidset(void)
59874 +{
59875 + if (uid_set)
59876 + kfree(uid_set);
59877 +
59878 + return;
59879 +}
59880 +
59881 +int
59882 +gr_find_uid(const uid_t uid)
59883 +{
59884 + struct crash_uid *tmp = uid_set;
59885 + uid_t buid;
59886 + int low = 0, high = uid_used - 1, mid;
59887 +
59888 + while (high >= low) {
59889 + mid = (low + high) >> 1;
59890 + buid = tmp[mid].uid;
59891 + if (buid == uid)
59892 + return mid;
59893 + if (buid > uid)
59894 + high = mid - 1;
59895 + if (buid < uid)
59896 + low = mid + 1;
59897 + }
59898 +
59899 + return -1;
59900 +}
59901 +
59902 +static __inline__ void
59903 +gr_insertsort(void)
59904 +{
59905 + unsigned short i, j;
59906 + struct crash_uid index;
59907 +
59908 + for (i = 1; i < uid_used; i++) {
59909 + index = uid_set[i];
59910 + j = i;
59911 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
59912 + uid_set[j] = uid_set[j - 1];
59913 + j--;
59914 + }
59915 + uid_set[j] = index;
59916 + }
59917 +
59918 + return;
59919 +}
59920 +
59921 +static __inline__ void
59922 +gr_insert_uid(const kuid_t kuid, const unsigned long expires)
59923 +{
59924 + int loc;
59925 + uid_t uid = GR_GLOBAL_UID(kuid);
59926 +
59927 + if (uid_used == GR_UIDTABLE_MAX)
59928 + return;
59929 +
59930 + loc = gr_find_uid(uid);
59931 +
59932 + if (loc >= 0) {
59933 + uid_set[loc].expires = expires;
59934 + return;
59935 + }
59936 +
59937 + uid_set[uid_used].uid = uid;
59938 + uid_set[uid_used].expires = expires;
59939 + uid_used++;
59940 +
59941 + gr_insertsort();
59942 +
59943 + return;
59944 +}
59945 +
59946 +void
59947 +gr_remove_uid(const unsigned short loc)
59948 +{
59949 + unsigned short i;
59950 +
59951 + for (i = loc + 1; i < uid_used; i++)
59952 + uid_set[i - 1] = uid_set[i];
59953 +
59954 + uid_used--;
59955 +
59956 + return;
59957 +}
59958 +
59959 +int
59960 +gr_check_crash_uid(const kuid_t kuid)
59961 +{
59962 + int loc;
59963 + int ret = 0;
59964 + uid_t uid;
59965 +
59966 + if (unlikely(!gr_acl_is_enabled()))
59967 + return 0;
59968 +
59969 + uid = GR_GLOBAL_UID(kuid);
59970 +
59971 + spin_lock(&gr_uid_lock);
59972 + loc = gr_find_uid(uid);
59973 +
59974 + if (loc < 0)
59975 + goto out_unlock;
59976 +
59977 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
59978 + gr_remove_uid(loc);
59979 + else
59980 + ret = 1;
59981 +
59982 +out_unlock:
59983 + spin_unlock(&gr_uid_lock);
59984 + return ret;
59985 +}
59986 +
59987 +static __inline__ int
59988 +proc_is_setxid(const struct cred *cred)
59989 +{
59990 + if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
59991 + !uid_eq(cred->uid, cred->fsuid))
59992 + return 1;
59993 + if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
59994 + !gid_eq(cred->gid, cred->fsgid))
59995 + return 1;
59996 +
59997 + return 0;
59998 +}
59999 +
60000 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
60001 +
60002 +void
60003 +gr_handle_crash(struct task_struct *task, const int sig)
60004 +{
60005 + struct acl_subject_label *curr;
60006 + struct task_struct *tsk, *tsk2;
60007 + const struct cred *cred;
60008 + const struct cred *cred2;
60009 +
60010 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
60011 + return;
60012 +
60013 + if (unlikely(!gr_acl_is_enabled()))
60014 + return;
60015 +
60016 + curr = task->acl;
60017 +
60018 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
60019 + return;
60020 +
60021 + if (time_before_eq(curr->expires, get_seconds())) {
60022 + curr->expires = 0;
60023 + curr->crashes = 0;
60024 + }
60025 +
60026 + curr->crashes++;
60027 +
60028 + if (!curr->expires)
60029 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
60030 +
60031 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
60032 + time_after(curr->expires, get_seconds())) {
60033 + rcu_read_lock();
60034 + cred = __task_cred(task);
60035 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && proc_is_setxid(cred)) {
60036 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
60037 + spin_lock(&gr_uid_lock);
60038 + gr_insert_uid(cred->uid, curr->expires);
60039 + spin_unlock(&gr_uid_lock);
60040 + curr->expires = 0;
60041 + curr->crashes = 0;
60042 + read_lock(&tasklist_lock);
60043 + do_each_thread(tsk2, tsk) {
60044 + cred2 = __task_cred(tsk);
60045 + if (tsk != task && uid_eq(cred2->uid, cred->uid))
60046 + gr_fake_force_sig(SIGKILL, tsk);
60047 + } while_each_thread(tsk2, tsk);
60048 + read_unlock(&tasklist_lock);
60049 + } else {
60050 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
60051 + read_lock(&tasklist_lock);
60052 + read_lock(&grsec_exec_file_lock);
60053 + do_each_thread(tsk2, tsk) {
60054 + if (likely(tsk != task)) {
60055 + // if this thread has the same subject as the one that triggered
60056 + // RES_CRASH and it's the same binary, kill it
60057 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
60058 + gr_fake_force_sig(SIGKILL, tsk);
60059 + }
60060 + } while_each_thread(tsk2, tsk);
60061 + read_unlock(&grsec_exec_file_lock);
60062 + read_unlock(&tasklist_lock);
60063 + }
60064 + rcu_read_unlock();
60065 + }
60066 +
60067 + return;
60068 +}
60069 +
60070 +int
60071 +gr_check_crash_exec(const struct file *filp)
60072 +{
60073 + struct acl_subject_label *curr;
60074 +
60075 + if (unlikely(!gr_acl_is_enabled()))
60076 + return 0;
60077 +
60078 + read_lock(&gr_inode_lock);
60079 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
60080 + __get_dev(filp->f_path.dentry),
60081 + current->role);
60082 + read_unlock(&gr_inode_lock);
60083 +
60084 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
60085 + (!curr->crashes && !curr->expires))
60086 + return 0;
60087 +
60088 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
60089 + time_after(curr->expires, get_seconds()))
60090 + return 1;
60091 + else if (time_before_eq(curr->expires, get_seconds())) {
60092 + curr->crashes = 0;
60093 + curr->expires = 0;
60094 + }
60095 +
60096 + return 0;
60097 +}
60098 +
60099 +void
60100 +gr_handle_alertkill(struct task_struct *task)
60101 +{
60102 + struct acl_subject_label *curracl;
60103 + __u32 curr_ip;
60104 + struct task_struct *p, *p2;
60105 +
60106 + if (unlikely(!gr_acl_is_enabled()))
60107 + return;
60108 +
60109 + curracl = task->acl;
60110 + curr_ip = task->signal->curr_ip;
60111 +
60112 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
60113 + read_lock(&tasklist_lock);
60114 + do_each_thread(p2, p) {
60115 + if (p->signal->curr_ip == curr_ip)
60116 + gr_fake_force_sig(SIGKILL, p);
60117 + } while_each_thread(p2, p);
60118 + read_unlock(&tasklist_lock);
60119 + } else if (curracl->mode & GR_KILLPROC)
60120 + gr_fake_force_sig(SIGKILL, task);
60121 +
60122 + return;
60123 +}
60124 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
60125 new file mode 100644
60126 index 0000000..120978a
60127 --- /dev/null
60128 +++ b/grsecurity/gracl_shm.c
60129 @@ -0,0 +1,40 @@
60130 +#include <linux/kernel.h>
60131 +#include <linux/mm.h>
60132 +#include <linux/sched.h>
60133 +#include <linux/file.h>
60134 +#include <linux/ipc.h>
60135 +#include <linux/gracl.h>
60136 +#include <linux/grsecurity.h>
60137 +#include <linux/grinternal.h>
60138 +
60139 +int
60140 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
60141 + const time_t shm_createtime, const kuid_t cuid, const int shmid)
60142 +{
60143 + struct task_struct *task;
60144 +
60145 + if (!gr_acl_is_enabled())
60146 + return 1;
60147 +
60148 + rcu_read_lock();
60149 + read_lock(&tasklist_lock);
60150 +
60151 + task = find_task_by_vpid(shm_cprid);
60152 +
60153 + if (unlikely(!task))
60154 + task = find_task_by_vpid(shm_lapid);
60155 +
60156 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
60157 + (task->pid == shm_lapid)) &&
60158 + (task->acl->mode & GR_PROTSHM) &&
60159 + (task->acl != current->acl))) {
60160 + read_unlock(&tasklist_lock);
60161 + rcu_read_unlock();
60162 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
60163 + return 0;
60164 + }
60165 + read_unlock(&tasklist_lock);
60166 + rcu_read_unlock();
60167 +
60168 + return 1;
60169 +}
60170 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
60171 new file mode 100644
60172 index 0000000..bc0be01
60173 --- /dev/null
60174 +++ b/grsecurity/grsec_chdir.c
60175 @@ -0,0 +1,19 @@
60176 +#include <linux/kernel.h>
60177 +#include <linux/sched.h>
60178 +#include <linux/fs.h>
60179 +#include <linux/file.h>
60180 +#include <linux/grsecurity.h>
60181 +#include <linux/grinternal.h>
60182 +
60183 +void
60184 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
60185 +{
60186 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
60187 + if ((grsec_enable_chdir && grsec_enable_group &&
60188 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
60189 + !grsec_enable_group)) {
60190 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
60191 + }
60192 +#endif
60193 + return;
60194 +}
60195 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
60196 new file mode 100644
60197 index 0000000..70fe0ae
60198 --- /dev/null
60199 +++ b/grsecurity/grsec_chroot.c
60200 @@ -0,0 +1,357 @@
60201 +#include <linux/kernel.h>
60202 +#include <linux/module.h>
60203 +#include <linux/sched.h>
60204 +#include <linux/file.h>
60205 +#include <linux/fs.h>
60206 +#include <linux/mount.h>
60207 +#include <linux/types.h>
60208 +#include "../fs/mount.h"
60209 +#include <linux/grsecurity.h>
60210 +#include <linux/grinternal.h>
60211 +
60212 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
60213 +{
60214 +#ifdef CONFIG_GRKERNSEC
60215 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
60216 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
60217 + task->gr_is_chrooted = 1;
60218 + else
60219 + task->gr_is_chrooted = 0;
60220 +
60221 + task->gr_chroot_dentry = path->dentry;
60222 +#endif
60223 + return;
60224 +}
60225 +
60226 +void gr_clear_chroot_entries(struct task_struct *task)
60227 +{
60228 +#ifdef CONFIG_GRKERNSEC
60229 + task->gr_is_chrooted = 0;
60230 + task->gr_chroot_dentry = NULL;
60231 +#endif
60232 + return;
60233 +}
60234 +
60235 +int
60236 +gr_handle_chroot_unix(const pid_t pid)
60237 +{
60238 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
60239 + struct task_struct *p;
60240 +
60241 + if (unlikely(!grsec_enable_chroot_unix))
60242 + return 1;
60243 +
60244 + if (likely(!proc_is_chrooted(current)))
60245 + return 1;
60246 +
60247 + rcu_read_lock();
60248 + read_lock(&tasklist_lock);
60249 + p = find_task_by_vpid_unrestricted(pid);
60250 + if (unlikely(p && !have_same_root(current, p))) {
60251 + read_unlock(&tasklist_lock);
60252 + rcu_read_unlock();
60253 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
60254 + return 0;
60255 + }
60256 + read_unlock(&tasklist_lock);
60257 + rcu_read_unlock();
60258 +#endif
60259 + return 1;
60260 +}
60261 +
60262 +int
60263 +gr_handle_chroot_nice(void)
60264 +{
60265 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
60266 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
60267 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
60268 + return -EPERM;
60269 + }
60270 +#endif
60271 + return 0;
60272 +}
60273 +
60274 +int
60275 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
60276 +{
60277 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
60278 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
60279 + && proc_is_chrooted(current)) {
60280 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
60281 + return -EACCES;
60282 + }
60283 +#endif
60284 + return 0;
60285 +}
60286 +
60287 +int
60288 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
60289 +{
60290 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60291 + struct task_struct *p;
60292 + int ret = 0;
60293 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
60294 + return ret;
60295 +
60296 + read_lock(&tasklist_lock);
60297 + do_each_pid_task(pid, type, p) {
60298 + if (!have_same_root(current, p)) {
60299 + ret = 1;
60300 + goto out;
60301 + }
60302 + } while_each_pid_task(pid, type, p);
60303 +out:
60304 + read_unlock(&tasklist_lock);
60305 + return ret;
60306 +#endif
60307 + return 0;
60308 +}
60309 +
60310 +int
60311 +gr_pid_is_chrooted(struct task_struct *p)
60312 +{
60313 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60314 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
60315 + return 0;
60316 +
60317 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
60318 + !have_same_root(current, p)) {
60319 + return 1;
60320 + }
60321 +#endif
60322 + return 0;
60323 +}
60324 +
60325 +EXPORT_SYMBOL(gr_pid_is_chrooted);
60326 +
60327 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
60328 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
60329 +{
60330 + struct path path, currentroot;
60331 + int ret = 0;
60332 +
60333 + path.dentry = (struct dentry *)u_dentry;
60334 + path.mnt = (struct vfsmount *)u_mnt;
60335 + get_fs_root(current->fs, &currentroot);
60336 + if (path_is_under(&path, &currentroot))
60337 + ret = 1;
60338 + path_put(&currentroot);
60339 +
60340 + return ret;
60341 +}
60342 +#endif
60343 +
60344 +int
60345 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
60346 +{
60347 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
60348 + if (!grsec_enable_chroot_fchdir)
60349 + return 1;
60350 +
60351 + if (!proc_is_chrooted(current))
60352 + return 1;
60353 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
60354 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
60355 + return 0;
60356 + }
60357 +#endif
60358 + return 1;
60359 +}
60360 +
60361 +int
60362 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
60363 + const time_t shm_createtime)
60364 +{
60365 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
60366 + struct task_struct *p;
60367 + time_t starttime;
60368 +
60369 + if (unlikely(!grsec_enable_chroot_shmat))
60370 + return 1;
60371 +
60372 + if (likely(!proc_is_chrooted(current)))
60373 + return 1;
60374 +
60375 + rcu_read_lock();
60376 + read_lock(&tasklist_lock);
60377 +
60378 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
60379 + starttime = p->start_time.tv_sec;
60380 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
60381 + if (have_same_root(current, p)) {
60382 + goto allow;
60383 + } else {
60384 + read_unlock(&tasklist_lock);
60385 + rcu_read_unlock();
60386 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
60387 + return 0;
60388 + }
60389 + }
60390 + /* creator exited, pid reuse, fall through to next check */
60391 + }
60392 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
60393 + if (unlikely(!have_same_root(current, p))) {
60394 + read_unlock(&tasklist_lock);
60395 + rcu_read_unlock();
60396 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
60397 + return 0;
60398 + }
60399 + }
60400 +
60401 +allow:
60402 + read_unlock(&tasklist_lock);
60403 + rcu_read_unlock();
60404 +#endif
60405 + return 1;
60406 +}
60407 +
60408 +void
60409 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
60410 +{
60411 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
60412 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
60413 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
60414 +#endif
60415 + return;
60416 +}
60417 +
60418 +int
60419 +gr_handle_chroot_mknod(const struct dentry *dentry,
60420 + const struct vfsmount *mnt, const int mode)
60421 +{
60422 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
60423 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
60424 + proc_is_chrooted(current)) {
60425 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
60426 + return -EPERM;
60427 + }
60428 +#endif
60429 + return 0;
60430 +}
60431 +
60432 +int
60433 +gr_handle_chroot_mount(const struct dentry *dentry,
60434 + const struct vfsmount *mnt, const char *dev_name)
60435 +{
60436 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
60437 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
60438 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
60439 + return -EPERM;
60440 + }
60441 +#endif
60442 + return 0;
60443 +}
60444 +
60445 +int
60446 +gr_handle_chroot_pivot(void)
60447 +{
60448 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
60449 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
60450 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
60451 + return -EPERM;
60452 + }
60453 +#endif
60454 + return 0;
60455 +}
60456 +
60457 +int
60458 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
60459 +{
60460 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
60461 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
60462 + !gr_is_outside_chroot(dentry, mnt)) {
60463 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
60464 + return -EPERM;
60465 + }
60466 +#endif
60467 + return 0;
60468 +}
60469 +
60470 +extern const char *captab_log[];
60471 +extern int captab_log_entries;
60472 +
60473 +int
60474 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
60475 +{
60476 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
60477 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
60478 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
60479 + if (cap_raised(chroot_caps, cap)) {
60480 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
60481 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
60482 + }
60483 + return 0;
60484 + }
60485 + }
60486 +#endif
60487 + return 1;
60488 +}
60489 +
60490 +int
60491 +gr_chroot_is_capable(const int cap)
60492 +{
60493 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
60494 + return gr_task_chroot_is_capable(current, current_cred(), cap);
60495 +#endif
60496 + return 1;
60497 +}
60498 +
60499 +int
60500 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
60501 +{
60502 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
60503 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
60504 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
60505 + if (cap_raised(chroot_caps, cap)) {
60506 + return 0;
60507 + }
60508 + }
60509 +#endif
60510 + return 1;
60511 +}
60512 +
60513 +int
60514 +gr_chroot_is_capable_nolog(const int cap)
60515 +{
60516 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
60517 + return gr_task_chroot_is_capable_nolog(current, cap);
60518 +#endif
60519 + return 1;
60520 +}
60521 +
60522 +int
60523 +gr_handle_chroot_sysctl(const int op)
60524 +{
60525 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
60526 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
60527 + proc_is_chrooted(current))
60528 + return -EACCES;
60529 +#endif
60530 + return 0;
60531 +}
60532 +
60533 +void
60534 +gr_handle_chroot_chdir(struct path *path)
60535 +{
60536 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
60537 + if (grsec_enable_chroot_chdir)
60538 + set_fs_pwd(current->fs, path);
60539 +#endif
60540 + return;
60541 +}
60542 +
60543 +int
60544 +gr_handle_chroot_chmod(const struct dentry *dentry,
60545 + const struct vfsmount *mnt, const int mode)
60546 +{
60547 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
60548 + /* allow chmod +s on directories, but not files */
60549 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
60550 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
60551 + proc_is_chrooted(current)) {
60552 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
60553 + return -EPERM;
60554 + }
60555 +#endif
60556 + return 0;
60557 +}
60558 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
60559 new file mode 100644
60560 index 0000000..207d409
60561 --- /dev/null
60562 +++ b/grsecurity/grsec_disabled.c
60563 @@ -0,0 +1,434 @@
60564 +#include <linux/kernel.h>
60565 +#include <linux/module.h>
60566 +#include <linux/sched.h>
60567 +#include <linux/file.h>
60568 +#include <linux/fs.h>
60569 +#include <linux/kdev_t.h>
60570 +#include <linux/net.h>
60571 +#include <linux/in.h>
60572 +#include <linux/ip.h>
60573 +#include <linux/skbuff.h>
60574 +#include <linux/sysctl.h>
60575 +
60576 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60577 +void
60578 +pax_set_initial_flags(struct linux_binprm *bprm)
60579 +{
60580 + return;
60581 +}
60582 +#endif
60583 +
60584 +#ifdef CONFIG_SYSCTL
60585 +__u32
60586 +gr_handle_sysctl(const struct ctl_table * table, const int op)
60587 +{
60588 + return 0;
60589 +}
60590 +#endif
60591 +
60592 +#ifdef CONFIG_TASKSTATS
60593 +int gr_is_taskstats_denied(int pid)
60594 +{
60595 + return 0;
60596 +}
60597 +#endif
60598 +
60599 +int
60600 +gr_acl_is_enabled(void)
60601 +{
60602 + return 0;
60603 +}
60604 +
60605 +void
60606 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
60607 +{
60608 + return;
60609 +}
60610 +
60611 +int
60612 +gr_handle_rawio(const struct inode *inode)
60613 +{
60614 + return 0;
60615 +}
60616 +
60617 +void
60618 +gr_acl_handle_psacct(struct task_struct *task, const long code)
60619 +{
60620 + return;
60621 +}
60622 +
60623 +int
60624 +gr_handle_ptrace(struct task_struct *task, const long request)
60625 +{
60626 + return 0;
60627 +}
60628 +
60629 +int
60630 +gr_handle_proc_ptrace(struct task_struct *task)
60631 +{
60632 + return 0;
60633 +}
60634 +
60635 +int
60636 +gr_set_acls(const int type)
60637 +{
60638 + return 0;
60639 +}
60640 +
60641 +int
60642 +gr_check_hidden_task(const struct task_struct *tsk)
60643 +{
60644 + return 0;
60645 +}
60646 +
60647 +int
60648 +gr_check_protected_task(const struct task_struct *task)
60649 +{
60650 + return 0;
60651 +}
60652 +
60653 +int
60654 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
60655 +{
60656 + return 0;
60657 +}
60658 +
60659 +void
60660 +gr_copy_label(struct task_struct *tsk)
60661 +{
60662 + return;
60663 +}
60664 +
60665 +void
60666 +gr_set_pax_flags(struct task_struct *task)
60667 +{
60668 + return;
60669 +}
60670 +
60671 +int
60672 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
60673 + const int unsafe_share)
60674 +{
60675 + return 0;
60676 +}
60677 +
60678 +void
60679 +gr_handle_delete(const ino_t ino, const dev_t dev)
60680 +{
60681 + return;
60682 +}
60683 +
60684 +void
60685 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
60686 +{
60687 + return;
60688 +}
60689 +
60690 +void
60691 +gr_handle_crash(struct task_struct *task, const int sig)
60692 +{
60693 + return;
60694 +}
60695 +
60696 +int
60697 +gr_check_crash_exec(const struct file *filp)
60698 +{
60699 + return 0;
60700 +}
60701 +
60702 +int
60703 +gr_check_crash_uid(const kuid_t uid)
60704 +{
60705 + return 0;
60706 +}
60707 +
60708 +void
60709 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60710 + struct dentry *old_dentry,
60711 + struct dentry *new_dentry,
60712 + struct vfsmount *mnt, const __u8 replace)
60713 +{
60714 + return;
60715 +}
60716 +
60717 +int
60718 +gr_search_socket(const int family, const int type, const int protocol)
60719 +{
60720 + return 1;
60721 +}
60722 +
60723 +int
60724 +gr_search_connectbind(const int mode, const struct socket *sock,
60725 + const struct sockaddr_in *addr)
60726 +{
60727 + return 0;
60728 +}
60729 +
60730 +void
60731 +gr_handle_alertkill(struct task_struct *task)
60732 +{
60733 + return;
60734 +}
60735 +
60736 +__u32
60737 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
60738 +{
60739 + return 1;
60740 +}
60741 +
60742 +__u32
60743 +gr_acl_handle_hidden_file(const struct dentry * dentry,
60744 + const struct vfsmount * mnt)
60745 +{
60746 + return 1;
60747 +}
60748 +
60749 +__u32
60750 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
60751 + int acc_mode)
60752 +{
60753 + return 1;
60754 +}
60755 +
60756 +__u32
60757 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
60758 +{
60759 + return 1;
60760 +}
60761 +
60762 +__u32
60763 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
60764 +{
60765 + return 1;
60766 +}
60767 +
60768 +int
60769 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
60770 + unsigned int *vm_flags)
60771 +{
60772 + return 1;
60773 +}
60774 +
60775 +__u32
60776 +gr_acl_handle_truncate(const struct dentry * dentry,
60777 + const struct vfsmount * mnt)
60778 +{
60779 + return 1;
60780 +}
60781 +
60782 +__u32
60783 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
60784 +{
60785 + return 1;
60786 +}
60787 +
60788 +__u32
60789 +gr_acl_handle_access(const struct dentry * dentry,
60790 + const struct vfsmount * mnt, const int fmode)
60791 +{
60792 + return 1;
60793 +}
60794 +
60795 +__u32
60796 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
60797 + umode_t *mode)
60798 +{
60799 + return 1;
60800 +}
60801 +
60802 +__u32
60803 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
60804 +{
60805 + return 1;
60806 +}
60807 +
60808 +__u32
60809 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
60810 +{
60811 + return 1;
60812 +}
60813 +
60814 +void
60815 +grsecurity_init(void)
60816 +{
60817 + return;
60818 +}
60819 +
60820 +umode_t gr_acl_umask(void)
60821 +{
60822 + return 0;
60823 +}
60824 +
60825 +__u32
60826 +gr_acl_handle_mknod(const struct dentry * new_dentry,
60827 + const struct dentry * parent_dentry,
60828 + const struct vfsmount * parent_mnt,
60829 + const int mode)
60830 +{
60831 + return 1;
60832 +}
60833 +
60834 +__u32
60835 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
60836 + const struct dentry * parent_dentry,
60837 + const struct vfsmount * parent_mnt)
60838 +{
60839 + return 1;
60840 +}
60841 +
60842 +__u32
60843 +gr_acl_handle_symlink(const struct dentry * new_dentry,
60844 + const struct dentry * parent_dentry,
60845 + const struct vfsmount * parent_mnt, const struct filename *from)
60846 +{
60847 + return 1;
60848 +}
60849 +
60850 +__u32
60851 +gr_acl_handle_link(const struct dentry * new_dentry,
60852 + const struct dentry * parent_dentry,
60853 + const struct vfsmount * parent_mnt,
60854 + const struct dentry * old_dentry,
60855 + const struct vfsmount * old_mnt, const struct filename *to)
60856 +{
60857 + return 1;
60858 +}
60859 +
60860 +int
60861 +gr_acl_handle_rename(const struct dentry *new_dentry,
60862 + const struct dentry *parent_dentry,
60863 + const struct vfsmount *parent_mnt,
60864 + const struct dentry *old_dentry,
60865 + const struct inode *old_parent_inode,
60866 + const struct vfsmount *old_mnt, const struct filename *newname)
60867 +{
60868 + return 0;
60869 +}
60870 +
60871 +int
60872 +gr_acl_handle_filldir(const struct file *file, const char *name,
60873 + const int namelen, const ino_t ino)
60874 +{
60875 + return 1;
60876 +}
60877 +
60878 +int
60879 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
60880 + const time_t shm_createtime, const kuid_t cuid, const int shmid)
60881 +{
60882 + return 1;
60883 +}
60884 +
60885 +int
60886 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
60887 +{
60888 + return 0;
60889 +}
60890 +
60891 +int
60892 +gr_search_accept(const struct socket *sock)
60893 +{
60894 + return 0;
60895 +}
60896 +
60897 +int
60898 +gr_search_listen(const struct socket *sock)
60899 +{
60900 + return 0;
60901 +}
60902 +
60903 +int
60904 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
60905 +{
60906 + return 0;
60907 +}
60908 +
60909 +__u32
60910 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
60911 +{
60912 + return 1;
60913 +}
60914 +
60915 +__u32
60916 +gr_acl_handle_creat(const struct dentry * dentry,
60917 + const struct dentry * p_dentry,
60918 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60919 + const int imode)
60920 +{
60921 + return 1;
60922 +}
60923 +
60924 +void
60925 +gr_acl_handle_exit(void)
60926 +{
60927 + return;
60928 +}
60929 +
60930 +int
60931 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
60932 +{
60933 + return 1;
60934 +}
60935 +
60936 +void
60937 +gr_set_role_label(const kuid_t uid, const kgid_t gid)
60938 +{
60939 + return;
60940 +}
60941 +
60942 +int
60943 +gr_acl_handle_procpidmem(const struct task_struct *task)
60944 +{
60945 + return 0;
60946 +}
60947 +
60948 +int
60949 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
60950 +{
60951 + return 0;
60952 +}
60953 +
60954 +int
60955 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
60956 +{
60957 + return 0;
60958 +}
60959 +
60960 +void
60961 +gr_set_kernel_label(struct task_struct *task)
60962 +{
60963 + return;
60964 +}
60965 +
60966 +int
60967 +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
60968 +{
60969 + return 0;
60970 +}
60971 +
60972 +int
60973 +gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
60974 +{
60975 + return 0;
60976 +}
60977 +
60978 +int gr_acl_enable_at_secure(void)
60979 +{
60980 + return 0;
60981 +}
60982 +
60983 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
60984 +{
60985 + return dentry->d_inode->i_sb->s_dev;
60986 +}
60987 +
60988 +void gr_put_exec_file(struct task_struct *task)
60989 +{
60990 + return;
60991 +}
60992 +
60993 +EXPORT_SYMBOL(gr_set_kernel_label);
60994 +#ifdef CONFIG_SECURITY
60995 +EXPORT_SYMBOL(gr_check_user_change);
60996 +EXPORT_SYMBOL(gr_check_group_change);
60997 +#endif
60998 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
60999 new file mode 100644
61000 index 0000000..abfa971
61001 --- /dev/null
61002 +++ b/grsecurity/grsec_exec.c
61003 @@ -0,0 +1,174 @@
61004 +#include <linux/kernel.h>
61005 +#include <linux/sched.h>
61006 +#include <linux/file.h>
61007 +#include <linux/binfmts.h>
61008 +#include <linux/fs.h>
61009 +#include <linux/types.h>
61010 +#include <linux/grdefs.h>
61011 +#include <linux/grsecurity.h>
61012 +#include <linux/grinternal.h>
61013 +#include <linux/capability.h>
61014 +#include <linux/module.h>
61015 +
61016 +#include <asm/uaccess.h>
61017 +
61018 +#ifdef CONFIG_GRKERNSEC_EXECLOG
61019 +static char gr_exec_arg_buf[132];
61020 +static DEFINE_MUTEX(gr_exec_arg_mutex);
61021 +#endif
61022 +
61023 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
61024 +
61025 +void
61026 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
61027 +{
61028 +#ifdef CONFIG_GRKERNSEC_EXECLOG
61029 + char *grarg = gr_exec_arg_buf;
61030 + unsigned int i, x, execlen = 0;
61031 + char c;
61032 +
61033 + if (!((grsec_enable_execlog && grsec_enable_group &&
61034 + in_group_p(grsec_audit_gid))
61035 + || (grsec_enable_execlog && !grsec_enable_group)))
61036 + return;
61037 +
61038 + mutex_lock(&gr_exec_arg_mutex);
61039 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
61040 +
61041 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
61042 + const char __user *p;
61043 + unsigned int len;
61044 +
61045 + p = get_user_arg_ptr(argv, i);
61046 + if (IS_ERR(p))
61047 + goto log;
61048 +
61049 + len = strnlen_user(p, 128 - execlen);
61050 + if (len > 128 - execlen)
61051 + len = 128 - execlen;
61052 + else if (len > 0)
61053 + len--;
61054 + if (copy_from_user(grarg + execlen, p, len))
61055 + goto log;
61056 +
61057 + /* rewrite unprintable characters */
61058 + for (x = 0; x < len; x++) {
61059 + c = *(grarg + execlen + x);
61060 + if (c < 32 || c > 126)
61061 + *(grarg + execlen + x) = ' ';
61062 + }
61063 +
61064 + execlen += len;
61065 + *(grarg + execlen) = ' ';
61066 + *(grarg + execlen + 1) = '\0';
61067 + execlen++;
61068 + }
61069 +
61070 + log:
61071 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
61072 + bprm->file->f_path.mnt, grarg);
61073 + mutex_unlock(&gr_exec_arg_mutex);
61074 +#endif
61075 + return;
61076 +}
61077 +
61078 +#ifdef CONFIG_GRKERNSEC
61079 +extern int gr_acl_is_capable(const int cap);
61080 +extern int gr_acl_is_capable_nolog(const int cap);
61081 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61082 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
61083 +extern int gr_chroot_is_capable(const int cap);
61084 +extern int gr_chroot_is_capable_nolog(const int cap);
61085 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61086 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
61087 +#endif
61088 +
61089 +const char *captab_log[] = {
61090 + "CAP_CHOWN",
61091 + "CAP_DAC_OVERRIDE",
61092 + "CAP_DAC_READ_SEARCH",
61093 + "CAP_FOWNER",
61094 + "CAP_FSETID",
61095 + "CAP_KILL",
61096 + "CAP_SETGID",
61097 + "CAP_SETUID",
61098 + "CAP_SETPCAP",
61099 + "CAP_LINUX_IMMUTABLE",
61100 + "CAP_NET_BIND_SERVICE",
61101 + "CAP_NET_BROADCAST",
61102 + "CAP_NET_ADMIN",
61103 + "CAP_NET_RAW",
61104 + "CAP_IPC_LOCK",
61105 + "CAP_IPC_OWNER",
61106 + "CAP_SYS_MODULE",
61107 + "CAP_SYS_RAWIO",
61108 + "CAP_SYS_CHROOT",
61109 + "CAP_SYS_PTRACE",
61110 + "CAP_SYS_PACCT",
61111 + "CAP_SYS_ADMIN",
61112 + "CAP_SYS_BOOT",
61113 + "CAP_SYS_NICE",
61114 + "CAP_SYS_RESOURCE",
61115 + "CAP_SYS_TIME",
61116 + "CAP_SYS_TTY_CONFIG",
61117 + "CAP_MKNOD",
61118 + "CAP_LEASE",
61119 + "CAP_AUDIT_WRITE",
61120 + "CAP_AUDIT_CONTROL",
61121 + "CAP_SETFCAP",
61122 + "CAP_MAC_OVERRIDE",
61123 + "CAP_MAC_ADMIN",
61124 + "CAP_SYSLOG",
61125 + "CAP_WAKE_ALARM"
61126 +};
61127 +
61128 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
61129 +
61130 +int gr_is_capable(const int cap)
61131 +{
61132 +#ifdef CONFIG_GRKERNSEC
61133 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
61134 + return 1;
61135 + return 0;
61136 +#else
61137 + return 1;
61138 +#endif
61139 +}
61140 +
61141 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
61142 +{
61143 +#ifdef CONFIG_GRKERNSEC
61144 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
61145 + return 1;
61146 + return 0;
61147 +#else
61148 + return 1;
61149 +#endif
61150 +}
61151 +
61152 +int gr_is_capable_nolog(const int cap)
61153 +{
61154 +#ifdef CONFIG_GRKERNSEC
61155 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
61156 + return 1;
61157 + return 0;
61158 +#else
61159 + return 1;
61160 +#endif
61161 +}
61162 +
61163 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
61164 +{
61165 +#ifdef CONFIG_GRKERNSEC
61166 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
61167 + return 1;
61168 + return 0;
61169 +#else
61170 + return 1;
61171 +#endif
61172 +}
61173 +
61174 +EXPORT_SYMBOL(gr_is_capable);
61175 +EXPORT_SYMBOL(gr_is_capable_nolog);
61176 +EXPORT_SYMBOL(gr_task_is_capable);
61177 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
61178 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
61179 new file mode 100644
61180 index 0000000..06cc6ea
61181 --- /dev/null
61182 +++ b/grsecurity/grsec_fifo.c
61183 @@ -0,0 +1,24 @@
61184 +#include <linux/kernel.h>
61185 +#include <linux/sched.h>
61186 +#include <linux/fs.h>
61187 +#include <linux/file.h>
61188 +#include <linux/grinternal.h>
61189 +
61190 +int
61191 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
61192 + const struct dentry *dir, const int flag, const int acc_mode)
61193 +{
61194 +#ifdef CONFIG_GRKERNSEC_FIFO
61195 + const struct cred *cred = current_cred();
61196 +
61197 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
61198 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
61199 + !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
61200 + !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
61201 + if (!inode_permission(dentry->d_inode, acc_mode))
61202 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
61203 + return -EACCES;
61204 + }
61205 +#endif
61206 + return 0;
61207 +}
61208 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
61209 new file mode 100644
61210 index 0000000..8ca18bf
61211 --- /dev/null
61212 +++ b/grsecurity/grsec_fork.c
61213 @@ -0,0 +1,23 @@
61214 +#include <linux/kernel.h>
61215 +#include <linux/sched.h>
61216 +#include <linux/grsecurity.h>
61217 +#include <linux/grinternal.h>
61218 +#include <linux/errno.h>
61219 +
61220 +void
61221 +gr_log_forkfail(const int retval)
61222 +{
61223 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
61224 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
61225 + switch (retval) {
61226 + case -EAGAIN:
61227 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
61228 + break;
61229 + case -ENOMEM:
61230 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
61231 + break;
61232 + }
61233 + }
61234 +#endif
61235 + return;
61236 +}
61237 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
61238 new file mode 100644
61239 index 0000000..a862e9f
61240 --- /dev/null
61241 +++ b/grsecurity/grsec_init.c
61242 @@ -0,0 +1,283 @@
61243 +#include <linux/kernel.h>
61244 +#include <linux/sched.h>
61245 +#include <linux/mm.h>
61246 +#include <linux/gracl.h>
61247 +#include <linux/slab.h>
61248 +#include <linux/vmalloc.h>
61249 +#include <linux/percpu.h>
61250 +#include <linux/module.h>
61251 +
61252 +int grsec_enable_ptrace_readexec;
61253 +int grsec_enable_setxid;
61254 +int grsec_enable_symlinkown;
61255 +kgid_t grsec_symlinkown_gid;
61256 +int grsec_enable_brute;
61257 +int grsec_enable_link;
61258 +int grsec_enable_dmesg;
61259 +int grsec_enable_harden_ptrace;
61260 +int grsec_enable_fifo;
61261 +int grsec_enable_execlog;
61262 +int grsec_enable_signal;
61263 +int grsec_enable_forkfail;
61264 +int grsec_enable_audit_ptrace;
61265 +int grsec_enable_time;
61266 +int grsec_enable_audit_textrel;
61267 +int grsec_enable_group;
61268 +kgid_t grsec_audit_gid;
61269 +int grsec_enable_chdir;
61270 +int grsec_enable_mount;
61271 +int grsec_enable_rofs;
61272 +int grsec_enable_chroot_findtask;
61273 +int grsec_enable_chroot_mount;
61274 +int grsec_enable_chroot_shmat;
61275 +int grsec_enable_chroot_fchdir;
61276 +int grsec_enable_chroot_double;
61277 +int grsec_enable_chroot_pivot;
61278 +int grsec_enable_chroot_chdir;
61279 +int grsec_enable_chroot_chmod;
61280 +int grsec_enable_chroot_mknod;
61281 +int grsec_enable_chroot_nice;
61282 +int grsec_enable_chroot_execlog;
61283 +int grsec_enable_chroot_caps;
61284 +int grsec_enable_chroot_sysctl;
61285 +int grsec_enable_chroot_unix;
61286 +int grsec_enable_tpe;
61287 +kgid_t grsec_tpe_gid;
61288 +int grsec_enable_blackhole;
61289 +#ifdef CONFIG_IPV6_MODULE
61290 +EXPORT_SYMBOL(grsec_enable_blackhole);
61291 +#endif
61292 +int grsec_lastack_retries;
61293 +int grsec_enable_tpe_all;
61294 +int grsec_enable_tpe_invert;
61295 +int grsec_enable_socket_all;
61296 +kgid_t grsec_socket_all_gid;
61297 +int grsec_enable_socket_client;
61298 +kgid_t grsec_socket_client_gid;
61299 +int grsec_enable_socket_server;
61300 +kgid_t grsec_socket_server_gid;
61301 +int grsec_resource_logging;
61302 +int grsec_disable_privio;
61303 +int grsec_enable_log_rwxmaps;
61304 +int grsec_lock;
61305 +
61306 +DEFINE_SPINLOCK(grsec_alert_lock);
61307 +unsigned long grsec_alert_wtime = 0;
61308 +unsigned long grsec_alert_fyet = 0;
61309 +
61310 +DEFINE_SPINLOCK(grsec_audit_lock);
61311 +
61312 +DEFINE_RWLOCK(grsec_exec_file_lock);
61313 +
61314 +char *gr_shared_page[4];
61315 +
61316 +char *gr_alert_log_fmt;
61317 +char *gr_audit_log_fmt;
61318 +char *gr_alert_log_buf;
61319 +char *gr_audit_log_buf;
61320 +
61321 +extern struct gr_arg *gr_usermode;
61322 +extern unsigned char *gr_system_salt;
61323 +extern unsigned char *gr_system_sum;
61324 +
61325 +void __init
61326 +grsecurity_init(void)
61327 +{
61328 + int j;
61329 + /* create the per-cpu shared pages */
61330 +
61331 +#ifdef CONFIG_X86
61332 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
61333 +#endif
61334 +
61335 + for (j = 0; j < 4; j++) {
61336 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
61337 + if (gr_shared_page[j] == NULL) {
61338 + panic("Unable to allocate grsecurity shared page");
61339 + return;
61340 + }
61341 + }
61342 +
61343 + /* allocate log buffers */
61344 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
61345 + if (!gr_alert_log_fmt) {
61346 + panic("Unable to allocate grsecurity alert log format buffer");
61347 + return;
61348 + }
61349 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
61350 + if (!gr_audit_log_fmt) {
61351 + panic("Unable to allocate grsecurity audit log format buffer");
61352 + return;
61353 + }
61354 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
61355 + if (!gr_alert_log_buf) {
61356 + panic("Unable to allocate grsecurity alert log buffer");
61357 + return;
61358 + }
61359 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
61360 + if (!gr_audit_log_buf) {
61361 + panic("Unable to allocate grsecurity audit log buffer");
61362 + return;
61363 + }
61364 +
61365 + /* allocate memory for authentication structure */
61366 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
61367 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
61368 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
61369 +
61370 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
61371 + panic("Unable to allocate grsecurity authentication structure");
61372 + return;
61373 + }
61374 +
61375 +
61376 +#ifdef CONFIG_GRKERNSEC_IO
61377 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
61378 + grsec_disable_privio = 1;
61379 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
61380 + grsec_disable_privio = 1;
61381 +#else
61382 + grsec_disable_privio = 0;
61383 +#endif
61384 +#endif
61385 +
61386 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
61387 + /* for backward compatibility, tpe_invert always defaults to on if
61388 + enabled in the kernel
61389 + */
61390 + grsec_enable_tpe_invert = 1;
61391 +#endif
61392 +
61393 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
61394 +#ifndef CONFIG_GRKERNSEC_SYSCTL
61395 + grsec_lock = 1;
61396 +#endif
61397 +
61398 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
61399 + grsec_enable_audit_textrel = 1;
61400 +#endif
61401 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
61402 + grsec_enable_log_rwxmaps = 1;
61403 +#endif
61404 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
61405 + grsec_enable_group = 1;
61406 + grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
61407 +#endif
61408 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
61409 + grsec_enable_ptrace_readexec = 1;
61410 +#endif
61411 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61412 + grsec_enable_chdir = 1;
61413 +#endif
61414 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61415 + grsec_enable_harden_ptrace = 1;
61416 +#endif
61417 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
61418 + grsec_enable_mount = 1;
61419 +#endif
61420 +#ifdef CONFIG_GRKERNSEC_LINK
61421 + grsec_enable_link = 1;
61422 +#endif
61423 +#ifdef CONFIG_GRKERNSEC_BRUTE
61424 + grsec_enable_brute = 1;
61425 +#endif
61426 +#ifdef CONFIG_GRKERNSEC_DMESG
61427 + grsec_enable_dmesg = 1;
61428 +#endif
61429 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
61430 + grsec_enable_blackhole = 1;
61431 + grsec_lastack_retries = 4;
61432 +#endif
61433 +#ifdef CONFIG_GRKERNSEC_FIFO
61434 + grsec_enable_fifo = 1;
61435 +#endif
61436 +#ifdef CONFIG_GRKERNSEC_EXECLOG
61437 + grsec_enable_execlog = 1;
61438 +#endif
61439 +#ifdef CONFIG_GRKERNSEC_SETXID
61440 + grsec_enable_setxid = 1;
61441 +#endif
61442 +#ifdef CONFIG_GRKERNSEC_SIGNAL
61443 + grsec_enable_signal = 1;
61444 +#endif
61445 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
61446 + grsec_enable_forkfail = 1;
61447 +#endif
61448 +#ifdef CONFIG_GRKERNSEC_TIME
61449 + grsec_enable_time = 1;
61450 +#endif
61451 +#ifdef CONFIG_GRKERNSEC_RESLOG
61452 + grsec_resource_logging = 1;
61453 +#endif
61454 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61455 + grsec_enable_chroot_findtask = 1;
61456 +#endif
61457 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61458 + grsec_enable_chroot_unix = 1;
61459 +#endif
61460 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
61461 + grsec_enable_chroot_mount = 1;
61462 +#endif
61463 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61464 + grsec_enable_chroot_fchdir = 1;
61465 +#endif
61466 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61467 + grsec_enable_chroot_shmat = 1;
61468 +#endif
61469 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
61470 + grsec_enable_audit_ptrace = 1;
61471 +#endif
61472 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
61473 + grsec_enable_chroot_double = 1;
61474 +#endif
61475 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
61476 + grsec_enable_chroot_pivot = 1;
61477 +#endif
61478 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
61479 + grsec_enable_chroot_chdir = 1;
61480 +#endif
61481 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
61482 + grsec_enable_chroot_chmod = 1;
61483 +#endif
61484 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
61485 + grsec_enable_chroot_mknod = 1;
61486 +#endif
61487 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61488 + grsec_enable_chroot_nice = 1;
61489 +#endif
61490 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61491 + grsec_enable_chroot_execlog = 1;
61492 +#endif
61493 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61494 + grsec_enable_chroot_caps = 1;
61495 +#endif
61496 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
61497 + grsec_enable_chroot_sysctl = 1;
61498 +#endif
61499 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
61500 + grsec_enable_symlinkown = 1;
61501 + grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
61502 +#endif
61503 +#ifdef CONFIG_GRKERNSEC_TPE
61504 + grsec_enable_tpe = 1;
61505 + grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
61506 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
61507 + grsec_enable_tpe_all = 1;
61508 +#endif
61509 +#endif
61510 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
61511 + grsec_enable_socket_all = 1;
61512 + grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
61513 +#endif
61514 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
61515 + grsec_enable_socket_client = 1;
61516 + grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
61517 +#endif
61518 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61519 + grsec_enable_socket_server = 1;
61520 + grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
61521 +#endif
61522 +#endif
61523 +
61524 + return;
61525 +}
61526 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
61527 new file mode 100644
61528 index 0000000..6095407
61529 --- /dev/null
61530 +++ b/grsecurity/grsec_link.c
61531 @@ -0,0 +1,58 @@
61532 +#include <linux/kernel.h>
61533 +#include <linux/sched.h>
61534 +#include <linux/fs.h>
61535 +#include <linux/file.h>
61536 +#include <linux/grinternal.h>
61537 +
61538 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
61539 +{
61540 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
61541 + const struct inode *link_inode = link->dentry->d_inode;
61542 +
61543 + if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
61544 + /* ignore root-owned links, e.g. /proc/self */
61545 + !uid_eq(link_inode->i_uid, GLOBAL_ROOT_UID) && target &&
61546 + !uid_eq(link_inode->i_uid, target->i_uid)) {
61547 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
61548 + return 1;
61549 + }
61550 +#endif
61551 + return 0;
61552 +}
61553 +
61554 +int
61555 +gr_handle_follow_link(const struct inode *parent,
61556 + const struct inode *inode,
61557 + const struct dentry *dentry, const struct vfsmount *mnt)
61558 +{
61559 +#ifdef CONFIG_GRKERNSEC_LINK
61560 + const struct cred *cred = current_cred();
61561 +
61562 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
61563 + (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
61564 + (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
61565 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
61566 + return -EACCES;
61567 + }
61568 +#endif
61569 + return 0;
61570 +}
61571 +
61572 +int
61573 +gr_handle_hardlink(const struct dentry *dentry,
61574 + const struct vfsmount *mnt,
61575 + struct inode *inode, const int mode, const struct filename *to)
61576 +{
61577 +#ifdef CONFIG_GRKERNSEC_LINK
61578 + const struct cred *cred = current_cred();
61579 +
61580 + if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
61581 + (!S_ISREG(mode) || is_privileged_binary(dentry) ||
61582 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
61583 + !capable(CAP_FOWNER) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
61584 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
61585 + return -EPERM;
61586 + }
61587 +#endif
61588 + return 0;
61589 +}
61590 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
61591 new file mode 100644
61592 index 0000000..7c06085
61593 --- /dev/null
61594 +++ b/grsecurity/grsec_log.c
61595 @@ -0,0 +1,326 @@
61596 +#include <linux/kernel.h>
61597 +#include <linux/sched.h>
61598 +#include <linux/file.h>
61599 +#include <linux/tty.h>
61600 +#include <linux/fs.h>
61601 +#include <linux/grinternal.h>
61602 +
61603 +#ifdef CONFIG_TREE_PREEMPT_RCU
61604 +#define DISABLE_PREEMPT() preempt_disable()
61605 +#define ENABLE_PREEMPT() preempt_enable()
61606 +#else
61607 +#define DISABLE_PREEMPT()
61608 +#define ENABLE_PREEMPT()
61609 +#endif
61610 +
61611 +#define BEGIN_LOCKS(x) \
61612 + DISABLE_PREEMPT(); \
61613 + rcu_read_lock(); \
61614 + read_lock(&tasklist_lock); \
61615 + read_lock(&grsec_exec_file_lock); \
61616 + if (x != GR_DO_AUDIT) \
61617 + spin_lock(&grsec_alert_lock); \
61618 + else \
61619 + spin_lock(&grsec_audit_lock)
61620 +
61621 +#define END_LOCKS(x) \
61622 + if (x != GR_DO_AUDIT) \
61623 + spin_unlock(&grsec_alert_lock); \
61624 + else \
61625 + spin_unlock(&grsec_audit_lock); \
61626 + read_unlock(&grsec_exec_file_lock); \
61627 + read_unlock(&tasklist_lock); \
61628 + rcu_read_unlock(); \
61629 + ENABLE_PREEMPT(); \
61630 + if (x == GR_DONT_AUDIT) \
61631 + gr_handle_alertkill(current)
61632 +
61633 +enum {
61634 + FLOODING,
61635 + NO_FLOODING
61636 +};
61637 +
61638 +extern char *gr_alert_log_fmt;
61639 +extern char *gr_audit_log_fmt;
61640 +extern char *gr_alert_log_buf;
61641 +extern char *gr_audit_log_buf;
61642 +
61643 +static int gr_log_start(int audit)
61644 +{
61645 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
61646 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
61647 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
61648 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
61649 + unsigned long curr_secs = get_seconds();
61650 +
61651 + if (audit == GR_DO_AUDIT)
61652 + goto set_fmt;
61653 +
61654 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
61655 + grsec_alert_wtime = curr_secs;
61656 + grsec_alert_fyet = 0;
61657 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
61658 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
61659 + grsec_alert_fyet++;
61660 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
61661 + grsec_alert_wtime = curr_secs;
61662 + grsec_alert_fyet++;
61663 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
61664 + return FLOODING;
61665 + }
61666 + else return FLOODING;
61667 +
61668 +set_fmt:
61669 +#endif
61670 + memset(buf, 0, PAGE_SIZE);
61671 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
61672 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
61673 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
61674 + } else if (current->signal->curr_ip) {
61675 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
61676 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
61677 + } else if (gr_acl_is_enabled()) {
61678 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
61679 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
61680 + } else {
61681 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
61682 + strcpy(buf, fmt);
61683 + }
61684 +
61685 + return NO_FLOODING;
61686 +}
61687 +
61688 +static void gr_log_middle(int audit, const char *msg, va_list ap)
61689 + __attribute__ ((format (printf, 2, 0)));
61690 +
61691 +static void gr_log_middle(int audit, const char *msg, va_list ap)
61692 +{
61693 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
61694 + unsigned int len = strlen(buf);
61695 +
61696 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
61697 +
61698 + return;
61699 +}
61700 +
61701 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
61702 + __attribute__ ((format (printf, 2, 3)));
61703 +
61704 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
61705 +{
61706 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
61707 + unsigned int len = strlen(buf);
61708 + va_list ap;
61709 +
61710 + va_start(ap, msg);
61711 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
61712 + va_end(ap);
61713 +
61714 + return;
61715 +}
61716 +
61717 +static void gr_log_end(int audit, int append_default)
61718 +{
61719 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
61720 + if (append_default) {
61721 + struct task_struct *task = current;
61722 + struct task_struct *parent = task->real_parent;
61723 + const struct cred *cred = __task_cred(task);
61724 + const struct cred *pcred = __task_cred(parent);
61725 + unsigned int len = strlen(buf);
61726 +
61727 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
61728 + }
61729 +
61730 + printk("%s\n", buf);
61731 +
61732 + return;
61733 +}
61734 +
61735 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
61736 +{
61737 + int logtype;
61738 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
61739 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
61740 + void *voidptr = NULL;
61741 + int num1 = 0, num2 = 0;
61742 + unsigned long ulong1 = 0, ulong2 = 0;
61743 + struct dentry *dentry = NULL;
61744 + struct vfsmount *mnt = NULL;
61745 + struct file *file = NULL;
61746 + struct task_struct *task = NULL;
61747 + const struct cred *cred, *pcred;
61748 + va_list ap;
61749 +
61750 + BEGIN_LOCKS(audit);
61751 + logtype = gr_log_start(audit);
61752 + if (logtype == FLOODING) {
61753 + END_LOCKS(audit);
61754 + return;
61755 + }
61756 + va_start(ap, argtypes);
61757 + switch (argtypes) {
61758 + case GR_TTYSNIFF:
61759 + task = va_arg(ap, struct task_struct *);
61760 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
61761 + break;
61762 + case GR_SYSCTL_HIDDEN:
61763 + str1 = va_arg(ap, char *);
61764 + gr_log_middle_varargs(audit, msg, result, str1);
61765 + break;
61766 + case GR_RBAC:
61767 + dentry = va_arg(ap, struct dentry *);
61768 + mnt = va_arg(ap, struct vfsmount *);
61769 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
61770 + break;
61771 + case GR_RBAC_STR:
61772 + dentry = va_arg(ap, struct dentry *);
61773 + mnt = va_arg(ap, struct vfsmount *);
61774 + str1 = va_arg(ap, char *);
61775 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
61776 + break;
61777 + case GR_STR_RBAC:
61778 + str1 = va_arg(ap, char *);
61779 + dentry = va_arg(ap, struct dentry *);
61780 + mnt = va_arg(ap, struct vfsmount *);
61781 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
61782 + break;
61783 + case GR_RBAC_MODE2:
61784 + dentry = va_arg(ap, struct dentry *);
61785 + mnt = va_arg(ap, struct vfsmount *);
61786 + str1 = va_arg(ap, char *);
61787 + str2 = va_arg(ap, char *);
61788 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
61789 + break;
61790 + case GR_RBAC_MODE3:
61791 + dentry = va_arg(ap, struct dentry *);
61792 + mnt = va_arg(ap, struct vfsmount *);
61793 + str1 = va_arg(ap, char *);
61794 + str2 = va_arg(ap, char *);
61795 + str3 = va_arg(ap, char *);
61796 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
61797 + break;
61798 + case GR_FILENAME:
61799 + dentry = va_arg(ap, struct dentry *);
61800 + mnt = va_arg(ap, struct vfsmount *);
61801 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
61802 + break;
61803 + case GR_STR_FILENAME:
61804 + str1 = va_arg(ap, char *);
61805 + dentry = va_arg(ap, struct dentry *);
61806 + mnt = va_arg(ap, struct vfsmount *);
61807 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
61808 + break;
61809 + case GR_FILENAME_STR:
61810 + dentry = va_arg(ap, struct dentry *);
61811 + mnt = va_arg(ap, struct vfsmount *);
61812 + str1 = va_arg(ap, char *);
61813 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
61814 + break;
61815 + case GR_FILENAME_TWO_INT:
61816 + dentry = va_arg(ap, struct dentry *);
61817 + mnt = va_arg(ap, struct vfsmount *);
61818 + num1 = va_arg(ap, int);
61819 + num2 = va_arg(ap, int);
61820 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
61821 + break;
61822 + case GR_FILENAME_TWO_INT_STR:
61823 + dentry = va_arg(ap, struct dentry *);
61824 + mnt = va_arg(ap, struct vfsmount *);
61825 + num1 = va_arg(ap, int);
61826 + num2 = va_arg(ap, int);
61827 + str1 = va_arg(ap, char *);
61828 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
61829 + break;
61830 + case GR_TEXTREL:
61831 + file = va_arg(ap, struct file *);
61832 + ulong1 = va_arg(ap, unsigned long);
61833 + ulong2 = va_arg(ap, unsigned long);
61834 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
61835 + break;
61836 + case GR_PTRACE:
61837 + task = va_arg(ap, struct task_struct *);
61838 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
61839 + break;
61840 + case GR_RESOURCE:
61841 + task = va_arg(ap, struct task_struct *);
61842 + cred = __task_cred(task);
61843 + pcred = __task_cred(task->real_parent);
61844 + ulong1 = va_arg(ap, unsigned long);
61845 + str1 = va_arg(ap, char *);
61846 + ulong2 = va_arg(ap, unsigned long);
61847 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
61848 + break;
61849 + case GR_CAP:
61850 + task = va_arg(ap, struct task_struct *);
61851 + cred = __task_cred(task);
61852 + pcred = __task_cred(task->real_parent);
61853 + str1 = va_arg(ap, char *);
61854 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
61855 + break;
61856 + case GR_SIG:
61857 + str1 = va_arg(ap, char *);
61858 + voidptr = va_arg(ap, void *);
61859 + gr_log_middle_varargs(audit, msg, str1, voidptr);
61860 + break;
61861 + case GR_SIG2:
61862 + task = va_arg(ap, struct task_struct *);
61863 + cred = __task_cred(task);
61864 + pcred = __task_cred(task->real_parent);
61865 + num1 = va_arg(ap, int);
61866 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
61867 + break;
61868 + case GR_CRASH1:
61869 + task = va_arg(ap, struct task_struct *);
61870 + cred = __task_cred(task);
61871 + pcred = __task_cred(task->real_parent);
61872 + ulong1 = va_arg(ap, unsigned long);
61873 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
61874 + break;
61875 + case GR_CRASH2:
61876 + task = va_arg(ap, struct task_struct *);
61877 + cred = __task_cred(task);
61878 + pcred = __task_cred(task->real_parent);
61879 + ulong1 = va_arg(ap, unsigned long);
61880 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
61881 + break;
61882 + case GR_RWXMAP:
61883 + file = va_arg(ap, struct file *);
61884 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
61885 + break;
61886 + case GR_PSACCT:
61887 + {
61888 + unsigned int wday, cday;
61889 + __u8 whr, chr;
61890 + __u8 wmin, cmin;
61891 + __u8 wsec, csec;
61892 + char cur_tty[64] = { 0 };
61893 + char parent_tty[64] = { 0 };
61894 +
61895 + task = va_arg(ap, struct task_struct *);
61896 + wday = va_arg(ap, unsigned int);
61897 + cday = va_arg(ap, unsigned int);
61898 + whr = va_arg(ap, int);
61899 + chr = va_arg(ap, int);
61900 + wmin = va_arg(ap, int);
61901 + cmin = va_arg(ap, int);
61902 + wsec = va_arg(ap, int);
61903 + csec = va_arg(ap, int);
61904 + ulong1 = va_arg(ap, unsigned long);
61905 + cred = __task_cred(task);
61906 + pcred = __task_cred(task->real_parent);
61907 +
61908 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
61909 + }
61910 + break;
61911 + default:
61912 + gr_log_middle(audit, msg, ap);
61913 + }
61914 + va_end(ap);
61915 + // these don't need DEFAULTSECARGS printed on the end
61916 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
61917 + gr_log_end(audit, 0);
61918 + else
61919 + gr_log_end(audit, 1);
61920 + END_LOCKS(audit);
61921 +}
61922 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
61923 new file mode 100644
61924 index 0000000..f536303
61925 --- /dev/null
61926 +++ b/grsecurity/grsec_mem.c
61927 @@ -0,0 +1,40 @@
61928 +#include <linux/kernel.h>
61929 +#include <linux/sched.h>
61930 +#include <linux/mm.h>
61931 +#include <linux/mman.h>
61932 +#include <linux/grinternal.h>
61933 +
61934 +void
61935 +gr_handle_ioperm(void)
61936 +{
61937 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
61938 + return;
61939 +}
61940 +
61941 +void
61942 +gr_handle_iopl(void)
61943 +{
61944 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
61945 + return;
61946 +}
61947 +
61948 +void
61949 +gr_handle_mem_readwrite(u64 from, u64 to)
61950 +{
61951 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
61952 + return;
61953 +}
61954 +
61955 +void
61956 +gr_handle_vm86(void)
61957 +{
61958 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
61959 + return;
61960 +}
61961 +
61962 +void
61963 +gr_log_badprocpid(const char *entry)
61964 +{
61965 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
61966 + return;
61967 +}
61968 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
61969 new file mode 100644
61970 index 0000000..2131422
61971 --- /dev/null
61972 +++ b/grsecurity/grsec_mount.c
61973 @@ -0,0 +1,62 @@
61974 +#include <linux/kernel.h>
61975 +#include <linux/sched.h>
61976 +#include <linux/mount.h>
61977 +#include <linux/grsecurity.h>
61978 +#include <linux/grinternal.h>
61979 +
61980 +void
61981 +gr_log_remount(const char *devname, const int retval)
61982 +{
61983 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
61984 + if (grsec_enable_mount && (retval >= 0))
61985 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
61986 +#endif
61987 + return;
61988 +}
61989 +
61990 +void
61991 +gr_log_unmount(const char *devname, const int retval)
61992 +{
61993 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
61994 + if (grsec_enable_mount && (retval >= 0))
61995 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
61996 +#endif
61997 + return;
61998 +}
61999 +
62000 +void
62001 +gr_log_mount(const char *from, const char *to, const int retval)
62002 +{
62003 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62004 + if (grsec_enable_mount && (retval >= 0))
62005 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
62006 +#endif
62007 + return;
62008 +}
62009 +
62010 +int
62011 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
62012 +{
62013 +#ifdef CONFIG_GRKERNSEC_ROFS
62014 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
62015 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
62016 + return -EPERM;
62017 + } else
62018 + return 0;
62019 +#endif
62020 + return 0;
62021 +}
62022 +
62023 +int
62024 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
62025 +{
62026 +#ifdef CONFIG_GRKERNSEC_ROFS
62027 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
62028 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
62029 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
62030 + return -EPERM;
62031 + } else
62032 + return 0;
62033 +#endif
62034 + return 0;
62035 +}
62036 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
62037 new file mode 100644
62038 index 0000000..a3b12a0
62039 --- /dev/null
62040 +++ b/grsecurity/grsec_pax.c
62041 @@ -0,0 +1,36 @@
62042 +#include <linux/kernel.h>
62043 +#include <linux/sched.h>
62044 +#include <linux/mm.h>
62045 +#include <linux/file.h>
62046 +#include <linux/grinternal.h>
62047 +#include <linux/grsecurity.h>
62048 +
62049 +void
62050 +gr_log_textrel(struct vm_area_struct * vma)
62051 +{
62052 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
62053 + if (grsec_enable_audit_textrel)
62054 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
62055 +#endif
62056 + return;
62057 +}
62058 +
62059 +void
62060 +gr_log_rwxmmap(struct file *file)
62061 +{
62062 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
62063 + if (grsec_enable_log_rwxmaps)
62064 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
62065 +#endif
62066 + return;
62067 +}
62068 +
62069 +void
62070 +gr_log_rwxmprotect(struct file *file)
62071 +{
62072 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
62073 + if (grsec_enable_log_rwxmaps)
62074 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
62075 +#endif
62076 + return;
62077 +}
62078 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
62079 new file mode 100644
62080 index 0000000..f7f29aa
62081 --- /dev/null
62082 +++ b/grsecurity/grsec_ptrace.c
62083 @@ -0,0 +1,30 @@
62084 +#include <linux/kernel.h>
62085 +#include <linux/sched.h>
62086 +#include <linux/grinternal.h>
62087 +#include <linux/security.h>
62088 +
62089 +void
62090 +gr_audit_ptrace(struct task_struct *task)
62091 +{
62092 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
62093 + if (grsec_enable_audit_ptrace)
62094 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
62095 +#endif
62096 + return;
62097 +}
62098 +
62099 +int
62100 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
62101 +{
62102 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
62103 + const struct dentry *dentry = file->f_path.dentry;
62104 + const struct vfsmount *mnt = file->f_path.mnt;
62105 +
62106 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
62107 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
62108 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
62109 + return -EACCES;
62110 + }
62111 +#endif
62112 + return 0;
62113 +}
62114 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
62115 new file mode 100644
62116 index 0000000..5c00416
62117 --- /dev/null
62118 +++ b/grsecurity/grsec_sig.c
62119 @@ -0,0 +1,222 @@
62120 +#include <linux/kernel.h>
62121 +#include <linux/sched.h>
62122 +#include <linux/delay.h>
62123 +#include <linux/grsecurity.h>
62124 +#include <linux/grinternal.h>
62125 +#include <linux/hardirq.h>
62126 +
62127 +char *signames[] = {
62128 + [SIGSEGV] = "Segmentation fault",
62129 + [SIGILL] = "Illegal instruction",
62130 + [SIGABRT] = "Abort",
62131 + [SIGBUS] = "Invalid alignment/Bus error"
62132 +};
62133 +
62134 +void
62135 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
62136 +{
62137 +#ifdef CONFIG_GRKERNSEC_SIGNAL
62138 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
62139 + (sig == SIGABRT) || (sig == SIGBUS))) {
62140 + if (t->pid == current->pid) {
62141 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
62142 + } else {
62143 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
62144 + }
62145 + }
62146 +#endif
62147 + return;
62148 +}
62149 +
62150 +int
62151 +gr_handle_signal(const struct task_struct *p, const int sig)
62152 +{
62153 +#ifdef CONFIG_GRKERNSEC
62154 + /* ignore the 0 signal for protected task checks */
62155 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
62156 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
62157 + return -EPERM;
62158 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
62159 + return -EPERM;
62160 + }
62161 +#endif
62162 + return 0;
62163 +}
62164 +
62165 +#ifdef CONFIG_GRKERNSEC
62166 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
62167 +
62168 +int gr_fake_force_sig(int sig, struct task_struct *t)
62169 +{
62170 + unsigned long int flags;
62171 + int ret, blocked, ignored;
62172 + struct k_sigaction *action;
62173 +
62174 + spin_lock_irqsave(&t->sighand->siglock, flags);
62175 + action = &t->sighand->action[sig-1];
62176 + ignored = action->sa.sa_handler == SIG_IGN;
62177 + blocked = sigismember(&t->blocked, sig);
62178 + if (blocked || ignored) {
62179 + action->sa.sa_handler = SIG_DFL;
62180 + if (blocked) {
62181 + sigdelset(&t->blocked, sig);
62182 + recalc_sigpending_and_wake(t);
62183 + }
62184 + }
62185 + if (action->sa.sa_handler == SIG_DFL)
62186 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
62187 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
62188 +
62189 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
62190 +
62191 + return ret;
62192 +}
62193 +#endif
62194 +
62195 +#ifdef CONFIG_GRKERNSEC_BRUTE
62196 +#define GR_USER_BAN_TIME (15 * 60)
62197 +#define GR_DAEMON_BRUTE_TIME (30 * 60)
62198 +
62199 +static int __get_dumpable(unsigned long mm_flags)
62200 +{
62201 + int ret;
62202 +
62203 + ret = mm_flags & MMF_DUMPABLE_MASK;
62204 + return (ret >= 2) ? 2 : ret;
62205 +}
62206 +#endif
62207 +
62208 +void gr_handle_brute_attach(unsigned long mm_flags)
62209 +{
62210 +#ifdef CONFIG_GRKERNSEC_BRUTE
62211 + struct task_struct *p = current;
62212 + kuid_t uid = GLOBAL_ROOT_UID;
62213 + int daemon = 0;
62214 +
62215 + if (!grsec_enable_brute)
62216 + return;
62217 +
62218 + rcu_read_lock();
62219 + read_lock(&tasklist_lock);
62220 + read_lock(&grsec_exec_file_lock);
62221 + if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
62222 + p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
62223 + p->real_parent->brute = 1;
62224 + daemon = 1;
62225 + } else {
62226 + const struct cred *cred = __task_cred(p), *cred2;
62227 + struct task_struct *tsk, *tsk2;
62228 +
62229 + if (!__get_dumpable(mm_flags) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
62230 + struct user_struct *user;
62231 +
62232 + uid = cred->uid;
62233 +
62234 + /* this is put upon execution past expiration */
62235 + user = find_user(uid);
62236 + if (user == NULL)
62237 + goto unlock;
62238 + user->banned = 1;
62239 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
62240 + if (user->ban_expires == ~0UL)
62241 + user->ban_expires--;
62242 +
62243 + do_each_thread(tsk2, tsk) {
62244 + cred2 = __task_cred(tsk);
62245 + if (tsk != p && uid_eq(cred2->uid, uid))
62246 + gr_fake_force_sig(SIGKILL, tsk);
62247 + } while_each_thread(tsk2, tsk);
62248 + }
62249 + }
62250 +unlock:
62251 + read_unlock(&grsec_exec_file_lock);
62252 + read_unlock(&tasklist_lock);
62253 + rcu_read_unlock();
62254 +
62255 + if (!uid_eq(uid, GLOBAL_ROOT_UID))
62256 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
62257 + from_kuid_munged(&init_user_ns, uid), GR_USER_BAN_TIME / 60);
62258 + else if (daemon)
62259 + gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
62260 +
62261 +#endif
62262 + return;
62263 +}
62264 +
62265 +void gr_handle_brute_check(void)
62266 +{
62267 +#ifdef CONFIG_GRKERNSEC_BRUTE
62268 + struct task_struct *p = current;
62269 +
62270 + if (unlikely(p->brute)) {
62271 + if (!grsec_enable_brute)
62272 + p->brute = 0;
62273 + else if (time_before(get_seconds(), p->brute_expires))
62274 + msleep(30 * 1000);
62275 + }
62276 +#endif
62277 + return;
62278 +}
62279 +
62280 +void gr_handle_kernel_exploit(void)
62281 +{
62282 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
62283 + const struct cred *cred;
62284 + struct task_struct *tsk, *tsk2;
62285 + struct user_struct *user;
62286 + kuid_t uid;
62287 +
62288 + if (in_irq() || in_serving_softirq() || in_nmi())
62289 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
62290 +
62291 + uid = current_uid();
62292 +
62293 + if (uid_eq(uid, GLOBAL_ROOT_UID))
62294 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
62295 + else {
62296 + /* kill all the processes of this user, hold a reference
62297 + to their creds struct, and prevent them from creating
62298 + another process until system reset
62299 + */
62300 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
62301 + from_kuid_munged(&init_user_ns, uid));
62302 + /* we intentionally leak this ref */
62303 + user = get_uid(current->cred->user);
62304 + if (user) {
62305 + user->banned = 1;
62306 + user->ban_expires = ~0UL;
62307 + }
62308 +
62309 + read_lock(&tasklist_lock);
62310 + do_each_thread(tsk2, tsk) {
62311 + cred = __task_cred(tsk);
62312 + if (uid_eq(cred->uid, uid))
62313 + gr_fake_force_sig(SIGKILL, tsk);
62314 + } while_each_thread(tsk2, tsk);
62315 + read_unlock(&tasklist_lock);
62316 + }
62317 +#endif
62318 +}
62319 +
62320 +int __gr_process_user_ban(struct user_struct *user)
62321 +{
62322 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62323 + if (unlikely(user->banned)) {
62324 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
62325 + user->banned = 0;
62326 + user->ban_expires = 0;
62327 + free_uid(user);
62328 + } else
62329 + return -EPERM;
62330 + }
62331 +#endif
62332 + return 0;
62333 +}
62334 +
62335 +int gr_process_user_ban(void)
62336 +{
62337 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62338 + return __gr_process_user_ban(current->cred->user);
62339 +#endif
62340 + return 0;
62341 +}
62342 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
62343 new file mode 100644
62344 index 0000000..4030d57
62345 --- /dev/null
62346 +++ b/grsecurity/grsec_sock.c
62347 @@ -0,0 +1,244 @@
62348 +#include <linux/kernel.h>
62349 +#include <linux/module.h>
62350 +#include <linux/sched.h>
62351 +#include <linux/file.h>
62352 +#include <linux/net.h>
62353 +#include <linux/in.h>
62354 +#include <linux/ip.h>
62355 +#include <net/sock.h>
62356 +#include <net/inet_sock.h>
62357 +#include <linux/grsecurity.h>
62358 +#include <linux/grinternal.h>
62359 +#include <linux/gracl.h>
62360 +
62361 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
62362 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
62363 +
62364 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
62365 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
62366 +
62367 +#ifdef CONFIG_UNIX_MODULE
62368 +EXPORT_SYMBOL(gr_acl_handle_unix);
62369 +EXPORT_SYMBOL(gr_acl_handle_mknod);
62370 +EXPORT_SYMBOL(gr_handle_chroot_unix);
62371 +EXPORT_SYMBOL(gr_handle_create);
62372 +#endif
62373 +
62374 +#ifdef CONFIG_GRKERNSEC
62375 +#define gr_conn_table_size 32749
62376 +struct conn_table_entry {
62377 + struct conn_table_entry *next;
62378 + struct signal_struct *sig;
62379 +};
62380 +
62381 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
62382 +DEFINE_SPINLOCK(gr_conn_table_lock);
62383 +
62384 +extern const char * gr_socktype_to_name(unsigned char type);
62385 +extern const char * gr_proto_to_name(unsigned char proto);
62386 +extern const char * gr_sockfamily_to_name(unsigned char family);
62387 +
62388 +static __inline__ int
62389 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
62390 +{
62391 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
62392 +}
62393 +
62394 +static __inline__ int
62395 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
62396 + __u16 sport, __u16 dport)
62397 +{
62398 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
62399 + sig->gr_sport == sport && sig->gr_dport == dport))
62400 + return 1;
62401 + else
62402 + return 0;
62403 +}
62404 +
62405 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
62406 +{
62407 + struct conn_table_entry **match;
62408 + unsigned int index;
62409 +
62410 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
62411 + sig->gr_sport, sig->gr_dport,
62412 + gr_conn_table_size);
62413 +
62414 + newent->sig = sig;
62415 +
62416 + match = &gr_conn_table[index];
62417 + newent->next = *match;
62418 + *match = newent;
62419 +
62420 + return;
62421 +}
62422 +
62423 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
62424 +{
62425 + struct conn_table_entry *match, *last = NULL;
62426 + unsigned int index;
62427 +
62428 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
62429 + sig->gr_sport, sig->gr_dport,
62430 + gr_conn_table_size);
62431 +
62432 + match = gr_conn_table[index];
62433 + while (match && !conn_match(match->sig,
62434 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
62435 + sig->gr_dport)) {
62436 + last = match;
62437 + match = match->next;
62438 + }
62439 +
62440 + if (match) {
62441 + if (last)
62442 + last->next = match->next;
62443 + else
62444 + gr_conn_table[index] = NULL;
62445 + kfree(match);
62446 + }
62447 +
62448 + return;
62449 +}
62450 +
62451 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
62452 + __u16 sport, __u16 dport)
62453 +{
62454 + struct conn_table_entry *match;
62455 + unsigned int index;
62456 +
62457 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
62458 +
62459 + match = gr_conn_table[index];
62460 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
62461 + match = match->next;
62462 +
62463 + if (match)
62464 + return match->sig;
62465 + else
62466 + return NULL;
62467 +}
62468 +
62469 +#endif
62470 +
62471 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
62472 +{
62473 +#ifdef CONFIG_GRKERNSEC
62474 + struct signal_struct *sig = task->signal;
62475 + struct conn_table_entry *newent;
62476 +
62477 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
62478 + if (newent == NULL)
62479 + return;
62480 + /* no bh lock needed since we are called with bh disabled */
62481 + spin_lock(&gr_conn_table_lock);
62482 + gr_del_task_from_ip_table_nolock(sig);
62483 + sig->gr_saddr = inet->inet_rcv_saddr;
62484 + sig->gr_daddr = inet->inet_daddr;
62485 + sig->gr_sport = inet->inet_sport;
62486 + sig->gr_dport = inet->inet_dport;
62487 + gr_add_to_task_ip_table_nolock(sig, newent);
62488 + spin_unlock(&gr_conn_table_lock);
62489 +#endif
62490 + return;
62491 +}
62492 +
62493 +void gr_del_task_from_ip_table(struct task_struct *task)
62494 +{
62495 +#ifdef CONFIG_GRKERNSEC
62496 + spin_lock_bh(&gr_conn_table_lock);
62497 + gr_del_task_from_ip_table_nolock(task->signal);
62498 + spin_unlock_bh(&gr_conn_table_lock);
62499 +#endif
62500 + return;
62501 +}
62502 +
62503 +void
62504 +gr_attach_curr_ip(const struct sock *sk)
62505 +{
62506 +#ifdef CONFIG_GRKERNSEC
62507 + struct signal_struct *p, *set;
62508 + const struct inet_sock *inet = inet_sk(sk);
62509 +
62510 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
62511 + return;
62512 +
62513 + set = current->signal;
62514 +
62515 + spin_lock_bh(&gr_conn_table_lock);
62516 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
62517 + inet->inet_dport, inet->inet_sport);
62518 + if (unlikely(p != NULL)) {
62519 + set->curr_ip = p->curr_ip;
62520 + set->used_accept = 1;
62521 + gr_del_task_from_ip_table_nolock(p);
62522 + spin_unlock_bh(&gr_conn_table_lock);
62523 + return;
62524 + }
62525 + spin_unlock_bh(&gr_conn_table_lock);
62526 +
62527 + set->curr_ip = inet->inet_daddr;
62528 + set->used_accept = 1;
62529 +#endif
62530 + return;
62531 +}
62532 +
62533 +int
62534 +gr_handle_sock_all(const int family, const int type, const int protocol)
62535 +{
62536 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
62537 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
62538 + (family != AF_UNIX)) {
62539 + if (family == AF_INET)
62540 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
62541 + else
62542 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
62543 + return -EACCES;
62544 + }
62545 +#endif
62546 + return 0;
62547 +}
62548 +
62549 +int
62550 +gr_handle_sock_server(const struct sockaddr *sck)
62551 +{
62552 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
62553 + if (grsec_enable_socket_server &&
62554 + in_group_p(grsec_socket_server_gid) &&
62555 + sck && (sck->sa_family != AF_UNIX) &&
62556 + (sck->sa_family != AF_LOCAL)) {
62557 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
62558 + return -EACCES;
62559 + }
62560 +#endif
62561 + return 0;
62562 +}
62563 +
62564 +int
62565 +gr_handle_sock_server_other(const struct sock *sck)
62566 +{
62567 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
62568 + if (grsec_enable_socket_server &&
62569 + in_group_p(grsec_socket_server_gid) &&
62570 + sck && (sck->sk_family != AF_UNIX) &&
62571 + (sck->sk_family != AF_LOCAL)) {
62572 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
62573 + return -EACCES;
62574 + }
62575 +#endif
62576 + return 0;
62577 +}
62578 +
62579 +int
62580 +gr_handle_sock_client(const struct sockaddr *sck)
62581 +{
62582 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
62583 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
62584 + sck && (sck->sa_family != AF_UNIX) &&
62585 + (sck->sa_family != AF_LOCAL)) {
62586 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
62587 + return -EACCES;
62588 + }
62589 +#endif
62590 + return 0;
62591 +}
62592 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
62593 new file mode 100644
62594 index 0000000..f55ef0f
62595 --- /dev/null
62596 +++ b/grsecurity/grsec_sysctl.c
62597 @@ -0,0 +1,469 @@
62598 +#include <linux/kernel.h>
62599 +#include <linux/sched.h>
62600 +#include <linux/sysctl.h>
62601 +#include <linux/grsecurity.h>
62602 +#include <linux/grinternal.h>
62603 +
62604 +int
62605 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
62606 +{
62607 +#ifdef CONFIG_GRKERNSEC_SYSCTL
62608 + if (dirname == NULL || name == NULL)
62609 + return 0;
62610 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
62611 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
62612 + return -EACCES;
62613 + }
62614 +#endif
62615 + return 0;
62616 +}
62617 +
62618 +#ifdef CONFIG_GRKERNSEC_ROFS
62619 +static int __maybe_unused one = 1;
62620 +#endif
62621 +
62622 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62623 +struct ctl_table grsecurity_table[] = {
62624 +#ifdef CONFIG_GRKERNSEC_SYSCTL
62625 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
62626 +#ifdef CONFIG_GRKERNSEC_IO
62627 + {
62628 + .procname = "disable_priv_io",
62629 + .data = &grsec_disable_privio,
62630 + .maxlen = sizeof(int),
62631 + .mode = 0600,
62632 + .proc_handler = &proc_dointvec,
62633 + },
62634 +#endif
62635 +#endif
62636 +#ifdef CONFIG_GRKERNSEC_LINK
62637 + {
62638 + .procname = "linking_restrictions",
62639 + .data = &grsec_enable_link,
62640 + .maxlen = sizeof(int),
62641 + .mode = 0600,
62642 + .proc_handler = &proc_dointvec,
62643 + },
62644 +#endif
62645 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
62646 + {
62647 + .procname = "enforce_symlinksifowner",
62648 + .data = &grsec_enable_symlinkown,
62649 + .maxlen = sizeof(int),
62650 + .mode = 0600,
62651 + .proc_handler = &proc_dointvec,
62652 + },
62653 + {
62654 + .procname = "symlinkown_gid",
62655 + .data = &grsec_symlinkown_gid,
62656 + .maxlen = sizeof(int),
62657 + .mode = 0600,
62658 + .proc_handler = &proc_dointvec,
62659 + },
62660 +#endif
62661 +#ifdef CONFIG_GRKERNSEC_BRUTE
62662 + {
62663 + .procname = "deter_bruteforce",
62664 + .data = &grsec_enable_brute,
62665 + .maxlen = sizeof(int),
62666 + .mode = 0600,
62667 + .proc_handler = &proc_dointvec,
62668 + },
62669 +#endif
62670 +#ifdef CONFIG_GRKERNSEC_FIFO
62671 + {
62672 + .procname = "fifo_restrictions",
62673 + .data = &grsec_enable_fifo,
62674 + .maxlen = sizeof(int),
62675 + .mode = 0600,
62676 + .proc_handler = &proc_dointvec,
62677 + },
62678 +#endif
62679 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
62680 + {
62681 + .procname = "ptrace_readexec",
62682 + .data = &grsec_enable_ptrace_readexec,
62683 + .maxlen = sizeof(int),
62684 + .mode = 0600,
62685 + .proc_handler = &proc_dointvec,
62686 + },
62687 +#endif
62688 +#ifdef CONFIG_GRKERNSEC_SETXID
62689 + {
62690 + .procname = "consistent_setxid",
62691 + .data = &grsec_enable_setxid,
62692 + .maxlen = sizeof(int),
62693 + .mode = 0600,
62694 + .proc_handler = &proc_dointvec,
62695 + },
62696 +#endif
62697 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
62698 + {
62699 + .procname = "ip_blackhole",
62700 + .data = &grsec_enable_blackhole,
62701 + .maxlen = sizeof(int),
62702 + .mode = 0600,
62703 + .proc_handler = &proc_dointvec,
62704 + },
62705 + {
62706 + .procname = "lastack_retries",
62707 + .data = &grsec_lastack_retries,
62708 + .maxlen = sizeof(int),
62709 + .mode = 0600,
62710 + .proc_handler = &proc_dointvec,
62711 + },
62712 +#endif
62713 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62714 + {
62715 + .procname = "exec_logging",
62716 + .data = &grsec_enable_execlog,
62717 + .maxlen = sizeof(int),
62718 + .mode = 0600,
62719 + .proc_handler = &proc_dointvec,
62720 + },
62721 +#endif
62722 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
62723 + {
62724 + .procname = "rwxmap_logging",
62725 + .data = &grsec_enable_log_rwxmaps,
62726 + .maxlen = sizeof(int),
62727 + .mode = 0600,
62728 + .proc_handler = &proc_dointvec,
62729 + },
62730 +#endif
62731 +#ifdef CONFIG_GRKERNSEC_SIGNAL
62732 + {
62733 + .procname = "signal_logging",
62734 + .data = &grsec_enable_signal,
62735 + .maxlen = sizeof(int),
62736 + .mode = 0600,
62737 + .proc_handler = &proc_dointvec,
62738 + },
62739 +#endif
62740 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
62741 + {
62742 + .procname = "forkfail_logging",
62743 + .data = &grsec_enable_forkfail,
62744 + .maxlen = sizeof(int),
62745 + .mode = 0600,
62746 + .proc_handler = &proc_dointvec,
62747 + },
62748 +#endif
62749 +#ifdef CONFIG_GRKERNSEC_TIME
62750 + {
62751 + .procname = "timechange_logging",
62752 + .data = &grsec_enable_time,
62753 + .maxlen = sizeof(int),
62754 + .mode = 0600,
62755 + .proc_handler = &proc_dointvec,
62756 + },
62757 +#endif
62758 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62759 + {
62760 + .procname = "chroot_deny_shmat",
62761 + .data = &grsec_enable_chroot_shmat,
62762 + .maxlen = sizeof(int),
62763 + .mode = 0600,
62764 + .proc_handler = &proc_dointvec,
62765 + },
62766 +#endif
62767 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62768 + {
62769 + .procname = "chroot_deny_unix",
62770 + .data = &grsec_enable_chroot_unix,
62771 + .maxlen = sizeof(int),
62772 + .mode = 0600,
62773 + .proc_handler = &proc_dointvec,
62774 + },
62775 +#endif
62776 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62777 + {
62778 + .procname = "chroot_deny_mount",
62779 + .data = &grsec_enable_chroot_mount,
62780 + .maxlen = sizeof(int),
62781 + .mode = 0600,
62782 + .proc_handler = &proc_dointvec,
62783 + },
62784 +#endif
62785 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62786 + {
62787 + .procname = "chroot_deny_fchdir",
62788 + .data = &grsec_enable_chroot_fchdir,
62789 + .maxlen = sizeof(int),
62790 + .mode = 0600,
62791 + .proc_handler = &proc_dointvec,
62792 + },
62793 +#endif
62794 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62795 + {
62796 + .procname = "chroot_deny_chroot",
62797 + .data = &grsec_enable_chroot_double,
62798 + .maxlen = sizeof(int),
62799 + .mode = 0600,
62800 + .proc_handler = &proc_dointvec,
62801 + },
62802 +#endif
62803 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62804 + {
62805 + .procname = "chroot_deny_pivot",
62806 + .data = &grsec_enable_chroot_pivot,
62807 + .maxlen = sizeof(int),
62808 + .mode = 0600,
62809 + .proc_handler = &proc_dointvec,
62810 + },
62811 +#endif
62812 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62813 + {
62814 + .procname = "chroot_enforce_chdir",
62815 + .data = &grsec_enable_chroot_chdir,
62816 + .maxlen = sizeof(int),
62817 + .mode = 0600,
62818 + .proc_handler = &proc_dointvec,
62819 + },
62820 +#endif
62821 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62822 + {
62823 + .procname = "chroot_deny_chmod",
62824 + .data = &grsec_enable_chroot_chmod,
62825 + .maxlen = sizeof(int),
62826 + .mode = 0600,
62827 + .proc_handler = &proc_dointvec,
62828 + },
62829 +#endif
62830 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62831 + {
62832 + .procname = "chroot_deny_mknod",
62833 + .data = &grsec_enable_chroot_mknod,
62834 + .maxlen = sizeof(int),
62835 + .mode = 0600,
62836 + .proc_handler = &proc_dointvec,
62837 + },
62838 +#endif
62839 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62840 + {
62841 + .procname = "chroot_restrict_nice",
62842 + .data = &grsec_enable_chroot_nice,
62843 + .maxlen = sizeof(int),
62844 + .mode = 0600,
62845 + .proc_handler = &proc_dointvec,
62846 + },
62847 +#endif
62848 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62849 + {
62850 + .procname = "chroot_execlog",
62851 + .data = &grsec_enable_chroot_execlog,
62852 + .maxlen = sizeof(int),
62853 + .mode = 0600,
62854 + .proc_handler = &proc_dointvec,
62855 + },
62856 +#endif
62857 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62858 + {
62859 + .procname = "chroot_caps",
62860 + .data = &grsec_enable_chroot_caps,
62861 + .maxlen = sizeof(int),
62862 + .mode = 0600,
62863 + .proc_handler = &proc_dointvec,
62864 + },
62865 +#endif
62866 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62867 + {
62868 + .procname = "chroot_deny_sysctl",
62869 + .data = &grsec_enable_chroot_sysctl,
62870 + .maxlen = sizeof(int),
62871 + .mode = 0600,
62872 + .proc_handler = &proc_dointvec,
62873 + },
62874 +#endif
62875 +#ifdef CONFIG_GRKERNSEC_TPE
62876 + {
62877 + .procname = "tpe",
62878 + .data = &grsec_enable_tpe,
62879 + .maxlen = sizeof(int),
62880 + .mode = 0600,
62881 + .proc_handler = &proc_dointvec,
62882 + },
62883 + {
62884 + .procname = "tpe_gid",
62885 + .data = &grsec_tpe_gid,
62886 + .maxlen = sizeof(int),
62887 + .mode = 0600,
62888 + .proc_handler = &proc_dointvec,
62889 + },
62890 +#endif
62891 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
62892 + {
62893 + .procname = "tpe_invert",
62894 + .data = &grsec_enable_tpe_invert,
62895 + .maxlen = sizeof(int),
62896 + .mode = 0600,
62897 + .proc_handler = &proc_dointvec,
62898 + },
62899 +#endif
62900 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
62901 + {
62902 + .procname = "tpe_restrict_all",
62903 + .data = &grsec_enable_tpe_all,
62904 + .maxlen = sizeof(int),
62905 + .mode = 0600,
62906 + .proc_handler = &proc_dointvec,
62907 + },
62908 +#endif
62909 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
62910 + {
62911 + .procname = "socket_all",
62912 + .data = &grsec_enable_socket_all,
62913 + .maxlen = sizeof(int),
62914 + .mode = 0600,
62915 + .proc_handler = &proc_dointvec,
62916 + },
62917 + {
62918 + .procname = "socket_all_gid",
62919 + .data = &grsec_socket_all_gid,
62920 + .maxlen = sizeof(int),
62921 + .mode = 0600,
62922 + .proc_handler = &proc_dointvec,
62923 + },
62924 +#endif
62925 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
62926 + {
62927 + .procname = "socket_client",
62928 + .data = &grsec_enable_socket_client,
62929 + .maxlen = sizeof(int),
62930 + .mode = 0600,
62931 + .proc_handler = &proc_dointvec,
62932 + },
62933 + {
62934 + .procname = "socket_client_gid",
62935 + .data = &grsec_socket_client_gid,
62936 + .maxlen = sizeof(int),
62937 + .mode = 0600,
62938 + .proc_handler = &proc_dointvec,
62939 + },
62940 +#endif
62941 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
62942 + {
62943 + .procname = "socket_server",
62944 + .data = &grsec_enable_socket_server,
62945 + .maxlen = sizeof(int),
62946 + .mode = 0600,
62947 + .proc_handler = &proc_dointvec,
62948 + },
62949 + {
62950 + .procname = "socket_server_gid",
62951 + .data = &grsec_socket_server_gid,
62952 + .maxlen = sizeof(int),
62953 + .mode = 0600,
62954 + .proc_handler = &proc_dointvec,
62955 + },
62956 +#endif
62957 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
62958 + {
62959 + .procname = "audit_group",
62960 + .data = &grsec_enable_group,
62961 + .maxlen = sizeof(int),
62962 + .mode = 0600,
62963 + .proc_handler = &proc_dointvec,
62964 + },
62965 + {
62966 + .procname = "audit_gid",
62967 + .data = &grsec_audit_gid,
62968 + .maxlen = sizeof(int),
62969 + .mode = 0600,
62970 + .proc_handler = &proc_dointvec,
62971 + },
62972 +#endif
62973 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62974 + {
62975 + .procname = "audit_chdir",
62976 + .data = &grsec_enable_chdir,
62977 + .maxlen = sizeof(int),
62978 + .mode = 0600,
62979 + .proc_handler = &proc_dointvec,
62980 + },
62981 +#endif
62982 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62983 + {
62984 + .procname = "audit_mount",
62985 + .data = &grsec_enable_mount,
62986 + .maxlen = sizeof(int),
62987 + .mode = 0600,
62988 + .proc_handler = &proc_dointvec,
62989 + },
62990 +#endif
62991 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
62992 + {
62993 + .procname = "audit_textrel",
62994 + .data = &grsec_enable_audit_textrel,
62995 + .maxlen = sizeof(int),
62996 + .mode = 0600,
62997 + .proc_handler = &proc_dointvec,
62998 + },
62999 +#endif
63000 +#ifdef CONFIG_GRKERNSEC_DMESG
63001 + {
63002 + .procname = "dmesg",
63003 + .data = &grsec_enable_dmesg,
63004 + .maxlen = sizeof(int),
63005 + .mode = 0600,
63006 + .proc_handler = &proc_dointvec,
63007 + },
63008 +#endif
63009 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63010 + {
63011 + .procname = "chroot_findtask",
63012 + .data = &grsec_enable_chroot_findtask,
63013 + .maxlen = sizeof(int),
63014 + .mode = 0600,
63015 + .proc_handler = &proc_dointvec,
63016 + },
63017 +#endif
63018 +#ifdef CONFIG_GRKERNSEC_RESLOG
63019 + {
63020 + .procname = "resource_logging",
63021 + .data = &grsec_resource_logging,
63022 + .maxlen = sizeof(int),
63023 + .mode = 0600,
63024 + .proc_handler = &proc_dointvec,
63025 + },
63026 +#endif
63027 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63028 + {
63029 + .procname = "audit_ptrace",
63030 + .data = &grsec_enable_audit_ptrace,
63031 + .maxlen = sizeof(int),
63032 + .mode = 0600,
63033 + .proc_handler = &proc_dointvec,
63034 + },
63035 +#endif
63036 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63037 + {
63038 + .procname = "harden_ptrace",
63039 + .data = &grsec_enable_harden_ptrace,
63040 + .maxlen = sizeof(int),
63041 + .mode = 0600,
63042 + .proc_handler = &proc_dointvec,
63043 + },
63044 +#endif
63045 + {
63046 + .procname = "grsec_lock",
63047 + .data = &grsec_lock,
63048 + .maxlen = sizeof(int),
63049 + .mode = 0600,
63050 + .proc_handler = &proc_dointvec,
63051 + },
63052 +#endif
63053 +#ifdef CONFIG_GRKERNSEC_ROFS
63054 + {
63055 + .procname = "romount_protect",
63056 + .data = &grsec_enable_rofs,
63057 + .maxlen = sizeof(int),
63058 + .mode = 0600,
63059 + .proc_handler = &proc_dointvec_minmax,
63060 + .extra1 = &one,
63061 + .extra2 = &one,
63062 + },
63063 +#endif
63064 + { }
63065 +};
63066 +#endif
63067 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
63068 new file mode 100644
63069 index 0000000..0dc13c3
63070 --- /dev/null
63071 +++ b/grsecurity/grsec_time.c
63072 @@ -0,0 +1,16 @@
63073 +#include <linux/kernel.h>
63074 +#include <linux/sched.h>
63075 +#include <linux/grinternal.h>
63076 +#include <linux/module.h>
63077 +
63078 +void
63079 +gr_log_timechange(void)
63080 +{
63081 +#ifdef CONFIG_GRKERNSEC_TIME
63082 + if (grsec_enable_time)
63083 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
63084 +#endif
63085 + return;
63086 +}
63087 +
63088 +EXPORT_SYMBOL(gr_log_timechange);
63089 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
63090 new file mode 100644
63091 index 0000000..ac20d7f
63092 --- /dev/null
63093 +++ b/grsecurity/grsec_tpe.c
63094 @@ -0,0 +1,73 @@
63095 +#include <linux/kernel.h>
63096 +#include <linux/sched.h>
63097 +#include <linux/file.h>
63098 +#include <linux/fs.h>
63099 +#include <linux/grinternal.h>
63100 +
63101 +extern int gr_acl_tpe_check(void);
63102 +
63103 +int
63104 +gr_tpe_allow(const struct file *file)
63105 +{
63106 +#ifdef CONFIG_GRKERNSEC
63107 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
63108 + const struct cred *cred = current_cred();
63109 + char *msg = NULL;
63110 + char *msg2 = NULL;
63111 +
63112 + // never restrict root
63113 + if (uid_eq(cred->uid, GLOBAL_ROOT_UID))
63114 + return 1;
63115 +
63116 + if (grsec_enable_tpe) {
63117 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63118 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
63119 + msg = "not being in trusted group";
63120 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
63121 + msg = "being in untrusted group";
63122 +#else
63123 + if (in_group_p(grsec_tpe_gid))
63124 + msg = "being in untrusted group";
63125 +#endif
63126 + }
63127 + if (!msg && gr_acl_tpe_check())
63128 + msg = "being in untrusted role";
63129 +
63130 + // not in any affected group/role
63131 + if (!msg)
63132 + goto next_check;
63133 +
63134 + if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
63135 + msg2 = "file in non-root-owned directory";
63136 + else if (inode->i_mode & S_IWOTH)
63137 + msg2 = "file in world-writable directory";
63138 + else if (inode->i_mode & S_IWGRP)
63139 + msg2 = "file in group-writable directory";
63140 +
63141 + if (msg && msg2) {
63142 + char fullmsg[70] = {0};
63143 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
63144 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
63145 + return 0;
63146 + }
63147 + msg = NULL;
63148 +next_check:
63149 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
63150 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
63151 + return 1;
63152 +
63153 + if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID) && !uid_eq(inode->i_uid, cred->uid))
63154 + msg = "directory not owned by user";
63155 + else if (inode->i_mode & S_IWOTH)
63156 + msg = "file in world-writable directory";
63157 + else if (inode->i_mode & S_IWGRP)
63158 + msg = "file in group-writable directory";
63159 +
63160 + if (msg) {
63161 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
63162 + return 0;
63163 + }
63164 +#endif
63165 +#endif
63166 + return 1;
63167 +}
63168 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
63169 new file mode 100644
63170 index 0000000..9f7b1ac
63171 --- /dev/null
63172 +++ b/grsecurity/grsum.c
63173 @@ -0,0 +1,61 @@
63174 +#include <linux/err.h>
63175 +#include <linux/kernel.h>
63176 +#include <linux/sched.h>
63177 +#include <linux/mm.h>
63178 +#include <linux/scatterlist.h>
63179 +#include <linux/crypto.h>
63180 +#include <linux/gracl.h>
63181 +
63182 +
63183 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
63184 +#error "crypto and sha256 must be built into the kernel"
63185 +#endif
63186 +
63187 +int
63188 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
63189 +{
63190 + char *p;
63191 + struct crypto_hash *tfm;
63192 + struct hash_desc desc;
63193 + struct scatterlist sg;
63194 + unsigned char temp_sum[GR_SHA_LEN];
63195 + volatile int retval = 0;
63196 + volatile int dummy = 0;
63197 + unsigned int i;
63198 +
63199 + sg_init_table(&sg, 1);
63200 +
63201 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
63202 + if (IS_ERR(tfm)) {
63203 + /* should never happen, since sha256 should be built in */
63204 + return 1;
63205 + }
63206 +
63207 + desc.tfm = tfm;
63208 + desc.flags = 0;
63209 +
63210 + crypto_hash_init(&desc);
63211 +
63212 + p = salt;
63213 + sg_set_buf(&sg, p, GR_SALT_LEN);
63214 + crypto_hash_update(&desc, &sg, sg.length);
63215 +
63216 + p = entry->pw;
63217 + sg_set_buf(&sg, p, strlen(p));
63218 +
63219 + crypto_hash_update(&desc, &sg, sg.length);
63220 +
63221 + crypto_hash_final(&desc, temp_sum);
63222 +
63223 + memset(entry->pw, 0, GR_PW_LEN);
63224 +
63225 + for (i = 0; i < GR_SHA_LEN; i++)
63226 + if (sum[i] != temp_sum[i])
63227 + retval = 1;
63228 + else
63229 + dummy = 1; // waste a cycle
63230 +
63231 + crypto_free_hash(tfm);
63232 +
63233 + return retval;
63234 +}
63235 diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
63236 index 77ff547..181834f 100644
63237 --- a/include/asm-generic/4level-fixup.h
63238 +++ b/include/asm-generic/4level-fixup.h
63239 @@ -13,8 +13,10 @@
63240 #define pmd_alloc(mm, pud, address) \
63241 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
63242 NULL: pmd_offset(pud, address))
63243 +#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
63244
63245 #define pud_alloc(mm, pgd, address) (pgd)
63246 +#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
63247 #define pud_offset(pgd, start) (pgd)
63248 #define pud_none(pud) 0
63249 #define pud_bad(pud) 0
63250 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
63251 index b7babf0..04ad282 100644
63252 --- a/include/asm-generic/atomic-long.h
63253 +++ b/include/asm-generic/atomic-long.h
63254 @@ -22,6 +22,12 @@
63255
63256 typedef atomic64_t atomic_long_t;
63257
63258 +#ifdef CONFIG_PAX_REFCOUNT
63259 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
63260 +#else
63261 +typedef atomic64_t atomic_long_unchecked_t;
63262 +#endif
63263 +
63264 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
63265
63266 static inline long atomic_long_read(atomic_long_t *l)
63267 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
63268 return (long)atomic64_read(v);
63269 }
63270
63271 +#ifdef CONFIG_PAX_REFCOUNT
63272 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
63273 +{
63274 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
63275 +
63276 + return (long)atomic64_read_unchecked(v);
63277 +}
63278 +#endif
63279 +
63280 static inline void atomic_long_set(atomic_long_t *l, long i)
63281 {
63282 atomic64_t *v = (atomic64_t *)l;
63283 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
63284 atomic64_set(v, i);
63285 }
63286
63287 +#ifdef CONFIG_PAX_REFCOUNT
63288 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
63289 +{
63290 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
63291 +
63292 + atomic64_set_unchecked(v, i);
63293 +}
63294 +#endif
63295 +
63296 static inline void atomic_long_inc(atomic_long_t *l)
63297 {
63298 atomic64_t *v = (atomic64_t *)l;
63299 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
63300 atomic64_inc(v);
63301 }
63302
63303 +#ifdef CONFIG_PAX_REFCOUNT
63304 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
63305 +{
63306 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
63307 +
63308 + atomic64_inc_unchecked(v);
63309 +}
63310 +#endif
63311 +
63312 static inline void atomic_long_dec(atomic_long_t *l)
63313 {
63314 atomic64_t *v = (atomic64_t *)l;
63315 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
63316 atomic64_dec(v);
63317 }
63318
63319 +#ifdef CONFIG_PAX_REFCOUNT
63320 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
63321 +{
63322 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
63323 +
63324 + atomic64_dec_unchecked(v);
63325 +}
63326 +#endif
63327 +
63328 static inline void atomic_long_add(long i, atomic_long_t *l)
63329 {
63330 atomic64_t *v = (atomic64_t *)l;
63331 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
63332 atomic64_add(i, v);
63333 }
63334
63335 +#ifdef CONFIG_PAX_REFCOUNT
63336 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
63337 +{
63338 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
63339 +
63340 + atomic64_add_unchecked(i, v);
63341 +}
63342 +#endif
63343 +
63344 static inline void atomic_long_sub(long i, atomic_long_t *l)
63345 {
63346 atomic64_t *v = (atomic64_t *)l;
63347 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
63348 atomic64_sub(i, v);
63349 }
63350
63351 +#ifdef CONFIG_PAX_REFCOUNT
63352 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
63353 +{
63354 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
63355 +
63356 + atomic64_sub_unchecked(i, v);
63357 +}
63358 +#endif
63359 +
63360 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
63361 {
63362 atomic64_t *v = (atomic64_t *)l;
63363 @@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
63364 return (long)atomic64_add_return(i, v);
63365 }
63366
63367 +#ifdef CONFIG_PAX_REFCOUNT
63368 +static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
63369 +{
63370 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
63371 +
63372 + return (long)atomic64_add_return_unchecked(i, v);
63373 +}
63374 +#endif
63375 +
63376 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
63377 {
63378 atomic64_t *v = (atomic64_t *)l;
63379 @@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
63380 return (long)atomic64_inc_return(v);
63381 }
63382
63383 +#ifdef CONFIG_PAX_REFCOUNT
63384 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
63385 +{
63386 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
63387 +
63388 + return (long)atomic64_inc_return_unchecked(v);
63389 +}
63390 +#endif
63391 +
63392 static inline long atomic_long_dec_return(atomic_long_t *l)
63393 {
63394 atomic64_t *v = (atomic64_t *)l;
63395 @@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
63396
63397 typedef atomic_t atomic_long_t;
63398
63399 +#ifdef CONFIG_PAX_REFCOUNT
63400 +typedef atomic_unchecked_t atomic_long_unchecked_t;
63401 +#else
63402 +typedef atomic_t atomic_long_unchecked_t;
63403 +#endif
63404 +
63405 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
63406 static inline long atomic_long_read(atomic_long_t *l)
63407 {
63408 @@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
63409 return (long)atomic_read(v);
63410 }
63411
63412 +#ifdef CONFIG_PAX_REFCOUNT
63413 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
63414 +{
63415 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
63416 +
63417 + return (long)atomic_read_unchecked(v);
63418 +}
63419 +#endif
63420 +
63421 static inline void atomic_long_set(atomic_long_t *l, long i)
63422 {
63423 atomic_t *v = (atomic_t *)l;
63424 @@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
63425 atomic_set(v, i);
63426 }
63427
63428 +#ifdef CONFIG_PAX_REFCOUNT
63429 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
63430 +{
63431 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
63432 +
63433 + atomic_set_unchecked(v, i);
63434 +}
63435 +#endif
63436 +
63437 static inline void atomic_long_inc(atomic_long_t *l)
63438 {
63439 atomic_t *v = (atomic_t *)l;
63440 @@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
63441 atomic_inc(v);
63442 }
63443
63444 +#ifdef CONFIG_PAX_REFCOUNT
63445 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
63446 +{
63447 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
63448 +
63449 + atomic_inc_unchecked(v);
63450 +}
63451 +#endif
63452 +
63453 static inline void atomic_long_dec(atomic_long_t *l)
63454 {
63455 atomic_t *v = (atomic_t *)l;
63456 @@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
63457 atomic_dec(v);
63458 }
63459
63460 +#ifdef CONFIG_PAX_REFCOUNT
63461 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
63462 +{
63463 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
63464 +
63465 + atomic_dec_unchecked(v);
63466 +}
63467 +#endif
63468 +
63469 static inline void atomic_long_add(long i, atomic_long_t *l)
63470 {
63471 atomic_t *v = (atomic_t *)l;
63472 @@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
63473 atomic_add(i, v);
63474 }
63475
63476 +#ifdef CONFIG_PAX_REFCOUNT
63477 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
63478 +{
63479 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
63480 +
63481 + atomic_add_unchecked(i, v);
63482 +}
63483 +#endif
63484 +
63485 static inline void atomic_long_sub(long i, atomic_long_t *l)
63486 {
63487 atomic_t *v = (atomic_t *)l;
63488 @@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
63489 atomic_sub(i, v);
63490 }
63491
63492 +#ifdef CONFIG_PAX_REFCOUNT
63493 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
63494 +{
63495 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
63496 +
63497 + atomic_sub_unchecked(i, v);
63498 +}
63499 +#endif
63500 +
63501 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
63502 {
63503 atomic_t *v = (atomic_t *)l;
63504 @@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
63505 return (long)atomic_add_return(i, v);
63506 }
63507
63508 +#ifdef CONFIG_PAX_REFCOUNT
63509 +static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
63510 +{
63511 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
63512 +
63513 + return (long)atomic_add_return_unchecked(i, v);
63514 +}
63515 +
63516 +#endif
63517 +
63518 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
63519 {
63520 atomic_t *v = (atomic_t *)l;
63521 @@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
63522 return (long)atomic_inc_return(v);
63523 }
63524
63525 +#ifdef CONFIG_PAX_REFCOUNT
63526 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
63527 +{
63528 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
63529 +
63530 + return (long)atomic_inc_return_unchecked(v);
63531 +}
63532 +#endif
63533 +
63534 static inline long atomic_long_dec_return(atomic_long_t *l)
63535 {
63536 atomic_t *v = (atomic_t *)l;
63537 @@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
63538
63539 #endif /* BITS_PER_LONG == 64 */
63540
63541 +#ifdef CONFIG_PAX_REFCOUNT
63542 +static inline void pax_refcount_needs_these_functions(void)
63543 +{
63544 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
63545 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
63546 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
63547 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
63548 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
63549 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
63550 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
63551 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
63552 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
63553 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
63554 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
63555 +#ifdef CONFIG_X86
63556 + atomic_clear_mask_unchecked(0, NULL);
63557 + atomic_set_mask_unchecked(0, NULL);
63558 +#endif
63559 +
63560 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
63561 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
63562 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
63563 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
63564 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
63565 + atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
63566 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
63567 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
63568 +}
63569 +#else
63570 +#define atomic_read_unchecked(v) atomic_read(v)
63571 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
63572 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
63573 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
63574 +#define atomic_inc_unchecked(v) atomic_inc(v)
63575 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
63576 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
63577 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
63578 +#define atomic_dec_unchecked(v) atomic_dec(v)
63579 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
63580 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
63581 +#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
63582 +#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
63583 +
63584 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
63585 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
63586 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
63587 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
63588 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
63589 +#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
63590 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
63591 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
63592 +#endif
63593 +
63594 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
63595 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
63596 index 1ced641..c896ee8 100644
63597 --- a/include/asm-generic/atomic.h
63598 +++ b/include/asm-generic/atomic.h
63599 @@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
63600 * Atomically clears the bits set in @mask from @v
63601 */
63602 #ifndef atomic_clear_mask
63603 -static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
63604 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
63605 {
63606 unsigned long flags;
63607
63608 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
63609 index b18ce4f..2ee2843 100644
63610 --- a/include/asm-generic/atomic64.h
63611 +++ b/include/asm-generic/atomic64.h
63612 @@ -16,6 +16,8 @@ typedef struct {
63613 long long counter;
63614 } atomic64_t;
63615
63616 +typedef atomic64_t atomic64_unchecked_t;
63617 +
63618 #define ATOMIC64_INIT(i) { (i) }
63619
63620 extern long long atomic64_read(const atomic64_t *v);
63621 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
63622 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
63623 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
63624
63625 +#define atomic64_read_unchecked(v) atomic64_read(v)
63626 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
63627 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
63628 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
63629 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
63630 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
63631 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
63632 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
63633 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
63634 +
63635 #endif /* _ASM_GENERIC_ATOMIC64_H */
63636 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
63637 index 1bfcfe5..e04c5c9 100644
63638 --- a/include/asm-generic/cache.h
63639 +++ b/include/asm-generic/cache.h
63640 @@ -6,7 +6,7 @@
63641 * cache lines need to provide their own cache.h.
63642 */
63643
63644 -#define L1_CACHE_SHIFT 5
63645 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
63646 +#define L1_CACHE_SHIFT 5UL
63647 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
63648
63649 #endif /* __ASM_GENERIC_CACHE_H */
63650 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
63651 index 0d68a1e..b74a761 100644
63652 --- a/include/asm-generic/emergency-restart.h
63653 +++ b/include/asm-generic/emergency-restart.h
63654 @@ -1,7 +1,7 @@
63655 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
63656 #define _ASM_GENERIC_EMERGENCY_RESTART_H
63657
63658 -static inline void machine_emergency_restart(void)
63659 +static inline __noreturn void machine_emergency_restart(void)
63660 {
63661 machine_restart(NULL);
63662 }
63663 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
63664 index 90f99c7..00ce236 100644
63665 --- a/include/asm-generic/kmap_types.h
63666 +++ b/include/asm-generic/kmap_types.h
63667 @@ -2,9 +2,9 @@
63668 #define _ASM_GENERIC_KMAP_TYPES_H
63669
63670 #ifdef __WITH_KM_FENCE
63671 -# define KM_TYPE_NR 41
63672 +# define KM_TYPE_NR 42
63673 #else
63674 -# define KM_TYPE_NR 20
63675 +# define KM_TYPE_NR 21
63676 #endif
63677
63678 #endif
63679 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
63680 index 9ceb03b..62b0b8f 100644
63681 --- a/include/asm-generic/local.h
63682 +++ b/include/asm-generic/local.h
63683 @@ -23,24 +23,37 @@ typedef struct
63684 atomic_long_t a;
63685 } local_t;
63686
63687 +typedef struct {
63688 + atomic_long_unchecked_t a;
63689 +} local_unchecked_t;
63690 +
63691 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
63692
63693 #define local_read(l) atomic_long_read(&(l)->a)
63694 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
63695 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
63696 +#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
63697 #define local_inc(l) atomic_long_inc(&(l)->a)
63698 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
63699 #define local_dec(l) atomic_long_dec(&(l)->a)
63700 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
63701 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
63702 +#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
63703 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
63704 +#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
63705
63706 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
63707 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
63708 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
63709 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
63710 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
63711 +#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
63712 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
63713 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
63714 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
63715
63716 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
63717 +#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
63718 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
63719 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
63720 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
63721 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
63722 index 725612b..9cc513a 100644
63723 --- a/include/asm-generic/pgtable-nopmd.h
63724 +++ b/include/asm-generic/pgtable-nopmd.h
63725 @@ -1,14 +1,19 @@
63726 #ifndef _PGTABLE_NOPMD_H
63727 #define _PGTABLE_NOPMD_H
63728
63729 -#ifndef __ASSEMBLY__
63730 -
63731 #include <asm-generic/pgtable-nopud.h>
63732
63733 -struct mm_struct;
63734 -
63735 #define __PAGETABLE_PMD_FOLDED
63736
63737 +#define PMD_SHIFT PUD_SHIFT
63738 +#define PTRS_PER_PMD 1
63739 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
63740 +#define PMD_MASK (~(PMD_SIZE-1))
63741 +
63742 +#ifndef __ASSEMBLY__
63743 +
63744 +struct mm_struct;
63745 +
63746 /*
63747 * Having the pmd type consist of a pud gets the size right, and allows
63748 * us to conceptually access the pud entry that this pmd is folded into
63749 @@ -16,11 +21,6 @@ struct mm_struct;
63750 */
63751 typedef struct { pud_t pud; } pmd_t;
63752
63753 -#define PMD_SHIFT PUD_SHIFT
63754 -#define PTRS_PER_PMD 1
63755 -#define PMD_SIZE (1UL << PMD_SHIFT)
63756 -#define PMD_MASK (~(PMD_SIZE-1))
63757 -
63758 /*
63759 * The "pud_xxx()" functions here are trivial for a folded two-level
63760 * setup: the pmd is never bad, and a pmd always exists (as it's folded
63761 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
63762 index 810431d..0ec4804f 100644
63763 --- a/include/asm-generic/pgtable-nopud.h
63764 +++ b/include/asm-generic/pgtable-nopud.h
63765 @@ -1,10 +1,15 @@
63766 #ifndef _PGTABLE_NOPUD_H
63767 #define _PGTABLE_NOPUD_H
63768
63769 -#ifndef __ASSEMBLY__
63770 -
63771 #define __PAGETABLE_PUD_FOLDED
63772
63773 +#define PUD_SHIFT PGDIR_SHIFT
63774 +#define PTRS_PER_PUD 1
63775 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
63776 +#define PUD_MASK (~(PUD_SIZE-1))
63777 +
63778 +#ifndef __ASSEMBLY__
63779 +
63780 /*
63781 * Having the pud type consist of a pgd gets the size right, and allows
63782 * us to conceptually access the pgd entry that this pud is folded into
63783 @@ -12,11 +17,6 @@
63784 */
63785 typedef struct { pgd_t pgd; } pud_t;
63786
63787 -#define PUD_SHIFT PGDIR_SHIFT
63788 -#define PTRS_PER_PUD 1
63789 -#define PUD_SIZE (1UL << PUD_SHIFT)
63790 -#define PUD_MASK (~(PUD_SIZE-1))
63791 -
63792 /*
63793 * The "pgd_xxx()" functions here are trivial for a folded two-level
63794 * setup: the pud is never bad, and a pud always exists (as it's folded
63795 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
63796 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
63797
63798 #define pgd_populate(mm, pgd, pud) do { } while (0)
63799 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
63800 /*
63801 * (puds are folded into pgds so this doesn't get actually called,
63802 * but the define is needed for a generic inline function.)
63803 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
63804 index 5cf680a..4b74d62 100644
63805 --- a/include/asm-generic/pgtable.h
63806 +++ b/include/asm-generic/pgtable.h
63807 @@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
63808 }
63809 #endif /* CONFIG_NUMA_BALANCING */
63810
63811 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
63812 +static inline unsigned long pax_open_kernel(void) { return 0; }
63813 +#endif
63814 +
63815 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
63816 +static inline unsigned long pax_close_kernel(void) { return 0; }
63817 +#endif
63818 +
63819 #endif /* CONFIG_MMU */
63820
63821 #endif /* !__ASSEMBLY__ */
63822 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
63823 index d1ea7ce..b1ebf2a 100644
63824 --- a/include/asm-generic/vmlinux.lds.h
63825 +++ b/include/asm-generic/vmlinux.lds.h
63826 @@ -218,6 +218,7 @@
63827 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
63828 VMLINUX_SYMBOL(__start_rodata) = .; \
63829 *(.rodata) *(.rodata.*) \
63830 + *(.data..read_only) \
63831 *(__vermagic) /* Kernel version magic */ \
63832 . = ALIGN(8); \
63833 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
63834 @@ -725,17 +726,18 @@
63835 * section in the linker script will go there too. @phdr should have
63836 * a leading colon.
63837 *
63838 - * Note that this macros defines __per_cpu_load as an absolute symbol.
63839 + * Note that this macros defines per_cpu_load as an absolute symbol.
63840 * If there is no need to put the percpu section at a predetermined
63841 * address, use PERCPU_SECTION.
63842 */
63843 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
63844 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
63845 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
63846 + per_cpu_load = .; \
63847 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
63848 - LOAD_OFFSET) { \
63849 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
63850 PERCPU_INPUT(cacheline) \
63851 } phdr \
63852 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
63853 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
63854
63855 /**
63856 * PERCPU_SECTION - define output section for percpu area, simple version
63857 diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
63858 index 418d270..bfd2794 100644
63859 --- a/include/crypto/algapi.h
63860 +++ b/include/crypto/algapi.h
63861 @@ -34,7 +34,7 @@ struct crypto_type {
63862 unsigned int maskclear;
63863 unsigned int maskset;
63864 unsigned int tfmsize;
63865 -};
63866 +} __do_const;
63867
63868 struct crypto_instance {
63869 struct crypto_alg alg;
63870 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
63871 index fad21c9..3fff955 100644
63872 --- a/include/drm/drmP.h
63873 +++ b/include/drm/drmP.h
63874 @@ -72,6 +72,7 @@
63875 #include <linux/workqueue.h>
63876 #include <linux/poll.h>
63877 #include <asm/pgalloc.h>
63878 +#include <asm/local.h>
63879 #include <drm/drm.h>
63880 #include <drm/drm_sarea.h>
63881
63882 @@ -1068,7 +1069,7 @@ struct drm_device {
63883
63884 /** \name Usage Counters */
63885 /*@{ */
63886 - int open_count; /**< Outstanding files open */
63887 + local_t open_count; /**< Outstanding files open */
63888 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
63889 atomic_t vma_count; /**< Outstanding vma areas open */
63890 int buf_use; /**< Buffers in use -- cannot alloc */
63891 @@ -1079,7 +1080,7 @@ struct drm_device {
63892 /*@{ */
63893 unsigned long counters;
63894 enum drm_stat_type types[15];
63895 - atomic_t counts[15];
63896 + atomic_unchecked_t counts[15];
63897 /*@} */
63898
63899 struct list_head filelist;
63900 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
63901 index f43d556..94d9343 100644
63902 --- a/include/drm/drm_crtc_helper.h
63903 +++ b/include/drm/drm_crtc_helper.h
63904 @@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
63905 struct drm_connector *connector);
63906 /* disable encoder when not in use - more explicit than dpms off */
63907 void (*disable)(struct drm_encoder *encoder);
63908 -};
63909 +} __no_const;
63910
63911 /**
63912 * drm_connector_helper_funcs - helper operations for connectors
63913 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
63914 index 72dcbe8..8db58d7 100644
63915 --- a/include/drm/ttm/ttm_memory.h
63916 +++ b/include/drm/ttm/ttm_memory.h
63917 @@ -48,7 +48,7 @@
63918
63919 struct ttm_mem_shrink {
63920 int (*do_shrink) (struct ttm_mem_shrink *);
63921 -};
63922 +} __no_const;
63923
63924 /**
63925 * struct ttm_mem_global - Global memory accounting structure.
63926 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
63927 index c1da539..4db35ec 100644
63928 --- a/include/linux/atmdev.h
63929 +++ b/include/linux/atmdev.h
63930 @@ -28,7 +28,7 @@ struct compat_atm_iobuf {
63931 #endif
63932
63933 struct k_atm_aal_stats {
63934 -#define __HANDLE_ITEM(i) atomic_t i
63935 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
63936 __AAL_STAT_ITEMS
63937 #undef __HANDLE_ITEM
63938 };
63939 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
63940 index 0530b98..b127a9e 100644
63941 --- a/include/linux/binfmts.h
63942 +++ b/include/linux/binfmts.h
63943 @@ -73,6 +73,7 @@ struct linux_binfmt {
63944 int (*load_binary)(struct linux_binprm *);
63945 int (*load_shlib)(struct file *);
63946 int (*core_dump)(struct coredump_params *cprm);
63947 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
63948 unsigned long min_coredump; /* minimal dump size */
63949 };
63950
63951 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
63952 index f94bc83..62b9cfe 100644
63953 --- a/include/linux/blkdev.h
63954 +++ b/include/linux/blkdev.h
63955 @@ -1498,7 +1498,7 @@ struct block_device_operations {
63956 /* this callback is with swap_lock and sometimes page table lock held */
63957 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
63958 struct module *owner;
63959 -};
63960 +} __do_const;
63961
63962 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
63963 unsigned long);
63964 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
63965 index 7c2e030..b72475d 100644
63966 --- a/include/linux/blktrace_api.h
63967 +++ b/include/linux/blktrace_api.h
63968 @@ -23,7 +23,7 @@ struct blk_trace {
63969 struct dentry *dir;
63970 struct dentry *dropped_file;
63971 struct dentry *msg_file;
63972 - atomic_t dropped;
63973 + atomic_unchecked_t dropped;
63974 };
63975
63976 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
63977 diff --git a/include/linux/cache.h b/include/linux/cache.h
63978 index 4c57065..4307975 100644
63979 --- a/include/linux/cache.h
63980 +++ b/include/linux/cache.h
63981 @@ -16,6 +16,10 @@
63982 #define __read_mostly
63983 #endif
63984
63985 +#ifndef __read_only
63986 +#define __read_only __read_mostly
63987 +#endif
63988 +
63989 #ifndef ____cacheline_aligned
63990 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
63991 #endif
63992 diff --git a/include/linux/capability.h b/include/linux/capability.h
63993 index 98503b7..cc36d18 100644
63994 --- a/include/linux/capability.h
63995 +++ b/include/linux/capability.h
63996 @@ -211,8 +211,13 @@ extern bool capable(int cap);
63997 extern bool ns_capable(struct user_namespace *ns, int cap);
63998 extern bool nsown_capable(int cap);
63999 extern bool inode_capable(const struct inode *inode, int cap);
64000 +extern bool capable_nolog(int cap);
64001 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
64002 +extern bool inode_capable_nolog(const struct inode *inode, int cap);
64003
64004 /* audit system wants to get cap info from files as well */
64005 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
64006
64007 +extern int is_privileged_binary(const struct dentry *dentry);
64008 +
64009 #endif /* !_LINUX_CAPABILITY_H */
64010 diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
64011 index 8609d57..86e4d79 100644
64012 --- a/include/linux/cdrom.h
64013 +++ b/include/linux/cdrom.h
64014 @@ -87,7 +87,6 @@ struct cdrom_device_ops {
64015
64016 /* driver specifications */
64017 const int capability; /* capability flags */
64018 - int n_minors; /* number of active minor devices */
64019 /* handle uniform packets for scsi type devices (scsi,atapi) */
64020 int (*generic_packet) (struct cdrom_device_info *,
64021 struct packet_command *);
64022 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
64023 index 42e55de..1cd0e66 100644
64024 --- a/include/linux/cleancache.h
64025 +++ b/include/linux/cleancache.h
64026 @@ -31,7 +31,7 @@ struct cleancache_ops {
64027 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
64028 void (*invalidate_inode)(int, struct cleancache_filekey);
64029 void (*invalidate_fs)(int);
64030 -};
64031 +} __no_const;
64032
64033 extern struct cleancache_ops
64034 cleancache_register_ops(struct cleancache_ops *ops);
64035 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
64036 index 662fd1b..e801992 100644
64037 --- a/include/linux/compiler-gcc4.h
64038 +++ b/include/linux/compiler-gcc4.h
64039 @@ -34,6 +34,21 @@
64040 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
64041
64042 #if __GNUC_MINOR__ >= 5
64043 +
64044 +#ifdef CONSTIFY_PLUGIN
64045 +#define __no_const __attribute__((no_const))
64046 +#define __do_const __attribute__((do_const))
64047 +#endif
64048 +
64049 +#ifdef SIZE_OVERFLOW_PLUGIN
64050 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
64051 +#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
64052 +#endif
64053 +
64054 +#ifdef LATENT_ENTROPY_PLUGIN
64055 +#define __latent_entropy __attribute__((latent_entropy))
64056 +#endif
64057 +
64058 /*
64059 * Mark a position in code as unreachable. This can be used to
64060 * suppress control flow warnings after asm blocks that transfer
64061 @@ -49,6 +64,11 @@
64062 #define __noclone __attribute__((__noclone__))
64063
64064 #endif
64065 +
64066 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
64067 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
64068 +#define __bos0(ptr) __bos((ptr), 0)
64069 +#define __bos1(ptr) __bos((ptr), 1)
64070 #endif
64071
64072 #if __GNUC_MINOR__ >= 6
64073 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
64074 index dd852b7..72924c0 100644
64075 --- a/include/linux/compiler.h
64076 +++ b/include/linux/compiler.h
64077 @@ -5,11 +5,14 @@
64078
64079 #ifdef __CHECKER__
64080 # define __user __attribute__((noderef, address_space(1)))
64081 +# define __force_user __force __user
64082 # define __kernel __attribute__((address_space(0)))
64083 +# define __force_kernel __force __kernel
64084 # define __safe __attribute__((safe))
64085 # define __force __attribute__((force))
64086 # define __nocast __attribute__((nocast))
64087 # define __iomem __attribute__((noderef, address_space(2)))
64088 +# define __force_iomem __force __iomem
64089 # define __must_hold(x) __attribute__((context(x,1,1)))
64090 # define __acquires(x) __attribute__((context(x,0,1)))
64091 # define __releases(x) __attribute__((context(x,1,0)))
64092 @@ -17,20 +20,48 @@
64093 # define __release(x) __context__(x,-1)
64094 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
64095 # define __percpu __attribute__((noderef, address_space(3)))
64096 +# define __force_percpu __force __percpu
64097 #ifdef CONFIG_SPARSE_RCU_POINTER
64098 # define __rcu __attribute__((noderef, address_space(4)))
64099 +# define __force_rcu __force __rcu
64100 #else
64101 # define __rcu
64102 +# define __force_rcu
64103 #endif
64104 extern void __chk_user_ptr(const volatile void __user *);
64105 extern void __chk_io_ptr(const volatile void __iomem *);
64106 +#elif defined(CHECKER_PLUGIN)
64107 +//# define __user
64108 +//# define __force_user
64109 +//# define __kernel
64110 +//# define __force_kernel
64111 +# define __safe
64112 +# define __force
64113 +# define __nocast
64114 +# define __iomem
64115 +# define __force_iomem
64116 +# define __chk_user_ptr(x) (void)0
64117 +# define __chk_io_ptr(x) (void)0
64118 +# define __builtin_warning(x, y...) (1)
64119 +# define __acquires(x)
64120 +# define __releases(x)
64121 +# define __acquire(x) (void)0
64122 +# define __release(x) (void)0
64123 +# define __cond_lock(x,c) (c)
64124 +# define __percpu
64125 +# define __force_percpu
64126 +# define __rcu
64127 +# define __force_rcu
64128 #else
64129 # define __user
64130 +# define __force_user
64131 # define __kernel
64132 +# define __force_kernel
64133 # define __safe
64134 # define __force
64135 # define __nocast
64136 # define __iomem
64137 +# define __force_iomem
64138 # define __chk_user_ptr(x) (void)0
64139 # define __chk_io_ptr(x) (void)0
64140 # define __builtin_warning(x, y...) (1)
64141 @@ -41,7 +72,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
64142 # define __release(x) (void)0
64143 # define __cond_lock(x,c) (c)
64144 # define __percpu
64145 +# define __force_percpu
64146 # define __rcu
64147 +# define __force_rcu
64148 #endif
64149
64150 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
64151 @@ -275,6 +308,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
64152 # define __attribute_const__ /* unimplemented */
64153 #endif
64154
64155 +#ifndef __no_const
64156 +# define __no_const
64157 +#endif
64158 +
64159 +#ifndef __do_const
64160 +# define __do_const
64161 +#endif
64162 +
64163 +#ifndef __size_overflow
64164 +# define __size_overflow(...)
64165 +#endif
64166 +
64167 +#ifndef __intentional_overflow
64168 +# define __intentional_overflow(...)
64169 +#endif
64170 +
64171 +#ifndef __latent_entropy
64172 +# define __latent_entropy
64173 +#endif
64174 +
64175 /*
64176 * Tell gcc if a function is cold. The compiler will assume any path
64177 * directly leading to the call is unlikely.
64178 @@ -284,6 +337,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
64179 #define __cold
64180 #endif
64181
64182 +#ifndef __alloc_size
64183 +#define __alloc_size(...)
64184 +#endif
64185 +
64186 +#ifndef __bos
64187 +#define __bos(ptr, arg)
64188 +#endif
64189 +
64190 +#ifndef __bos0
64191 +#define __bos0(ptr)
64192 +#endif
64193 +
64194 +#ifndef __bos1
64195 +#define __bos1(ptr)
64196 +#endif
64197 +
64198 /* Simple shorthand for a section definition */
64199 #ifndef __section
64200 # define __section(S) __attribute__ ((__section__(#S)))
64201 @@ -323,6 +392,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
64202 * use is to mediate communication between process-level code and irq/NMI
64203 * handlers, all running on the same CPU.
64204 */
64205 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
64206 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
64207 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
64208
64209 #endif /* __LINUX_COMPILER_H */
64210 diff --git a/include/linux/cpu.h b/include/linux/cpu.h
64211 index ce7a074..01ab8ac 100644
64212 --- a/include/linux/cpu.h
64213 +++ b/include/linux/cpu.h
64214 @@ -115,7 +115,7 @@ enum {
64215 /* Need to know about CPUs going up/down? */
64216 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
64217 #define cpu_notifier(fn, pri) { \
64218 - static struct notifier_block fn##_nb __cpuinitdata = \
64219 + static struct notifier_block fn##_nb = \
64220 { .notifier_call = fn, .priority = pri }; \
64221 register_cpu_notifier(&fn##_nb); \
64222 }
64223 diff --git a/include/linux/cred.h b/include/linux/cred.h
64224 index 04421e8..6bce4ef 100644
64225 --- a/include/linux/cred.h
64226 +++ b/include/linux/cred.h
64227 @@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
64228 static inline void validate_process_creds(void)
64229 {
64230 }
64231 +static inline void validate_task_creds(struct task_struct *task)
64232 +{
64233 +}
64234 #endif
64235
64236 /**
64237 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
64238 index b92eadf..b4ecdc1 100644
64239 --- a/include/linux/crypto.h
64240 +++ b/include/linux/crypto.h
64241 @@ -373,7 +373,7 @@ struct cipher_tfm {
64242 const u8 *key, unsigned int keylen);
64243 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
64244 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
64245 -};
64246 +} __no_const;
64247
64248 struct hash_tfm {
64249 int (*init)(struct hash_desc *desc);
64250 @@ -394,13 +394,13 @@ struct compress_tfm {
64251 int (*cot_decompress)(struct crypto_tfm *tfm,
64252 const u8 *src, unsigned int slen,
64253 u8 *dst, unsigned int *dlen);
64254 -};
64255 +} __no_const;
64256
64257 struct rng_tfm {
64258 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
64259 unsigned int dlen);
64260 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
64261 -};
64262 +} __no_const;
64263
64264 #define crt_ablkcipher crt_u.ablkcipher
64265 #define crt_aead crt_u.aead
64266 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
64267 index 7925bf0..d5143d2 100644
64268 --- a/include/linux/decompress/mm.h
64269 +++ b/include/linux/decompress/mm.h
64270 @@ -77,7 +77,7 @@ static void free(void *where)
64271 * warnings when not needed (indeed large_malloc / large_free are not
64272 * needed by inflate */
64273
64274 -#define malloc(a) kmalloc(a, GFP_KERNEL)
64275 +#define malloc(a) kmalloc((a), GFP_KERNEL)
64276 #define free(a) kfree(a)
64277
64278 #define large_malloc(a) vmalloc(a)
64279 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
64280 index 94af418..b1ca7a2 100644
64281 --- a/include/linux/dma-mapping.h
64282 +++ b/include/linux/dma-mapping.h
64283 @@ -54,7 +54,7 @@ struct dma_map_ops {
64284 u64 (*get_required_mask)(struct device *dev);
64285 #endif
64286 int is_phys;
64287 -};
64288 +} __do_const;
64289
64290 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
64291
64292 diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
64293 index d3201e4..8281e63 100644
64294 --- a/include/linux/dmaengine.h
64295 +++ b/include/linux/dmaengine.h
64296 @@ -1018,9 +1018,9 @@ struct dma_pinned_list {
64297 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
64298 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
64299
64300 -dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
64301 +dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
64302 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
64303 -dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
64304 +dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
64305 struct dma_pinned_list *pinned_list, struct page *page,
64306 unsigned int offset, size_t len);
64307
64308 diff --git a/include/linux/efi.h b/include/linux/efi.h
64309 index 7a9498a..155713d 100644
64310 --- a/include/linux/efi.h
64311 +++ b/include/linux/efi.h
64312 @@ -733,6 +733,7 @@ struct efivar_operations {
64313 efi_set_variable_t *set_variable;
64314 efi_query_variable_info_t *query_variable_info;
64315 };
64316 +typedef struct efivar_operations __no_const efivar_operations_no_const;
64317
64318 struct efivars {
64319 /*
64320 diff --git a/include/linux/elf.h b/include/linux/elf.h
64321 index 8c9048e..16a4665 100644
64322 --- a/include/linux/elf.h
64323 +++ b/include/linux/elf.h
64324 @@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
64325 #define elf_note elf32_note
64326 #define elf_addr_t Elf32_Off
64327 #define Elf_Half Elf32_Half
64328 +#define elf_dyn Elf32_Dyn
64329
64330 #else
64331
64332 @@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
64333 #define elf_note elf64_note
64334 #define elf_addr_t Elf64_Off
64335 #define Elf_Half Elf64_Half
64336 +#define elf_dyn Elf64_Dyn
64337
64338 #endif
64339
64340 diff --git a/include/linux/filter.h b/include/linux/filter.h
64341 index c45eabc..baa0be5 100644
64342 --- a/include/linux/filter.h
64343 +++ b/include/linux/filter.h
64344 @@ -20,6 +20,7 @@ struct compat_sock_fprog {
64345
64346 struct sk_buff;
64347 struct sock;
64348 +struct bpf_jit_work;
64349
64350 struct sk_filter
64351 {
64352 @@ -27,6 +28,9 @@ struct sk_filter
64353 unsigned int len; /* Number of filter blocks */
64354 unsigned int (*bpf_func)(const struct sk_buff *skb,
64355 const struct sock_filter *filter);
64356 +#ifdef CONFIG_BPF_JIT
64357 + struct bpf_jit_work *work;
64358 +#endif
64359 struct rcu_head rcu;
64360 struct sock_filter insns[0];
64361 };
64362 diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
64363 index 3044254..9767f41 100644
64364 --- a/include/linux/frontswap.h
64365 +++ b/include/linux/frontswap.h
64366 @@ -11,7 +11,7 @@ struct frontswap_ops {
64367 int (*load)(unsigned, pgoff_t, struct page *);
64368 void (*invalidate_page)(unsigned, pgoff_t);
64369 void (*invalidate_area)(unsigned);
64370 -};
64371 +} __no_const;
64372
64373 extern bool frontswap_enabled;
64374 extern struct frontswap_ops
64375 diff --git a/include/linux/fs.h b/include/linux/fs.h
64376 index 7617ee0..b575199 100644
64377 --- a/include/linux/fs.h
64378 +++ b/include/linux/fs.h
64379 @@ -1541,7 +1541,8 @@ struct file_operations {
64380 long (*fallocate)(struct file *file, int mode, loff_t offset,
64381 loff_t len);
64382 int (*show_fdinfo)(struct seq_file *m, struct file *f);
64383 -};
64384 +} __do_const;
64385 +typedef struct file_operations __no_const file_operations_no_const;
64386
64387 struct inode_operations {
64388 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
64389 @@ -2665,4 +2666,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
64390 inode->i_flags |= S_NOSEC;
64391 }
64392
64393 +static inline bool is_sidechannel_device(const struct inode *inode)
64394 +{
64395 +#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
64396 + umode_t mode = inode->i_mode;
64397 + return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
64398 +#else
64399 + return false;
64400 +#endif
64401 +}
64402 +
64403 #endif /* _LINUX_FS_H */
64404 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
64405 index d0ae3a8..0244b34 100644
64406 --- a/include/linux/fs_struct.h
64407 +++ b/include/linux/fs_struct.h
64408 @@ -6,7 +6,7 @@
64409 #include <linux/seqlock.h>
64410
64411 struct fs_struct {
64412 - int users;
64413 + atomic_t users;
64414 spinlock_t lock;
64415 seqcount_t seq;
64416 int umask;
64417 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
64418 index 5dfa0aa..6acf322 100644
64419 --- a/include/linux/fscache-cache.h
64420 +++ b/include/linux/fscache-cache.h
64421 @@ -112,7 +112,7 @@ struct fscache_operation {
64422 fscache_operation_release_t release;
64423 };
64424
64425 -extern atomic_t fscache_op_debug_id;
64426 +extern atomic_unchecked_t fscache_op_debug_id;
64427 extern void fscache_op_work_func(struct work_struct *work);
64428
64429 extern void fscache_enqueue_operation(struct fscache_operation *);
64430 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
64431 INIT_WORK(&op->work, fscache_op_work_func);
64432 atomic_set(&op->usage, 1);
64433 op->state = FSCACHE_OP_ST_INITIALISED;
64434 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
64435 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
64436 op->processor = processor;
64437 op->release = release;
64438 INIT_LIST_HEAD(&op->pend_link);
64439 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
64440 index 0fbfb46..508eb0d 100644
64441 --- a/include/linux/fsnotify.h
64442 +++ b/include/linux/fsnotify.h
64443 @@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
64444 struct inode *inode = path->dentry->d_inode;
64445 __u32 mask = FS_ACCESS;
64446
64447 + if (is_sidechannel_device(inode))
64448 + return;
64449 +
64450 if (S_ISDIR(inode->i_mode))
64451 mask |= FS_ISDIR;
64452
64453 @@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
64454 struct inode *inode = path->dentry->d_inode;
64455 __u32 mask = FS_MODIFY;
64456
64457 + if (is_sidechannel_device(inode))
64458 + return;
64459 +
64460 if (S_ISDIR(inode->i_mode))
64461 mask |= FS_ISDIR;
64462
64463 @@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
64464 */
64465 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
64466 {
64467 - return kstrdup(name, GFP_KERNEL);
64468 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
64469 }
64470
64471 /*
64472 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
64473 index a3d4895..ddd2a50 100644
64474 --- a/include/linux/ftrace_event.h
64475 +++ b/include/linux/ftrace_event.h
64476 @@ -272,7 +272,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
64477 extern int trace_add_event_call(struct ftrace_event_call *call);
64478 extern void trace_remove_event_call(struct ftrace_event_call *call);
64479
64480 -#define is_signed_type(type) (((type)(-1)) < 0)
64481 +#define is_signed_type(type) (((type)(-1)) < (type)1)
64482
64483 int trace_set_clr_event(const char *system, const char *event, int set);
64484
64485 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
64486 index 79b8bba..86b539e 100644
64487 --- a/include/linux/genhd.h
64488 +++ b/include/linux/genhd.h
64489 @@ -194,7 +194,7 @@ struct gendisk {
64490 struct kobject *slave_dir;
64491
64492 struct timer_rand_state *random;
64493 - atomic_t sync_io; /* RAID */
64494 + atomic_unchecked_t sync_io; /* RAID */
64495 struct disk_events *ev;
64496 #ifdef CONFIG_BLK_DEV_INTEGRITY
64497 struct blk_integrity *integrity;
64498 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
64499 index 0f615eb..5c3832f 100644
64500 --- a/include/linux/gfp.h
64501 +++ b/include/linux/gfp.h
64502 @@ -35,6 +35,13 @@ struct vm_area_struct;
64503 #define ___GFP_NO_KSWAPD 0x400000u
64504 #define ___GFP_OTHER_NODE 0x800000u
64505 #define ___GFP_WRITE 0x1000000u
64506 +
64507 +#ifdef CONFIG_PAX_USERCOPY_SLABS
64508 +#define ___GFP_USERCOPY 0x2000000u
64509 +#else
64510 +#define ___GFP_USERCOPY 0
64511 +#endif
64512 +
64513 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
64514
64515 /*
64516 @@ -92,6 +99,7 @@ struct vm_area_struct;
64517 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
64518 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
64519 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
64520 +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
64521
64522 /*
64523 * This may seem redundant, but it's a way of annotating false positives vs.
64524 @@ -99,7 +107,7 @@ struct vm_area_struct;
64525 */
64526 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
64527
64528 -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
64529 +#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
64530 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
64531
64532 /* This equals 0, but use constants in case they ever change */
64533 @@ -153,6 +161,8 @@ struct vm_area_struct;
64534 /* 4GB DMA on some platforms */
64535 #define GFP_DMA32 __GFP_DMA32
64536
64537 +#define GFP_USERCOPY __GFP_USERCOPY
64538 +
64539 /* Convert GFP flags to their corresponding migrate type */
64540 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
64541 {
64542 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
64543 new file mode 100644
64544 index 0000000..ebe6d72
64545 --- /dev/null
64546 +++ b/include/linux/gracl.h
64547 @@ -0,0 +1,319 @@
64548 +#ifndef GR_ACL_H
64549 +#define GR_ACL_H
64550 +
64551 +#include <linux/grdefs.h>
64552 +#include <linux/resource.h>
64553 +#include <linux/capability.h>
64554 +#include <linux/dcache.h>
64555 +#include <asm/resource.h>
64556 +
64557 +/* Major status information */
64558 +
64559 +#define GR_VERSION "grsecurity 2.9.1"
64560 +#define GRSECURITY_VERSION 0x2901
64561 +
64562 +enum {
64563 + GR_SHUTDOWN = 0,
64564 + GR_ENABLE = 1,
64565 + GR_SPROLE = 2,
64566 + GR_RELOAD = 3,
64567 + GR_SEGVMOD = 4,
64568 + GR_STATUS = 5,
64569 + GR_UNSPROLE = 6,
64570 + GR_PASSSET = 7,
64571 + GR_SPROLEPAM = 8,
64572 +};
64573 +
64574 +/* Password setup definitions
64575 + * kernel/grhash.c */
64576 +enum {
64577 + GR_PW_LEN = 128,
64578 + GR_SALT_LEN = 16,
64579 + GR_SHA_LEN = 32,
64580 +};
64581 +
64582 +enum {
64583 + GR_SPROLE_LEN = 64,
64584 +};
64585 +
64586 +enum {
64587 + GR_NO_GLOB = 0,
64588 + GR_REG_GLOB,
64589 + GR_CREATE_GLOB
64590 +};
64591 +
64592 +#define GR_NLIMITS 32
64593 +
64594 +/* Begin Data Structures */
64595 +
64596 +struct sprole_pw {
64597 + unsigned char *rolename;
64598 + unsigned char salt[GR_SALT_LEN];
64599 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
64600 +};
64601 +
64602 +struct name_entry {
64603 + __u32 key;
64604 + ino_t inode;
64605 + dev_t device;
64606 + char *name;
64607 + __u16 len;
64608 + __u8 deleted;
64609 + struct name_entry *prev;
64610 + struct name_entry *next;
64611 +};
64612 +
64613 +struct inodev_entry {
64614 + struct name_entry *nentry;
64615 + struct inodev_entry *prev;
64616 + struct inodev_entry *next;
64617 +};
64618 +
64619 +struct acl_role_db {
64620 + struct acl_role_label **r_hash;
64621 + __u32 r_size;
64622 +};
64623 +
64624 +struct inodev_db {
64625 + struct inodev_entry **i_hash;
64626 + __u32 i_size;
64627 +};
64628 +
64629 +struct name_db {
64630 + struct name_entry **n_hash;
64631 + __u32 n_size;
64632 +};
64633 +
64634 +struct crash_uid {
64635 + uid_t uid;
64636 + unsigned long expires;
64637 +};
64638 +
64639 +struct gr_hash_struct {
64640 + void **table;
64641 + void **nametable;
64642 + void *first;
64643 + __u32 table_size;
64644 + __u32 used_size;
64645 + int type;
64646 +};
64647 +
64648 +/* Userspace Grsecurity ACL data structures */
64649 +
64650 +struct acl_subject_label {
64651 + char *filename;
64652 + ino_t inode;
64653 + dev_t device;
64654 + __u32 mode;
64655 + kernel_cap_t cap_mask;
64656 + kernel_cap_t cap_lower;
64657 + kernel_cap_t cap_invert_audit;
64658 +
64659 + struct rlimit res[GR_NLIMITS];
64660 + __u32 resmask;
64661 +
64662 + __u8 user_trans_type;
64663 + __u8 group_trans_type;
64664 + uid_t *user_transitions;
64665 + gid_t *group_transitions;
64666 + __u16 user_trans_num;
64667 + __u16 group_trans_num;
64668 +
64669 + __u32 sock_families[2];
64670 + __u32 ip_proto[8];
64671 + __u32 ip_type;
64672 + struct acl_ip_label **ips;
64673 + __u32 ip_num;
64674 + __u32 inaddr_any_override;
64675 +
64676 + __u32 crashes;
64677 + unsigned long expires;
64678 +
64679 + struct acl_subject_label *parent_subject;
64680 + struct gr_hash_struct *hash;
64681 + struct acl_subject_label *prev;
64682 + struct acl_subject_label *next;
64683 +
64684 + struct acl_object_label **obj_hash;
64685 + __u32 obj_hash_size;
64686 + __u16 pax_flags;
64687 +};
64688 +
64689 +struct role_allowed_ip {
64690 + __u32 addr;
64691 + __u32 netmask;
64692 +
64693 + struct role_allowed_ip *prev;
64694 + struct role_allowed_ip *next;
64695 +};
64696 +
64697 +struct role_transition {
64698 + char *rolename;
64699 +
64700 + struct role_transition *prev;
64701 + struct role_transition *next;
64702 +};
64703 +
64704 +struct acl_role_label {
64705 + char *rolename;
64706 + uid_t uidgid;
64707 + __u16 roletype;
64708 +
64709 + __u16 auth_attempts;
64710 + unsigned long expires;
64711 +
64712 + struct acl_subject_label *root_label;
64713 + struct gr_hash_struct *hash;
64714 +
64715 + struct acl_role_label *prev;
64716 + struct acl_role_label *next;
64717 +
64718 + struct role_transition *transitions;
64719 + struct role_allowed_ip *allowed_ips;
64720 + uid_t *domain_children;
64721 + __u16 domain_child_num;
64722 +
64723 + umode_t umask;
64724 +
64725 + struct acl_subject_label **subj_hash;
64726 + __u32 subj_hash_size;
64727 +};
64728 +
64729 +struct user_acl_role_db {
64730 + struct acl_role_label **r_table;
64731 + __u32 num_pointers; /* Number of allocations to track */
64732 + __u32 num_roles; /* Number of roles */
64733 + __u32 num_domain_children; /* Number of domain children */
64734 + __u32 num_subjects; /* Number of subjects */
64735 + __u32 num_objects; /* Number of objects */
64736 +};
64737 +
64738 +struct acl_object_label {
64739 + char *filename;
64740 + ino_t inode;
64741 + dev_t device;
64742 + __u32 mode;
64743 +
64744 + struct acl_subject_label *nested;
64745 + struct acl_object_label *globbed;
64746 +
64747 + /* next two structures not used */
64748 +
64749 + struct acl_object_label *prev;
64750 + struct acl_object_label *next;
64751 +};
64752 +
64753 +struct acl_ip_label {
64754 + char *iface;
64755 + __u32 addr;
64756 + __u32 netmask;
64757 + __u16 low, high;
64758 + __u8 mode;
64759 + __u32 type;
64760 + __u32 proto[8];
64761 +
64762 + /* next two structures not used */
64763 +
64764 + struct acl_ip_label *prev;
64765 + struct acl_ip_label *next;
64766 +};
64767 +
64768 +struct gr_arg {
64769 + struct user_acl_role_db role_db;
64770 + unsigned char pw[GR_PW_LEN];
64771 + unsigned char salt[GR_SALT_LEN];
64772 + unsigned char sum[GR_SHA_LEN];
64773 + unsigned char sp_role[GR_SPROLE_LEN];
64774 + struct sprole_pw *sprole_pws;
64775 + dev_t segv_device;
64776 + ino_t segv_inode;
64777 + uid_t segv_uid;
64778 + __u16 num_sprole_pws;
64779 + __u16 mode;
64780 +};
64781 +
64782 +struct gr_arg_wrapper {
64783 + struct gr_arg *arg;
64784 + __u32 version;
64785 + __u32 size;
64786 +};
64787 +
64788 +struct subject_map {
64789 + struct acl_subject_label *user;
64790 + struct acl_subject_label *kernel;
64791 + struct subject_map *prev;
64792 + struct subject_map *next;
64793 +};
64794 +
64795 +struct acl_subj_map_db {
64796 + struct subject_map **s_hash;
64797 + __u32 s_size;
64798 +};
64799 +
64800 +/* End Data Structures Section */
64801 +
64802 +/* Hash functions generated by empirical testing by Brad Spengler
64803 + Makes good use of the low bits of the inode. Generally 0-1 times
64804 + in loop for successful match. 0-3 for unsuccessful match.
64805 + Shift/add algorithm with modulus of table size and an XOR*/
64806 +
64807 +static __inline__ unsigned int
64808 +gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
64809 +{
64810 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
64811 +}
64812 +
64813 + static __inline__ unsigned int
64814 +gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
64815 +{
64816 + return ((const unsigned long)userp % sz);
64817 +}
64818 +
64819 +static __inline__ unsigned int
64820 +gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
64821 +{
64822 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
64823 +}
64824 +
64825 +static __inline__ unsigned int
64826 +gr_nhash(const char *name, const __u16 len, const unsigned int sz)
64827 +{
64828 + return full_name_hash((const unsigned char *)name, len) % sz;
64829 +}
64830 +
64831 +#define FOR_EACH_ROLE_START(role) \
64832 + role = role_list; \
64833 + while (role) {
64834 +
64835 +#define FOR_EACH_ROLE_END(role) \
64836 + role = role->prev; \
64837 + }
64838 +
64839 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
64840 + subj = NULL; \
64841 + iter = 0; \
64842 + while (iter < role->subj_hash_size) { \
64843 + if (subj == NULL) \
64844 + subj = role->subj_hash[iter]; \
64845 + if (subj == NULL) { \
64846 + iter++; \
64847 + continue; \
64848 + }
64849 +
64850 +#define FOR_EACH_SUBJECT_END(subj,iter) \
64851 + subj = subj->next; \
64852 + if (subj == NULL) \
64853 + iter++; \
64854 + }
64855 +
64856 +
64857 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
64858 + subj = role->hash->first; \
64859 + while (subj != NULL) {
64860 +
64861 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
64862 + subj = subj->next; \
64863 + }
64864 +
64865 +#endif
64866 +
64867 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
64868 new file mode 100644
64869 index 0000000..323ecf2
64870 --- /dev/null
64871 +++ b/include/linux/gralloc.h
64872 @@ -0,0 +1,9 @@
64873 +#ifndef __GRALLOC_H
64874 +#define __GRALLOC_H
64875 +
64876 +void acl_free_all(void);
64877 +int acl_alloc_stack_init(unsigned long size);
64878 +void *acl_alloc(unsigned long len);
64879 +void *acl_alloc_num(unsigned long num, unsigned long len);
64880 +
64881 +#endif
64882 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
64883 new file mode 100644
64884 index 0000000..be66033
64885 --- /dev/null
64886 +++ b/include/linux/grdefs.h
64887 @@ -0,0 +1,140 @@
64888 +#ifndef GRDEFS_H
64889 +#define GRDEFS_H
64890 +
64891 +/* Begin grsecurity status declarations */
64892 +
64893 +enum {
64894 + GR_READY = 0x01,
64895 + GR_STATUS_INIT = 0x00 // disabled state
64896 +};
64897 +
64898 +/* Begin ACL declarations */
64899 +
64900 +/* Role flags */
64901 +
64902 +enum {
64903 + GR_ROLE_USER = 0x0001,
64904 + GR_ROLE_GROUP = 0x0002,
64905 + GR_ROLE_DEFAULT = 0x0004,
64906 + GR_ROLE_SPECIAL = 0x0008,
64907 + GR_ROLE_AUTH = 0x0010,
64908 + GR_ROLE_NOPW = 0x0020,
64909 + GR_ROLE_GOD = 0x0040,
64910 + GR_ROLE_LEARN = 0x0080,
64911 + GR_ROLE_TPE = 0x0100,
64912 + GR_ROLE_DOMAIN = 0x0200,
64913 + GR_ROLE_PAM = 0x0400,
64914 + GR_ROLE_PERSIST = 0x0800
64915 +};
64916 +
64917 +/* ACL Subject and Object mode flags */
64918 +enum {
64919 + GR_DELETED = 0x80000000
64920 +};
64921 +
64922 +/* ACL Object-only mode flags */
64923 +enum {
64924 + GR_READ = 0x00000001,
64925 + GR_APPEND = 0x00000002,
64926 + GR_WRITE = 0x00000004,
64927 + GR_EXEC = 0x00000008,
64928 + GR_FIND = 0x00000010,
64929 + GR_INHERIT = 0x00000020,
64930 + GR_SETID = 0x00000040,
64931 + GR_CREATE = 0x00000080,
64932 + GR_DELETE = 0x00000100,
64933 + GR_LINK = 0x00000200,
64934 + GR_AUDIT_READ = 0x00000400,
64935 + GR_AUDIT_APPEND = 0x00000800,
64936 + GR_AUDIT_WRITE = 0x00001000,
64937 + GR_AUDIT_EXEC = 0x00002000,
64938 + GR_AUDIT_FIND = 0x00004000,
64939 + GR_AUDIT_INHERIT= 0x00008000,
64940 + GR_AUDIT_SETID = 0x00010000,
64941 + GR_AUDIT_CREATE = 0x00020000,
64942 + GR_AUDIT_DELETE = 0x00040000,
64943 + GR_AUDIT_LINK = 0x00080000,
64944 + GR_PTRACERD = 0x00100000,
64945 + GR_NOPTRACE = 0x00200000,
64946 + GR_SUPPRESS = 0x00400000,
64947 + GR_NOLEARN = 0x00800000,
64948 + GR_INIT_TRANSFER= 0x01000000
64949 +};
64950 +
64951 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
64952 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
64953 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
64954 +
64955 +/* ACL subject-only mode flags */
64956 +enum {
64957 + GR_KILL = 0x00000001,
64958 + GR_VIEW = 0x00000002,
64959 + GR_PROTECTED = 0x00000004,
64960 + GR_LEARN = 0x00000008,
64961 + GR_OVERRIDE = 0x00000010,
64962 + /* just a placeholder, this mode is only used in userspace */
64963 + GR_DUMMY = 0x00000020,
64964 + GR_PROTSHM = 0x00000040,
64965 + GR_KILLPROC = 0x00000080,
64966 + GR_KILLIPPROC = 0x00000100,
64967 + /* just a placeholder, this mode is only used in userspace */
64968 + GR_NOTROJAN = 0x00000200,
64969 + GR_PROTPROCFD = 0x00000400,
64970 + GR_PROCACCT = 0x00000800,
64971 + GR_RELAXPTRACE = 0x00001000,
64972 + //GR_NESTED = 0x00002000,
64973 + GR_INHERITLEARN = 0x00004000,
64974 + GR_PROCFIND = 0x00008000,
64975 + GR_POVERRIDE = 0x00010000,
64976 + GR_KERNELAUTH = 0x00020000,
64977 + GR_ATSECURE = 0x00040000,
64978 + GR_SHMEXEC = 0x00080000
64979 +};
64980 +
64981 +enum {
64982 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
64983 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
64984 + GR_PAX_ENABLE_MPROTECT = 0x0004,
64985 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
64986 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
64987 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
64988 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
64989 + GR_PAX_DISABLE_MPROTECT = 0x0400,
64990 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
64991 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
64992 +};
64993 +
64994 +enum {
64995 + GR_ID_USER = 0x01,
64996 + GR_ID_GROUP = 0x02,
64997 +};
64998 +
64999 +enum {
65000 + GR_ID_ALLOW = 0x01,
65001 + GR_ID_DENY = 0x02,
65002 +};
65003 +
65004 +#define GR_CRASH_RES 31
65005 +#define GR_UIDTABLE_MAX 500
65006 +
65007 +/* begin resource learning section */
65008 +enum {
65009 + GR_RLIM_CPU_BUMP = 60,
65010 + GR_RLIM_FSIZE_BUMP = 50000,
65011 + GR_RLIM_DATA_BUMP = 10000,
65012 + GR_RLIM_STACK_BUMP = 1000,
65013 + GR_RLIM_CORE_BUMP = 10000,
65014 + GR_RLIM_RSS_BUMP = 500000,
65015 + GR_RLIM_NPROC_BUMP = 1,
65016 + GR_RLIM_NOFILE_BUMP = 5,
65017 + GR_RLIM_MEMLOCK_BUMP = 50000,
65018 + GR_RLIM_AS_BUMP = 500000,
65019 + GR_RLIM_LOCKS_BUMP = 2,
65020 + GR_RLIM_SIGPENDING_BUMP = 5,
65021 + GR_RLIM_MSGQUEUE_BUMP = 10000,
65022 + GR_RLIM_NICE_BUMP = 1,
65023 + GR_RLIM_RTPRIO_BUMP = 1,
65024 + GR_RLIM_RTTIME_BUMP = 1000000
65025 +};
65026 +
65027 +#endif
65028 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
65029 new file mode 100644
65030 index 0000000..9bb6662
65031 --- /dev/null
65032 +++ b/include/linux/grinternal.h
65033 @@ -0,0 +1,215 @@
65034 +#ifndef __GRINTERNAL_H
65035 +#define __GRINTERNAL_H
65036 +
65037 +#ifdef CONFIG_GRKERNSEC
65038 +
65039 +#include <linux/fs.h>
65040 +#include <linux/mnt_namespace.h>
65041 +#include <linux/nsproxy.h>
65042 +#include <linux/gracl.h>
65043 +#include <linux/grdefs.h>
65044 +#include <linux/grmsg.h>
65045 +
65046 +void gr_add_learn_entry(const char *fmt, ...)
65047 + __attribute__ ((format (printf, 1, 2)));
65048 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
65049 + const struct vfsmount *mnt);
65050 +__u32 gr_check_create(const struct dentry *new_dentry,
65051 + const struct dentry *parent,
65052 + const struct vfsmount *mnt, const __u32 mode);
65053 +int gr_check_protected_task(const struct task_struct *task);
65054 +__u32 to_gr_audit(const __u32 reqmode);
65055 +int gr_set_acls(const int type);
65056 +int gr_apply_subject_to_task(struct task_struct *task);
65057 +int gr_acl_is_enabled(void);
65058 +char gr_roletype_to_char(void);
65059 +
65060 +void gr_handle_alertkill(struct task_struct *task);
65061 +char *gr_to_filename(const struct dentry *dentry,
65062 + const struct vfsmount *mnt);
65063 +char *gr_to_filename1(const struct dentry *dentry,
65064 + const struct vfsmount *mnt);
65065 +char *gr_to_filename2(const struct dentry *dentry,
65066 + const struct vfsmount *mnt);
65067 +char *gr_to_filename3(const struct dentry *dentry,
65068 + const struct vfsmount *mnt);
65069 +
65070 +extern int grsec_enable_ptrace_readexec;
65071 +extern int grsec_enable_harden_ptrace;
65072 +extern int grsec_enable_link;
65073 +extern int grsec_enable_fifo;
65074 +extern int grsec_enable_execve;
65075 +extern int grsec_enable_shm;
65076 +extern int grsec_enable_execlog;
65077 +extern int grsec_enable_signal;
65078 +extern int grsec_enable_audit_ptrace;
65079 +extern int grsec_enable_forkfail;
65080 +extern int grsec_enable_time;
65081 +extern int grsec_enable_rofs;
65082 +extern int grsec_enable_chroot_shmat;
65083 +extern int grsec_enable_chroot_mount;
65084 +extern int grsec_enable_chroot_double;
65085 +extern int grsec_enable_chroot_pivot;
65086 +extern int grsec_enable_chroot_chdir;
65087 +extern int grsec_enable_chroot_chmod;
65088 +extern int grsec_enable_chroot_mknod;
65089 +extern int grsec_enable_chroot_fchdir;
65090 +extern int grsec_enable_chroot_nice;
65091 +extern int grsec_enable_chroot_execlog;
65092 +extern int grsec_enable_chroot_caps;
65093 +extern int grsec_enable_chroot_sysctl;
65094 +extern int grsec_enable_chroot_unix;
65095 +extern int grsec_enable_symlinkown;
65096 +extern kgid_t grsec_symlinkown_gid;
65097 +extern int grsec_enable_tpe;
65098 +extern kgid_t grsec_tpe_gid;
65099 +extern int grsec_enable_tpe_all;
65100 +extern int grsec_enable_tpe_invert;
65101 +extern int grsec_enable_socket_all;
65102 +extern kgid_t grsec_socket_all_gid;
65103 +extern int grsec_enable_socket_client;
65104 +extern kgid_t grsec_socket_client_gid;
65105 +extern int grsec_enable_socket_server;
65106 +extern kgid_t grsec_socket_server_gid;
65107 +extern kgid_t grsec_audit_gid;
65108 +extern int grsec_enable_group;
65109 +extern int grsec_enable_audit_textrel;
65110 +extern int grsec_enable_log_rwxmaps;
65111 +extern int grsec_enable_mount;
65112 +extern int grsec_enable_chdir;
65113 +extern int grsec_resource_logging;
65114 +extern int grsec_enable_blackhole;
65115 +extern int grsec_lastack_retries;
65116 +extern int grsec_enable_brute;
65117 +extern int grsec_lock;
65118 +
65119 +extern spinlock_t grsec_alert_lock;
65120 +extern unsigned long grsec_alert_wtime;
65121 +extern unsigned long grsec_alert_fyet;
65122 +
65123 +extern spinlock_t grsec_audit_lock;
65124 +
65125 +extern rwlock_t grsec_exec_file_lock;
65126 +
65127 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
65128 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
65129 + (tsk)->exec_file->f_vfsmnt) : "/")
65130 +
65131 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
65132 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
65133 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
65134 +
65135 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
65136 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
65137 + (tsk)->exec_file->f_vfsmnt) : "/")
65138 +
65139 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
65140 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
65141 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
65142 +
65143 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
65144 +
65145 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
65146 +
65147 +#define GR_CHROOT_CAPS {{ \
65148 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
65149 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
65150 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
65151 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
65152 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
65153 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
65154 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
65155 +
65156 +#define security_learn(normal_msg,args...) \
65157 +({ \
65158 + read_lock(&grsec_exec_file_lock); \
65159 + gr_add_learn_entry(normal_msg "\n", ## args); \
65160 + read_unlock(&grsec_exec_file_lock); \
65161 +})
65162 +
65163 +enum {
65164 + GR_DO_AUDIT,
65165 + GR_DONT_AUDIT,
65166 + /* used for non-audit messages that we shouldn't kill the task on */
65167 + GR_DONT_AUDIT_GOOD
65168 +};
65169 +
65170 +enum {
65171 + GR_TTYSNIFF,
65172 + GR_RBAC,
65173 + GR_RBAC_STR,
65174 + GR_STR_RBAC,
65175 + GR_RBAC_MODE2,
65176 + GR_RBAC_MODE3,
65177 + GR_FILENAME,
65178 + GR_SYSCTL_HIDDEN,
65179 + GR_NOARGS,
65180 + GR_ONE_INT,
65181 + GR_ONE_INT_TWO_STR,
65182 + GR_ONE_STR,
65183 + GR_STR_INT,
65184 + GR_TWO_STR_INT,
65185 + GR_TWO_INT,
65186 + GR_TWO_U64,
65187 + GR_THREE_INT,
65188 + GR_FIVE_INT_TWO_STR,
65189 + GR_TWO_STR,
65190 + GR_THREE_STR,
65191 + GR_FOUR_STR,
65192 + GR_STR_FILENAME,
65193 + GR_FILENAME_STR,
65194 + GR_FILENAME_TWO_INT,
65195 + GR_FILENAME_TWO_INT_STR,
65196 + GR_TEXTREL,
65197 + GR_PTRACE,
65198 + GR_RESOURCE,
65199 + GR_CAP,
65200 + GR_SIG,
65201 + GR_SIG2,
65202 + GR_CRASH1,
65203 + GR_CRASH2,
65204 + GR_PSACCT,
65205 + GR_RWXMAP
65206 +};
65207 +
65208 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
65209 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
65210 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
65211 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
65212 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
65213 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
65214 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
65215 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
65216 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
65217 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
65218 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
65219 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
65220 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
65221 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
65222 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
65223 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
65224 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
65225 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
65226 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
65227 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
65228 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
65229 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
65230 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
65231 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
65232 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
65233 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
65234 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
65235 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
65236 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
65237 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
65238 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
65239 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
65240 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
65241 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
65242 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
65243 +
65244 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
65245 +
65246 +#endif
65247 +
65248 +#endif
65249 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
65250 new file mode 100644
65251 index 0000000..2bd4c8d
65252 --- /dev/null
65253 +++ b/include/linux/grmsg.h
65254 @@ -0,0 +1,111 @@
65255 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
65256 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
65257 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
65258 +#define GR_STOPMOD_MSG "denied modification of module state by "
65259 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
65260 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
65261 +#define GR_IOPERM_MSG "denied use of ioperm() by "
65262 +#define GR_IOPL_MSG "denied use of iopl() by "
65263 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
65264 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
65265 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
65266 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
65267 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
65268 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
65269 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
65270 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
65271 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
65272 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
65273 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
65274 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
65275 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
65276 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
65277 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
65278 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
65279 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
65280 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
65281 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
65282 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
65283 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
65284 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
65285 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
65286 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
65287 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
65288 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
65289 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
65290 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
65291 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
65292 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
65293 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
65294 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
65295 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
65296 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
65297 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
65298 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
65299 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
65300 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
65301 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
65302 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
65303 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
65304 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
65305 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
65306 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
65307 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
65308 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
65309 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
65310 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
65311 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
65312 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
65313 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
65314 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
65315 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
65316 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
65317 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
65318 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
65319 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
65320 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
65321 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
65322 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
65323 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
65324 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
65325 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
65326 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
65327 +#define GR_NICE_CHROOT_MSG "denied priority change by "
65328 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
65329 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
65330 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
65331 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
65332 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
65333 +#define GR_TIME_MSG "time set by "
65334 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
65335 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
65336 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
65337 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
65338 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
65339 +#define GR_BIND_MSG "denied bind() by "
65340 +#define GR_CONNECT_MSG "denied connect() by "
65341 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
65342 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
65343 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
65344 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
65345 +#define GR_CAP_ACL_MSG "use of %s denied for "
65346 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
65347 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
65348 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
65349 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
65350 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
65351 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
65352 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
65353 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
65354 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
65355 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
65356 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
65357 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
65358 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
65359 +#define GR_VM86_MSG "denied use of vm86 by "
65360 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
65361 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
65362 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
65363 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
65364 +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
65365 +#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
65366 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
65367 new file mode 100644
65368 index 0000000..1ae241a
65369 --- /dev/null
65370 +++ b/include/linux/grsecurity.h
65371 @@ -0,0 +1,257 @@
65372 +#ifndef GR_SECURITY_H
65373 +#define GR_SECURITY_H
65374 +#include <linux/fs.h>
65375 +#include <linux/fs_struct.h>
65376 +#include <linux/binfmts.h>
65377 +#include <linux/gracl.h>
65378 +
65379 +/* notify of brain-dead configs */
65380 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65381 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
65382 +#endif
65383 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
65384 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
65385 +#endif
65386 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
65387 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
65388 +#endif
65389 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
65390 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
65391 +#endif
65392 +
65393 +#include <linux/compat.h>
65394 +
65395 +struct user_arg_ptr {
65396 +#ifdef CONFIG_COMPAT
65397 + bool is_compat;
65398 +#endif
65399 + union {
65400 + const char __user *const __user *native;
65401 +#ifdef CONFIG_COMPAT
65402 + const compat_uptr_t __user *compat;
65403 +#endif
65404 + } ptr;
65405 +};
65406 +
65407 +void gr_handle_brute_attach(unsigned long mm_flags);
65408 +void gr_handle_brute_check(void);
65409 +void gr_handle_kernel_exploit(void);
65410 +int gr_process_user_ban(void);
65411 +
65412 +char gr_roletype_to_char(void);
65413 +
65414 +int gr_acl_enable_at_secure(void);
65415 +
65416 +int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
65417 +int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
65418 +
65419 +void gr_del_task_from_ip_table(struct task_struct *p);
65420 +
65421 +int gr_pid_is_chrooted(struct task_struct *p);
65422 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
65423 +int gr_handle_chroot_nice(void);
65424 +int gr_handle_chroot_sysctl(const int op);
65425 +int gr_handle_chroot_setpriority(struct task_struct *p,
65426 + const int niceval);
65427 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
65428 +int gr_handle_chroot_chroot(const struct dentry *dentry,
65429 + const struct vfsmount *mnt);
65430 +void gr_handle_chroot_chdir(struct path *path);
65431 +int gr_handle_chroot_chmod(const struct dentry *dentry,
65432 + const struct vfsmount *mnt, const int mode);
65433 +int gr_handle_chroot_mknod(const struct dentry *dentry,
65434 + const struct vfsmount *mnt, const int mode);
65435 +int gr_handle_chroot_mount(const struct dentry *dentry,
65436 + const struct vfsmount *mnt,
65437 + const char *dev_name);
65438 +int gr_handle_chroot_pivot(void);
65439 +int gr_handle_chroot_unix(const pid_t pid);
65440 +
65441 +int gr_handle_rawio(const struct inode *inode);
65442 +
65443 +void gr_handle_ioperm(void);
65444 +void gr_handle_iopl(void);
65445 +
65446 +umode_t gr_acl_umask(void);
65447 +
65448 +int gr_tpe_allow(const struct file *file);
65449 +
65450 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
65451 +void gr_clear_chroot_entries(struct task_struct *task);
65452 +
65453 +void gr_log_forkfail(const int retval);
65454 +void gr_log_timechange(void);
65455 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
65456 +void gr_log_chdir(const struct dentry *dentry,
65457 + const struct vfsmount *mnt);
65458 +void gr_log_chroot_exec(const struct dentry *dentry,
65459 + const struct vfsmount *mnt);
65460 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
65461 +void gr_log_remount(const char *devname, const int retval);
65462 +void gr_log_unmount(const char *devname, const int retval);
65463 +void gr_log_mount(const char *from, const char *to, const int retval);
65464 +void gr_log_textrel(struct vm_area_struct *vma);
65465 +void gr_log_rwxmmap(struct file *file);
65466 +void gr_log_rwxmprotect(struct file *file);
65467 +
65468 +int gr_handle_follow_link(const struct inode *parent,
65469 + const struct inode *inode,
65470 + const struct dentry *dentry,
65471 + const struct vfsmount *mnt);
65472 +int gr_handle_fifo(const struct dentry *dentry,
65473 + const struct vfsmount *mnt,
65474 + const struct dentry *dir, const int flag,
65475 + const int acc_mode);
65476 +int gr_handle_hardlink(const struct dentry *dentry,
65477 + const struct vfsmount *mnt,
65478 + struct inode *inode,
65479 + const int mode, const struct filename *to);
65480 +
65481 +int gr_is_capable(const int cap);
65482 +int gr_is_capable_nolog(const int cap);
65483 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
65484 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
65485 +
65486 +void gr_copy_label(struct task_struct *tsk);
65487 +void gr_handle_crash(struct task_struct *task, const int sig);
65488 +int gr_handle_signal(const struct task_struct *p, const int sig);
65489 +int gr_check_crash_uid(const kuid_t uid);
65490 +int gr_check_protected_task(const struct task_struct *task);
65491 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
65492 +int gr_acl_handle_mmap(const struct file *file,
65493 + const unsigned long prot);
65494 +int gr_acl_handle_mprotect(const struct file *file,
65495 + const unsigned long prot);
65496 +int gr_check_hidden_task(const struct task_struct *tsk);
65497 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
65498 + const struct vfsmount *mnt);
65499 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
65500 + const struct vfsmount *mnt);
65501 +__u32 gr_acl_handle_access(const struct dentry *dentry,
65502 + const struct vfsmount *mnt, const int fmode);
65503 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
65504 + const struct vfsmount *mnt, umode_t *mode);
65505 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
65506 + const struct vfsmount *mnt);
65507 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
65508 + const struct vfsmount *mnt);
65509 +int gr_handle_ptrace(struct task_struct *task, const long request);
65510 +int gr_handle_proc_ptrace(struct task_struct *task);
65511 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
65512 + const struct vfsmount *mnt);
65513 +int gr_check_crash_exec(const struct file *filp);
65514 +int gr_acl_is_enabled(void);
65515 +void gr_set_kernel_label(struct task_struct *task);
65516 +void gr_set_role_label(struct task_struct *task, const kuid_t uid,
65517 + const kgid_t gid);
65518 +int gr_set_proc_label(const struct dentry *dentry,
65519 + const struct vfsmount *mnt,
65520 + const int unsafe_flags);
65521 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
65522 + const struct vfsmount *mnt);
65523 +__u32 gr_acl_handle_open(const struct dentry *dentry,
65524 + const struct vfsmount *mnt, int acc_mode);
65525 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
65526 + const struct dentry *p_dentry,
65527 + const struct vfsmount *p_mnt,
65528 + int open_flags, int acc_mode, const int imode);
65529 +void gr_handle_create(const struct dentry *dentry,
65530 + const struct vfsmount *mnt);
65531 +void gr_handle_proc_create(const struct dentry *dentry,
65532 + const struct inode *inode);
65533 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
65534 + const struct dentry *parent_dentry,
65535 + const struct vfsmount *parent_mnt,
65536 + const int mode);
65537 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
65538 + const struct dentry *parent_dentry,
65539 + const struct vfsmount *parent_mnt);
65540 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
65541 + const struct vfsmount *mnt);
65542 +void gr_handle_delete(const ino_t ino, const dev_t dev);
65543 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
65544 + const struct vfsmount *mnt);
65545 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
65546 + const struct dentry *parent_dentry,
65547 + const struct vfsmount *parent_mnt,
65548 + const struct filename *from);
65549 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
65550 + const struct dentry *parent_dentry,
65551 + const struct vfsmount *parent_mnt,
65552 + const struct dentry *old_dentry,
65553 + const struct vfsmount *old_mnt, const struct filename *to);
65554 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
65555 +int gr_acl_handle_rename(struct dentry *new_dentry,
65556 + struct dentry *parent_dentry,
65557 + const struct vfsmount *parent_mnt,
65558 + struct dentry *old_dentry,
65559 + struct inode *old_parent_inode,
65560 + struct vfsmount *old_mnt, const struct filename *newname);
65561 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
65562 + struct dentry *old_dentry,
65563 + struct dentry *new_dentry,
65564 + struct vfsmount *mnt, const __u8 replace);
65565 +__u32 gr_check_link(const struct dentry *new_dentry,
65566 + const struct dentry *parent_dentry,
65567 + const struct vfsmount *parent_mnt,
65568 + const struct dentry *old_dentry,
65569 + const struct vfsmount *old_mnt);
65570 +int gr_acl_handle_filldir(const struct file *file, const char *name,
65571 + const unsigned int namelen, const ino_t ino);
65572 +
65573 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
65574 + const struct vfsmount *mnt);
65575 +void gr_acl_handle_exit(void);
65576 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
65577 +int gr_acl_handle_procpidmem(const struct task_struct *task);
65578 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
65579 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
65580 +void gr_audit_ptrace(struct task_struct *task);
65581 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
65582 +void gr_put_exec_file(struct task_struct *task);
65583 +
65584 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
65585 +
65586 +#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
65587 +extern void gr_learn_resource(const struct task_struct *task, const int res,
65588 + const unsigned long wanted, const int gt);
65589 +#else
65590 +static inline void gr_learn_resource(const struct task_struct *task, const int res,
65591 + const unsigned long wanted, const int gt)
65592 +{
65593 +}
65594 +#endif
65595 +
65596 +#ifdef CONFIG_GRKERNSEC_RESLOG
65597 +extern void gr_log_resource(const struct task_struct *task, const int res,
65598 + const unsigned long wanted, const int gt);
65599 +#else
65600 +static inline void gr_log_resource(const struct task_struct *task, const int res,
65601 + const unsigned long wanted, const int gt)
65602 +{
65603 +}
65604 +#endif
65605 +
65606 +#ifdef CONFIG_GRKERNSEC
65607 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
65608 +void gr_handle_vm86(void);
65609 +void gr_handle_mem_readwrite(u64 from, u64 to);
65610 +
65611 +void gr_log_badprocpid(const char *entry);
65612 +
65613 +extern int grsec_enable_dmesg;
65614 +extern int grsec_disable_privio;
65615 +
65616 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65617 +extern kgid_t grsec_proc_gid;
65618 +#endif
65619 +
65620 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65621 +extern int grsec_enable_chroot_findtask;
65622 +#endif
65623 +#ifdef CONFIG_GRKERNSEC_SETXID
65624 +extern int grsec_enable_setxid;
65625 +#endif
65626 +#endif
65627 +
65628 +#endif
65629 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
65630 new file mode 100644
65631 index 0000000..e7ffaaf
65632 --- /dev/null
65633 +++ b/include/linux/grsock.h
65634 @@ -0,0 +1,19 @@
65635 +#ifndef __GRSOCK_H
65636 +#define __GRSOCK_H
65637 +
65638 +extern void gr_attach_curr_ip(const struct sock *sk);
65639 +extern int gr_handle_sock_all(const int family, const int type,
65640 + const int protocol);
65641 +extern int gr_handle_sock_server(const struct sockaddr *sck);
65642 +extern int gr_handle_sock_server_other(const struct sock *sck);
65643 +extern int gr_handle_sock_client(const struct sockaddr *sck);
65644 +extern int gr_search_connect(struct socket * sock,
65645 + struct sockaddr_in * addr);
65646 +extern int gr_search_bind(struct socket * sock,
65647 + struct sockaddr_in * addr);
65648 +extern int gr_search_listen(struct socket * sock);
65649 +extern int gr_search_accept(struct socket * sock);
65650 +extern int gr_search_socket(const int domain, const int type,
65651 + const int protocol);
65652 +
65653 +#endif
65654 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
65655 index ef788b5..ac41b7b 100644
65656 --- a/include/linux/highmem.h
65657 +++ b/include/linux/highmem.h
65658 @@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
65659 kunmap_atomic(kaddr);
65660 }
65661
65662 +static inline void sanitize_highpage(struct page *page)
65663 +{
65664 + void *kaddr;
65665 + unsigned long flags;
65666 +
65667 + local_irq_save(flags);
65668 + kaddr = kmap_atomic(page);
65669 + clear_page(kaddr);
65670 + kunmap_atomic(kaddr);
65671 + local_irq_restore(flags);
65672 +}
65673 +
65674 static inline void zero_user_segments(struct page *page,
65675 unsigned start1, unsigned end1,
65676 unsigned start2, unsigned end2)
65677 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
65678 index d0c4db7..61b3577 100644
65679 --- a/include/linux/i2c.h
65680 +++ b/include/linux/i2c.h
65681 @@ -369,6 +369,7 @@ struct i2c_algorithm {
65682 /* To determine what the adapter supports */
65683 u32 (*functionality) (struct i2c_adapter *);
65684 };
65685 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
65686
65687 /*
65688 * i2c_adapter is the structure used to identify a physical i2c bus along
65689 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
65690 index d23c3c2..eb63c81 100644
65691 --- a/include/linux/i2o.h
65692 +++ b/include/linux/i2o.h
65693 @@ -565,7 +565,7 @@ struct i2o_controller {
65694 struct i2o_device *exec; /* Executive */
65695 #if BITS_PER_LONG == 64
65696 spinlock_t context_list_lock; /* lock for context_list */
65697 - atomic_t context_list_counter; /* needed for unique contexts */
65698 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
65699 struct list_head context_list; /* list of context id's
65700 and pointers */
65701 #endif
65702 diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
65703 index aff7ad8..3942bbd 100644
65704 --- a/include/linux/if_pppox.h
65705 +++ b/include/linux/if_pppox.h
65706 @@ -76,7 +76,7 @@ struct pppox_proto {
65707 int (*ioctl)(struct socket *sock, unsigned int cmd,
65708 unsigned long arg);
65709 struct module *owner;
65710 -};
65711 +} __do_const;
65712
65713 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
65714 extern void unregister_pppox_proto(int proto_num);
65715 diff --git a/include/linux/init.h b/include/linux/init.h
65716 index 10ed4f4..8e8490d 100644
65717 --- a/include/linux/init.h
65718 +++ b/include/linux/init.h
65719 @@ -39,9 +39,36 @@
65720 * Also note, that this data cannot be "const".
65721 */
65722
65723 +#ifdef MODULE
65724 +#define add_init_latent_entropy
65725 +#define add_devinit_latent_entropy
65726 +#define add_cpuinit_latent_entropy
65727 +#define add_meminit_latent_entropy
65728 +#else
65729 +#define add_init_latent_entropy __latent_entropy
65730 +
65731 +#ifdef CONFIG_HOTPLUG
65732 +#define add_devinit_latent_entropy
65733 +#else
65734 +#define add_devinit_latent_entropy __latent_entropy
65735 +#endif
65736 +
65737 +#ifdef CONFIG_HOTPLUG_CPU
65738 +#define add_cpuinit_latent_entropy
65739 +#else
65740 +#define add_cpuinit_latent_entropy __latent_entropy
65741 +#endif
65742 +
65743 +#ifdef CONFIG_MEMORY_HOTPLUG
65744 +#define add_meminit_latent_entropy
65745 +#else
65746 +#define add_meminit_latent_entropy __latent_entropy
65747 +#endif
65748 +#endif
65749 +
65750 /* These are for everybody (although not all archs will actually
65751 discard it in modules) */
65752 -#define __init __section(.init.text) __cold notrace
65753 +#define __init __section(.init.text) __cold notrace add_init_latent_entropy
65754 #define __initdata __section(.init.data)
65755 #define __initconst __constsection(.init.rodata)
65756 #define __exitdata __section(.exit.data)
65757 @@ -94,7 +121,7 @@
65758 #define __exit __section(.exit.text) __exitused __cold notrace
65759
65760 /* Used for HOTPLUG_CPU */
65761 -#define __cpuinit __section(.cpuinit.text) __cold notrace
65762 +#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
65763 #define __cpuinitdata __section(.cpuinit.data)
65764 #define __cpuinitconst __constsection(.cpuinit.rodata)
65765 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
65766 @@ -102,7 +129,7 @@
65767 #define __cpuexitconst __constsection(.cpuexit.rodata)
65768
65769 /* Used for MEMORY_HOTPLUG */
65770 -#define __meminit __section(.meminit.text) __cold notrace
65771 +#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
65772 #define __meminitdata __section(.meminit.data)
65773 #define __meminitconst __constsection(.meminit.rodata)
65774 #define __memexit __section(.memexit.text) __exitused __cold notrace
65775 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
65776 index 6d087c5..401cab8 100644
65777 --- a/include/linux/init_task.h
65778 +++ b/include/linux/init_task.h
65779 @@ -143,6 +143,12 @@ extern struct task_group root_task_group;
65780
65781 #define INIT_TASK_COMM "swapper"
65782
65783 +#ifdef CONFIG_X86
65784 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
65785 +#else
65786 +#define INIT_TASK_THREAD_INFO
65787 +#endif
65788 +
65789 /*
65790 * INIT_TASK is used to set up the first task table, touch at
65791 * your own risk!. Base=0, limit=0x1fffff (=2MB)
65792 @@ -182,6 +188,7 @@ extern struct task_group root_task_group;
65793 RCU_POINTER_INITIALIZER(cred, &init_cred), \
65794 .comm = INIT_TASK_COMM, \
65795 .thread = INIT_THREAD, \
65796 + INIT_TASK_THREAD_INFO \
65797 .fs = &init_fs, \
65798 .files = &init_files, \
65799 .signal = &init_signals, \
65800 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
65801 index 5fa5afe..ac55b25 100644
65802 --- a/include/linux/interrupt.h
65803 +++ b/include/linux/interrupt.h
65804 @@ -430,7 +430,7 @@ enum
65805 /* map softirq index to softirq name. update 'softirq_to_name' in
65806 * kernel/softirq.c when adding a new softirq.
65807 */
65808 -extern char *softirq_to_name[NR_SOFTIRQS];
65809 +extern const char * const softirq_to_name[NR_SOFTIRQS];
65810
65811 /* softirq mask and active fields moved to irq_cpustat_t in
65812 * asm/hardirq.h to get better cache usage. KAO
65813 @@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
65814
65815 struct softirq_action
65816 {
65817 - void (*action)(struct softirq_action *);
65818 -};
65819 + void (*action)(void);
65820 +} __no_const;
65821
65822 asmlinkage void do_softirq(void);
65823 asmlinkage void __do_softirq(void);
65824 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
65825 +extern void open_softirq(int nr, void (*action)(void));
65826 extern void softirq_init(void);
65827 extern void __raise_softirq_irqoff(unsigned int nr);
65828
65829 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
65830 index 6883e19..06992b1 100644
65831 --- a/include/linux/kallsyms.h
65832 +++ b/include/linux/kallsyms.h
65833 @@ -15,7 +15,8 @@
65834
65835 struct module;
65836
65837 -#ifdef CONFIG_KALLSYMS
65838 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
65839 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65840 /* Lookup the address for a symbol. Returns 0 if not found. */
65841 unsigned long kallsyms_lookup_name(const char *name);
65842
65843 @@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
65844 /* Stupid that this does nothing, but I didn't create this mess. */
65845 #define __print_symbol(fmt, addr)
65846 #endif /*CONFIG_KALLSYMS*/
65847 +#else /* when included by kallsyms.c, vsnprintf.c, or
65848 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
65849 +extern void __print_symbol(const char *fmt, unsigned long address);
65850 +extern int sprint_backtrace(char *buffer, unsigned long address);
65851 +extern int sprint_symbol(char *buffer, unsigned long address);
65852 +extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
65853 +const char *kallsyms_lookup(unsigned long addr,
65854 + unsigned long *symbolsize,
65855 + unsigned long *offset,
65856 + char **modname, char *namebuf);
65857 +#endif
65858
65859 /* This macro allows us to keep printk typechecking */
65860 static __printf(1, 2)
65861 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
65862 index 4dff0c6..1ca9b72 100644
65863 --- a/include/linux/kgdb.h
65864 +++ b/include/linux/kgdb.h
65865 @@ -53,7 +53,7 @@ extern int kgdb_connected;
65866 extern int kgdb_io_module_registered;
65867
65868 extern atomic_t kgdb_setting_breakpoint;
65869 -extern atomic_t kgdb_cpu_doing_single_step;
65870 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
65871
65872 extern struct task_struct *kgdb_usethread;
65873 extern struct task_struct *kgdb_contthread;
65874 @@ -255,7 +255,7 @@ struct kgdb_arch {
65875 void (*correct_hw_break)(void);
65876
65877 void (*enable_nmi)(bool on);
65878 -};
65879 +} __do_const;
65880
65881 /**
65882 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
65883 @@ -280,7 +280,7 @@ struct kgdb_io {
65884 void (*pre_exception) (void);
65885 void (*post_exception) (void);
65886 int is_console;
65887 -};
65888 +} __do_const;
65889
65890 extern struct kgdb_arch arch_kgdb_ops;
65891
65892 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
65893 index 5398d58..5883a34 100644
65894 --- a/include/linux/kmod.h
65895 +++ b/include/linux/kmod.h
65896 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
65897 * usually useless though. */
65898 extern __printf(2, 3)
65899 int __request_module(bool wait, const char *name, ...);
65900 +extern __printf(3, 4)
65901 +int ___request_module(bool wait, char *param_name, const char *name, ...);
65902 #define request_module(mod...) __request_module(true, mod)
65903 #define request_module_nowait(mod...) __request_module(false, mod)
65904 #define try_then_request_module(x, mod...) \
65905 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
65906 index 939b112..90b7f44 100644
65907 --- a/include/linux/kobject.h
65908 +++ b/include/linux/kobject.h
65909 @@ -111,7 +111,7 @@ struct kobj_type {
65910 struct attribute **default_attrs;
65911 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
65912 const void *(*namespace)(struct kobject *kobj);
65913 -};
65914 +} __do_const;
65915
65916 struct kobj_uevent_env {
65917 char *envp[UEVENT_NUM_ENVP];
65918 diff --git a/include/linux/kref.h b/include/linux/kref.h
65919 index 4972e6e..de4d19b 100644
65920 --- a/include/linux/kref.h
65921 +++ b/include/linux/kref.h
65922 @@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
65923 static inline int kref_sub(struct kref *kref, unsigned int count,
65924 void (*release)(struct kref *kref))
65925 {
65926 - WARN_ON(release == NULL);
65927 + BUG_ON(release == NULL);
65928
65929 if (atomic_sub_and_test((int) count, &kref->refcount)) {
65930 release(kref);
65931 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
65932 index 2c497ab..afe32f5 100644
65933 --- a/include/linux/kvm_host.h
65934 +++ b/include/linux/kvm_host.h
65935 @@ -418,7 +418,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
65936 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
65937 void vcpu_put(struct kvm_vcpu *vcpu);
65938
65939 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
65940 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
65941 struct module *module);
65942 void kvm_exit(void);
65943
65944 @@ -574,7 +574,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
65945 struct kvm_guest_debug *dbg);
65946 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
65947
65948 -int kvm_arch_init(void *opaque);
65949 +int kvm_arch_init(const void *opaque);
65950 void kvm_arch_exit(void);
65951
65952 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
65953 diff --git a/include/linux/libata.h b/include/linux/libata.h
65954 index 649e5f8..ead5194 100644
65955 --- a/include/linux/libata.h
65956 +++ b/include/linux/libata.h
65957 @@ -915,7 +915,7 @@ struct ata_port_operations {
65958 * fields must be pointers.
65959 */
65960 const struct ata_port_operations *inherits;
65961 -};
65962 +} __do_const;
65963
65964 struct ata_port_info {
65965 unsigned long flags;
65966 diff --git a/include/linux/list.h b/include/linux/list.h
65967 index cc6d2aa..71febca 100644
65968 --- a/include/linux/list.h
65969 +++ b/include/linux/list.h
65970 @@ -112,6 +112,9 @@ extern void __list_del_entry(struct list_head *entry);
65971 extern void list_del(struct list_head *entry);
65972 #endif
65973
65974 +extern void pax_list_add_tail(struct list_head *new, struct list_head *head);
65975 +extern void pax_list_del(struct list_head *entry);
65976 +
65977 /**
65978 * list_replace - replace old entry by new one
65979 * @old : the element to be replaced
65980 diff --git a/include/linux/mm.h b/include/linux/mm.h
65981 index 66e2f7c..ea88001 100644
65982 --- a/include/linux/mm.h
65983 +++ b/include/linux/mm.h
65984 @@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
65985 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
65986 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
65987 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
65988 +
65989 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65990 +#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
65991 +#endif
65992 +
65993 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
65994
65995 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
65996 @@ -231,6 +236,7 @@ struct vm_operations_struct {
65997 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
65998 unsigned long size, pgoff_t pgoff);
65999 };
66000 +typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
66001
66002 struct mmu_gather;
66003 struct inode;
66004 @@ -1068,34 +1074,6 @@ int set_page_dirty(struct page *page);
66005 int set_page_dirty_lock(struct page *page);
66006 int clear_page_dirty_for_io(struct page *page);
66007
66008 -/* Is the vma a continuation of the stack vma above it? */
66009 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
66010 -{
66011 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
66012 -}
66013 -
66014 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
66015 - unsigned long addr)
66016 -{
66017 - return (vma->vm_flags & VM_GROWSDOWN) &&
66018 - (vma->vm_start == addr) &&
66019 - !vma_growsdown(vma->vm_prev, addr);
66020 -}
66021 -
66022 -/* Is the vma a continuation of the stack vma below it? */
66023 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
66024 -{
66025 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
66026 -}
66027 -
66028 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
66029 - unsigned long addr)
66030 -{
66031 - return (vma->vm_flags & VM_GROWSUP) &&
66032 - (vma->vm_end == addr) &&
66033 - !vma_growsup(vma->vm_next, addr);
66034 -}
66035 -
66036 extern pid_t
66037 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
66038
66039 @@ -1198,6 +1176,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
66040 }
66041 #endif
66042
66043 +#ifdef CONFIG_MMU
66044 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
66045 +#else
66046 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
66047 +{
66048 + return __pgprot(0);
66049 +}
66050 +#endif
66051 +
66052 int vma_wants_writenotify(struct vm_area_struct *vma);
66053
66054 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
66055 @@ -1216,8 +1203,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
66056 {
66057 return 0;
66058 }
66059 +
66060 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
66061 + unsigned long address)
66062 +{
66063 + return 0;
66064 +}
66065 #else
66066 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
66067 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
66068 #endif
66069
66070 #ifdef __PAGETABLE_PMD_FOLDED
66071 @@ -1226,8 +1220,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
66072 {
66073 return 0;
66074 }
66075 +
66076 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
66077 + unsigned long address)
66078 +{
66079 + return 0;
66080 +}
66081 #else
66082 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
66083 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
66084 #endif
66085
66086 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
66087 @@ -1245,11 +1246,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
66088 NULL: pud_offset(pgd, address);
66089 }
66090
66091 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
66092 +{
66093 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
66094 + NULL: pud_offset(pgd, address);
66095 +}
66096 +
66097 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
66098 {
66099 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
66100 NULL: pmd_offset(pud, address);
66101 }
66102 +
66103 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
66104 +{
66105 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
66106 + NULL: pmd_offset(pud, address);
66107 +}
66108 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
66109
66110 #if USE_SPLIT_PTLOCKS
66111 @@ -1479,6 +1492,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
66112 unsigned long, unsigned long,
66113 unsigned long, unsigned long);
66114 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
66115 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
66116
66117 /* These take the mm semaphore themselves */
66118 extern unsigned long vm_brk(unsigned long, unsigned long);
66119 @@ -1573,6 +1587,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
66120 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
66121 struct vm_area_struct **pprev);
66122
66123 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
66124 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
66125 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
66126 +
66127 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
66128 NULL if none. Assume start_addr < end_addr. */
66129 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
66130 @@ -1601,15 +1619,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
66131 return vma;
66132 }
66133
66134 -#ifdef CONFIG_MMU
66135 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
66136 -#else
66137 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
66138 -{
66139 - return __pgprot(0);
66140 -}
66141 -#endif
66142 -
66143 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
66144 unsigned long change_prot_numa(struct vm_area_struct *vma,
66145 unsigned long start, unsigned long end);
66146 @@ -1721,7 +1730,7 @@ extern int unpoison_memory(unsigned long pfn);
66147 extern int sysctl_memory_failure_early_kill;
66148 extern int sysctl_memory_failure_recovery;
66149 extern void shake_page(struct page *p, int access);
66150 -extern atomic_long_t mce_bad_pages;
66151 +extern atomic_long_unchecked_t mce_bad_pages;
66152 extern int soft_offline_page(struct page *page, int flags);
66153
66154 extern void dump_page(struct page *page);
66155 @@ -1752,5 +1761,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
66156 static inline bool page_is_guard(struct page *page) { return false; }
66157 #endif /* CONFIG_DEBUG_PAGEALLOC */
66158
66159 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66160 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
66161 +#else
66162 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
66163 +#endif
66164 +
66165 #endif /* __KERNEL__ */
66166 #endif /* _LINUX_MM_H */
66167 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
66168 index f8f5162..6276a36 100644
66169 --- a/include/linux/mm_types.h
66170 +++ b/include/linux/mm_types.h
66171 @@ -288,6 +288,8 @@ struct vm_area_struct {
66172 #ifdef CONFIG_NUMA
66173 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
66174 #endif
66175 +
66176 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
66177 };
66178
66179 struct core_thread {
66180 @@ -362,7 +364,7 @@ struct mm_struct {
66181 unsigned long def_flags;
66182 unsigned long nr_ptes; /* Page table pages */
66183 unsigned long start_code, end_code, start_data, end_data;
66184 - unsigned long start_brk, brk, start_stack;
66185 + unsigned long brk_gap, start_brk, brk, start_stack;
66186 unsigned long arg_start, arg_end, env_start, env_end;
66187
66188 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
66189 @@ -436,6 +438,24 @@ struct mm_struct {
66190 int first_nid;
66191 #endif
66192 struct uprobes_state uprobes_state;
66193 +
66194 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66195 + unsigned long pax_flags;
66196 +#endif
66197 +
66198 +#ifdef CONFIG_PAX_DLRESOLVE
66199 + unsigned long call_dl_resolve;
66200 +#endif
66201 +
66202 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
66203 + unsigned long call_syscall;
66204 +#endif
66205 +
66206 +#ifdef CONFIG_PAX_ASLR
66207 + unsigned long delta_mmap; /* randomized offset */
66208 + unsigned long delta_stack; /* randomized offset */
66209 +#endif
66210 +
66211 };
66212
66213 /* first nid will either be a valid NID or one of these values */
66214 diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
66215 index c5d5278..f0b68c8 100644
66216 --- a/include/linux/mmiotrace.h
66217 +++ b/include/linux/mmiotrace.h
66218 @@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
66219 /* Called from ioremap.c */
66220 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
66221 void __iomem *addr);
66222 -extern void mmiotrace_iounmap(volatile void __iomem *addr);
66223 +extern void mmiotrace_iounmap(const volatile void __iomem *addr);
66224
66225 /* For anyone to insert markers. Remember trailing newline. */
66226 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
66227 @@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
66228 {
66229 }
66230
66231 -static inline void mmiotrace_iounmap(volatile void __iomem *addr)
66232 +static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
66233 {
66234 }
66235
66236 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
66237 index 73b64a3..6562925 100644
66238 --- a/include/linux/mmzone.h
66239 +++ b/include/linux/mmzone.h
66240 @@ -412,7 +412,7 @@ struct zone {
66241 unsigned long flags; /* zone flags, see below */
66242
66243 /* Zone statistics */
66244 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66245 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66246
66247 /*
66248 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
66249 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
66250 index fed3def..7cc3f93 100644
66251 --- a/include/linux/mod_devicetable.h
66252 +++ b/include/linux/mod_devicetable.h
66253 @@ -12,7 +12,7 @@
66254 typedef unsigned long kernel_ulong_t;
66255 #endif
66256
66257 -#define PCI_ANY_ID (~0)
66258 +#define PCI_ANY_ID ((__u16)~0)
66259
66260 struct pci_device_id {
66261 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
66262 @@ -139,7 +139,7 @@ struct usb_device_id {
66263 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
66264 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
66265
66266 -#define HID_ANY_ID (~0)
66267 +#define HID_ANY_ID (~0U)
66268 #define HID_BUS_ANY 0xffff
66269 #define HID_GROUP_ANY 0x0000
66270
66271 diff --git a/include/linux/module.h b/include/linux/module.h
66272 index 1375ee3..d631af0 100644
66273 --- a/include/linux/module.h
66274 +++ b/include/linux/module.h
66275 @@ -17,9 +17,11 @@
66276 #include <linux/moduleparam.h>
66277 #include <linux/tracepoint.h>
66278 #include <linux/export.h>
66279 +#include <linux/fs.h>
66280
66281 #include <linux/percpu.h>
66282 #include <asm/module.h>
66283 +#include <asm/pgtable.h>
66284
66285 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
66286 #define MODULE_SIG_STRING "~Module signature appended~\n"
66287 @@ -281,19 +283,16 @@ struct module
66288 int (*init)(void);
66289
66290 /* If this is non-NULL, vfree after init() returns */
66291 - void *module_init;
66292 + void *module_init_rx, *module_init_rw;
66293
66294 /* Here is the actual code + data, vfree'd on unload. */
66295 - void *module_core;
66296 + void *module_core_rx, *module_core_rw;
66297
66298 /* Here are the sizes of the init and core sections */
66299 - unsigned int init_size, core_size;
66300 + unsigned int init_size_rw, core_size_rw;
66301
66302 /* The size of the executable code in each section. */
66303 - unsigned int init_text_size, core_text_size;
66304 -
66305 - /* Size of RO sections of the module (text+rodata) */
66306 - unsigned int init_ro_size, core_ro_size;
66307 + unsigned int init_size_rx, core_size_rx;
66308
66309 /* Arch-specific module values */
66310 struct mod_arch_specific arch;
66311 @@ -349,6 +348,10 @@ struct module
66312 #ifdef CONFIG_EVENT_TRACING
66313 struct ftrace_event_call **trace_events;
66314 unsigned int num_trace_events;
66315 + struct file_operations trace_id;
66316 + struct file_operations trace_enable;
66317 + struct file_operations trace_format;
66318 + struct file_operations trace_filter;
66319 #endif
66320 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
66321 unsigned int num_ftrace_callsites;
66322 @@ -396,16 +399,46 @@ bool is_module_address(unsigned long addr);
66323 bool is_module_percpu_address(unsigned long addr);
66324 bool is_module_text_address(unsigned long addr);
66325
66326 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
66327 +{
66328 +
66329 +#ifdef CONFIG_PAX_KERNEXEC
66330 + if (ktla_ktva(addr) >= (unsigned long)start &&
66331 + ktla_ktva(addr) < (unsigned long)start + size)
66332 + return 1;
66333 +#endif
66334 +
66335 + return ((void *)addr >= start && (void *)addr < start + size);
66336 +}
66337 +
66338 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
66339 +{
66340 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
66341 +}
66342 +
66343 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
66344 +{
66345 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
66346 +}
66347 +
66348 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
66349 +{
66350 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
66351 +}
66352 +
66353 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
66354 +{
66355 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
66356 +}
66357 +
66358 static inline int within_module_core(unsigned long addr, struct module *mod)
66359 {
66360 - return (unsigned long)mod->module_core <= addr &&
66361 - addr < (unsigned long)mod->module_core + mod->core_size;
66362 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
66363 }
66364
66365 static inline int within_module_init(unsigned long addr, struct module *mod)
66366 {
66367 - return (unsigned long)mod->module_init <= addr &&
66368 - addr < (unsigned long)mod->module_init + mod->init_size;
66369 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
66370 }
66371
66372 /* Search for module by name: must hold module_mutex. */
66373 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
66374 index 560ca53..5ee8d73 100644
66375 --- a/include/linux/moduleloader.h
66376 +++ b/include/linux/moduleloader.h
66377 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
66378
66379 /* Allocator used for allocating struct module, core sections and init
66380 sections. Returns NULL on failure. */
66381 -void *module_alloc(unsigned long size);
66382 +void *module_alloc(unsigned long size) __size_overflow(1);
66383 +
66384 +#ifdef CONFIG_PAX_KERNEXEC
66385 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
66386 +#else
66387 +#define module_alloc_exec(x) module_alloc(x)
66388 +#endif
66389
66390 /* Free memory returned from module_alloc. */
66391 void module_free(struct module *mod, void *module_region);
66392
66393 +#ifdef CONFIG_PAX_KERNEXEC
66394 +void module_free_exec(struct module *mod, void *module_region);
66395 +#else
66396 +#define module_free_exec(x, y) module_free((x), (y))
66397 +#endif
66398 +
66399 /*
66400 * Apply the given relocation to the (simplified) ELF. Return -error
66401 * or 0.
66402 @@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
66403 unsigned int relsec,
66404 struct module *me)
66405 {
66406 +#ifdef CONFIG_MODULES
66407 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
66408 +#endif
66409 return -ENOEXEC;
66410 }
66411 #endif
66412 @@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
66413 unsigned int relsec,
66414 struct module *me)
66415 {
66416 +#ifdef CONFIG_MODULES
66417 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
66418 +#endif
66419 return -ENOEXEC;
66420 }
66421 #endif
66422 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
66423 index 137b419..fe663ec 100644
66424 --- a/include/linux/moduleparam.h
66425 +++ b/include/linux/moduleparam.h
66426 @@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
66427 * @len is usually just sizeof(string).
66428 */
66429 #define module_param_string(name, string, len, perm) \
66430 - static const struct kparam_string __param_string_##name \
66431 + static const struct kparam_string __param_string_##name __used \
66432 = { len, string }; \
66433 __module_param_call(MODULE_PARAM_PREFIX, name, \
66434 &param_ops_string, \
66435 @@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
66436 */
66437 #define module_param_array_named(name, array, type, nump, perm) \
66438 param_check_##type(name, &(array)[0]); \
66439 - static const struct kparam_array __param_arr_##name \
66440 + static const struct kparam_array __param_arr_##name __used \
66441 = { .max = ARRAY_SIZE(array), .num = nump, \
66442 .ops = &param_ops_##type, \
66443 .elemsize = sizeof(array[0]), .elem = array }; \
66444 diff --git a/include/linux/namei.h b/include/linux/namei.h
66445 index 5a5ff57..5ae5070 100644
66446 --- a/include/linux/namei.h
66447 +++ b/include/linux/namei.h
66448 @@ -19,7 +19,7 @@ struct nameidata {
66449 unsigned seq;
66450 int last_type;
66451 unsigned depth;
66452 - char *saved_names[MAX_NESTED_LINKS + 1];
66453 + const char *saved_names[MAX_NESTED_LINKS + 1];
66454 };
66455
66456 /*
66457 @@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
66458
66459 extern void nd_jump_link(struct nameidata *nd, struct path *path);
66460
66461 -static inline void nd_set_link(struct nameidata *nd, char *path)
66462 +static inline void nd_set_link(struct nameidata *nd, const char *path)
66463 {
66464 nd->saved_names[nd->depth] = path;
66465 }
66466
66467 -static inline char *nd_get_link(struct nameidata *nd)
66468 +static inline const char *nd_get_link(const struct nameidata *nd)
66469 {
66470 return nd->saved_names[nd->depth];
66471 }
66472 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
66473 index 9ef07d0..130a5d9 100644
66474 --- a/include/linux/netdevice.h
66475 +++ b/include/linux/netdevice.h
66476 @@ -1012,6 +1012,7 @@ struct net_device_ops {
66477 u32 pid, u32 seq,
66478 struct net_device *dev);
66479 };
66480 +typedef struct net_device_ops __no_const net_device_ops_no_const;
66481
66482 /*
66483 * The DEVICE structure.
66484 @@ -1078,7 +1079,7 @@ struct net_device {
66485 int iflink;
66486
66487 struct net_device_stats stats;
66488 - atomic_long_t rx_dropped; /* dropped packets by core network
66489 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
66490 * Do not use this in drivers.
66491 */
66492
66493 diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
66494 index 7958e84..ed74d7a 100644
66495 --- a/include/linux/netfilter/ipset/ip_set.h
66496 +++ b/include/linux/netfilter/ipset/ip_set.h
66497 @@ -98,7 +98,7 @@ struct ip_set_type_variant {
66498 /* Return true if "b" set is the same as "a"
66499 * according to the create set parameters */
66500 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
66501 -};
66502 +} __do_const;
66503
66504 /* The core set type structure */
66505 struct ip_set_type {
66506 diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
66507 index 4966dde..7d8ce06 100644
66508 --- a/include/linux/netfilter/nfnetlink.h
66509 +++ b/include/linux/netfilter/nfnetlink.h
66510 @@ -16,7 +16,7 @@ struct nfnl_callback {
66511 const struct nlattr * const cda[]);
66512 const struct nla_policy *policy; /* netlink attribute policy */
66513 const u_int16_t attr_count; /* number of nlattr's */
66514 -};
66515 +} __do_const;
66516
66517 struct nfnetlink_subsystem {
66518 const char *name;
66519 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
66520 new file mode 100644
66521 index 0000000..33f4af8
66522 --- /dev/null
66523 +++ b/include/linux/netfilter/xt_gradm.h
66524 @@ -0,0 +1,9 @@
66525 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
66526 +#define _LINUX_NETFILTER_XT_GRADM_H 1
66527 +
66528 +struct xt_gradm_mtinfo {
66529 + __u16 flags;
66530 + __u16 invflags;
66531 +};
66532 +
66533 +#endif
66534 diff --git a/include/linux/notifier.h b/include/linux/notifier.h
66535 index d65746e..62e72c2 100644
66536 --- a/include/linux/notifier.h
66537 +++ b/include/linux/notifier.h
66538 @@ -51,7 +51,8 @@ struct notifier_block {
66539 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
66540 struct notifier_block __rcu *next;
66541 int priority;
66542 -};
66543 +} __do_const;
66544 +typedef struct notifier_block __no_const notifier_block_no_const;
66545
66546 struct atomic_notifier_head {
66547 spinlock_t lock;
66548 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
66549 index a4c5624..79d6d88 100644
66550 --- a/include/linux/oprofile.h
66551 +++ b/include/linux/oprofile.h
66552 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
66553 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
66554 char const * name, ulong * val);
66555
66556 -/** Create a file for read-only access to an atomic_t. */
66557 +/** Create a file for read-only access to an atomic_unchecked_t. */
66558 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
66559 - char const * name, atomic_t * val);
66560 + char const * name, atomic_unchecked_t * val);
66561
66562 /** create a directory */
66563 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
66564 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
66565 index 6bfb2faa..e5bc5e5 100644
66566 --- a/include/linux/perf_event.h
66567 +++ b/include/linux/perf_event.h
66568 @@ -328,8 +328,8 @@ struct perf_event {
66569
66570 enum perf_event_active_state state;
66571 unsigned int attach_state;
66572 - local64_t count;
66573 - atomic64_t child_count;
66574 + local64_t count; /* PaX: fix it one day */
66575 + atomic64_unchecked_t child_count;
66576
66577 /*
66578 * These are the total time in nanoseconds that the event
66579 @@ -380,8 +380,8 @@ struct perf_event {
66580 * These accumulate total time (in nanoseconds) that children
66581 * events have been enabled and running, respectively.
66582 */
66583 - atomic64_t child_total_time_enabled;
66584 - atomic64_t child_total_time_running;
66585 + atomic64_unchecked_t child_total_time_enabled;
66586 + atomic64_unchecked_t child_total_time_running;
66587
66588 /*
66589 * Protect attach/detach and child_list:
66590 @@ -801,7 +801,7 @@ static inline void perf_event_task_tick(void) { }
66591 */
66592 #define perf_cpu_notifier(fn) \
66593 do { \
66594 - static struct notifier_block fn##_nb __cpuinitdata = \
66595 + static struct notifier_block fn##_nb = \
66596 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
66597 unsigned long cpu = smp_processor_id(); \
66598 unsigned long flags; \
66599 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
66600 index ad1a427..6419649 100644
66601 --- a/include/linux/pipe_fs_i.h
66602 +++ b/include/linux/pipe_fs_i.h
66603 @@ -45,9 +45,9 @@ struct pipe_buffer {
66604 struct pipe_inode_info {
66605 wait_queue_head_t wait;
66606 unsigned int nrbufs, curbuf, buffers;
66607 - unsigned int readers;
66608 - unsigned int writers;
66609 - unsigned int waiting_writers;
66610 + atomic_t readers;
66611 + atomic_t writers;
66612 + atomic_t waiting_writers;
66613 unsigned int r_counter;
66614 unsigned int w_counter;
66615 struct page *tmp_page;
66616 diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
66617 index 5f28cae..3d23723 100644
66618 --- a/include/linux/platform_data/usb-ehci-s5p.h
66619 +++ b/include/linux/platform_data/usb-ehci-s5p.h
66620 @@ -14,7 +14,7 @@
66621 struct s5p_ehci_platdata {
66622 int (*phy_init)(struct platform_device *pdev, int type);
66623 int (*phy_exit)(struct platform_device *pdev, int type);
66624 -};
66625 +} __no_const;
66626
66627 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
66628
66629 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
66630 index f271860..6b3bec5 100644
66631 --- a/include/linux/pm_runtime.h
66632 +++ b/include/linux/pm_runtime.h
66633 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
66634
66635 static inline void pm_runtime_mark_last_busy(struct device *dev)
66636 {
66637 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
66638 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
66639 }
66640
66641 #else /* !CONFIG_PM_RUNTIME */
66642 diff --git a/include/linux/poison.h b/include/linux/poison.h
66643 index 2110a81..13a11bb 100644
66644 --- a/include/linux/poison.h
66645 +++ b/include/linux/poison.h
66646 @@ -19,8 +19,8 @@
66647 * under normal circumstances, used to verify that nobody uses
66648 * non-initialized list entries.
66649 */
66650 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
66651 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
66652 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
66653 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
66654
66655 /********** include/linux/timer.h **********/
66656 /*
66657 diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
66658 index c0f44c2..1572583 100644
66659 --- a/include/linux/power/smartreflex.h
66660 +++ b/include/linux/power/smartreflex.h
66661 @@ -238,7 +238,7 @@ struct omap_sr_class_data {
66662 int (*notify)(struct omap_sr *sr, u32 status);
66663 u8 notify_flags;
66664 u8 class_type;
66665 -};
66666 +} __do_const;
66667
66668 /**
66669 * struct omap_sr_nvalue_table - Smartreflex n-target value info
66670 diff --git a/include/linux/printk.h b/include/linux/printk.h
66671 index 9afc01e..92c32e8 100644
66672 --- a/include/linux/printk.h
66673 +++ b/include/linux/printk.h
66674 @@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
66675 extern int printk_needs_cpu(int cpu);
66676 extern void printk_tick(void);
66677
66678 +extern int kptr_restrict;
66679 +
66680 #ifdef CONFIG_PRINTK
66681 asmlinkage __printf(5, 0)
66682 int vprintk_emit(int facility, int level,
66683 @@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
66684
66685 extern int printk_delay_msec;
66686 extern int dmesg_restrict;
66687 -extern int kptr_restrict;
66688
66689 void log_buf_kexec_setup(void);
66690 void __init setup_log_buf(int early);
66691 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
66692 index 32676b3..8f7a182 100644
66693 --- a/include/linux/proc_fs.h
66694 +++ b/include/linux/proc_fs.h
66695 @@ -159,6 +159,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
66696 return proc_create_data(name, mode, parent, proc_fops, NULL);
66697 }
66698
66699 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
66700 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
66701 +{
66702 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66703 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
66704 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66705 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
66706 +#else
66707 + return proc_create_data(name, mode, parent, proc_fops, NULL);
66708 +#endif
66709 +}
66710 +
66711 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
66712 umode_t mode, struct proc_dir_entry *base,
66713 read_proc_t *read_proc, void * data)
66714 diff --git a/include/linux/random.h b/include/linux/random.h
66715 index d984608..d6f0042 100644
66716 --- a/include/linux/random.h
66717 +++ b/include/linux/random.h
66718 @@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
66719 u32 prandom_u32_state(struct rnd_state *);
66720 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
66721
66722 +static inline unsigned long pax_get_random_long(void)
66723 +{
66724 + return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
66725 +}
66726 +
66727 /*
66728 * Handle minimum values for seeds
66729 */
66730 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
66731 index 23b3630..e1bc12b 100644
66732 --- a/include/linux/reboot.h
66733 +++ b/include/linux/reboot.h
66734 @@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
66735 * Architecture-specific implementations of sys_reboot commands.
66736 */
66737
66738 -extern void machine_restart(char *cmd);
66739 -extern void machine_halt(void);
66740 -extern void machine_power_off(void);
66741 +extern void machine_restart(char *cmd) __noreturn;
66742 +extern void machine_halt(void) __noreturn;
66743 +extern void machine_power_off(void) __noreturn;
66744
66745 extern void machine_shutdown(void);
66746 struct pt_regs;
66747 @@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
66748 */
66749
66750 extern void kernel_restart_prepare(char *cmd);
66751 -extern void kernel_restart(char *cmd);
66752 -extern void kernel_halt(void);
66753 -extern void kernel_power_off(void);
66754 +extern void kernel_restart(char *cmd) __noreturn;
66755 +extern void kernel_halt(void) __noreturn;
66756 +extern void kernel_power_off(void) __noreturn;
66757
66758 extern int C_A_D; /* for sysctl */
66759 void ctrl_alt_del(void);
66760 @@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
66761 * Emergency restart, callable from an interrupt handler.
66762 */
66763
66764 -extern void emergency_restart(void);
66765 +extern void emergency_restart(void) __noreturn;
66766 #include <asm/emergency-restart.h>
66767
66768 #endif /* _LINUX_REBOOT_H */
66769 diff --git a/include/linux/regset.h b/include/linux/regset.h
66770 index 8e0c9fe..ac4d221 100644
66771 --- a/include/linux/regset.h
66772 +++ b/include/linux/regset.h
66773 @@ -161,7 +161,8 @@ struct user_regset {
66774 unsigned int align;
66775 unsigned int bias;
66776 unsigned int core_note_type;
66777 -};
66778 +} __do_const;
66779 +typedef struct user_regset __no_const user_regset_no_const;
66780
66781 /**
66782 * struct user_regset_view - available regsets
66783 diff --git a/include/linux/relay.h b/include/linux/relay.h
66784 index 91cacc3..b55ff74 100644
66785 --- a/include/linux/relay.h
66786 +++ b/include/linux/relay.h
66787 @@ -160,7 +160,7 @@ struct rchan_callbacks
66788 * The callback should return 0 if successful, negative if not.
66789 */
66790 int (*remove_buf_file)(struct dentry *dentry);
66791 -};
66792 +} __no_const;
66793
66794 /*
66795 * CONFIG_RELAY kernel API, kernel/relay.c
66796 diff --git a/include/linux/rio.h b/include/linux/rio.h
66797 index a3e7842..d973ca6 100644
66798 --- a/include/linux/rio.h
66799 +++ b/include/linux/rio.h
66800 @@ -339,7 +339,7 @@ struct rio_ops {
66801 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
66802 u64 rstart, u32 size, u32 flags);
66803 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
66804 -};
66805 +} __no_const;
66806
66807 #define RIO_RESOURCE_MEM 0x00000100
66808 #define RIO_RESOURCE_DOORBELL 0x00000200
66809 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
66810 index c20635c..2f5def4 100644
66811 --- a/include/linux/rmap.h
66812 +++ b/include/linux/rmap.h
66813 @@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
66814 void anon_vma_init(void); /* create anon_vma_cachep */
66815 int anon_vma_prepare(struct vm_area_struct *);
66816 void unlink_anon_vmas(struct vm_area_struct *);
66817 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
66818 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
66819 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
66820 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
66821
66822 static inline void anon_vma_merge(struct vm_area_struct *vma,
66823 struct vm_area_struct *next)
66824 diff --git a/include/linux/sched.h b/include/linux/sched.h
66825 index d211247..d64a165 100644
66826 --- a/include/linux/sched.h
66827 +++ b/include/linux/sched.h
66828 @@ -61,6 +61,7 @@ struct bio_list;
66829 struct fs_struct;
66830 struct perf_event_context;
66831 struct blk_plug;
66832 +struct linux_binprm;
66833
66834 /*
66835 * List of flags we want to share for kernel threads,
66836 @@ -354,10 +355,23 @@ struct user_namespace;
66837 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
66838
66839 extern int sysctl_max_map_count;
66840 +extern unsigned long sysctl_heap_stack_gap;
66841
66842 #include <linux/aio.h>
66843
66844 #ifdef CONFIG_MMU
66845 +
66846 +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
66847 +extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
66848 +#else
66849 +static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
66850 +{
66851 + return 0;
66852 +}
66853 +#endif
66854 +
66855 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
66856 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
66857 extern void arch_pick_mmap_layout(struct mm_struct *mm);
66858 extern unsigned long
66859 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
66860 @@ -639,6 +653,17 @@ struct signal_struct {
66861 #ifdef CONFIG_TASKSTATS
66862 struct taskstats *stats;
66863 #endif
66864 +
66865 +#ifdef CONFIG_GRKERNSEC
66866 + u32 curr_ip;
66867 + u32 saved_ip;
66868 + u32 gr_saddr;
66869 + u32 gr_daddr;
66870 + u16 gr_sport;
66871 + u16 gr_dport;
66872 + u8 used_accept:1;
66873 +#endif
66874 +
66875 #ifdef CONFIG_AUDIT
66876 unsigned audit_tty;
66877 struct tty_audit_buf *tty_audit_buf;
66878 @@ -717,6 +742,11 @@ struct user_struct {
66879 struct key *session_keyring; /* UID's default session keyring */
66880 #endif
66881
66882 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
66883 + unsigned int banned;
66884 + unsigned long ban_expires;
66885 +#endif
66886 +
66887 /* Hash table maintenance information */
66888 struct hlist_node uidhash_node;
66889 kuid_t uid;
66890 @@ -1360,8 +1390,8 @@ struct task_struct {
66891 struct list_head thread_group;
66892
66893 struct completion *vfork_done; /* for vfork() */
66894 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
66895 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
66896 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
66897 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
66898
66899 cputime_t utime, stime, utimescaled, stimescaled;
66900 cputime_t gtime;
66901 @@ -1377,11 +1407,6 @@ struct task_struct {
66902 struct task_cputime cputime_expires;
66903 struct list_head cpu_timers[3];
66904
66905 -/* process credentials */
66906 - const struct cred __rcu *real_cred; /* objective and real subjective task
66907 - * credentials (COW) */
66908 - const struct cred __rcu *cred; /* effective (overridable) subjective task
66909 - * credentials (COW) */
66910 char comm[TASK_COMM_LEN]; /* executable name excluding path
66911 - access with [gs]et_task_comm (which lock
66912 it with task_lock())
66913 @@ -1398,6 +1423,10 @@ struct task_struct {
66914 #endif
66915 /* CPU-specific state of this task */
66916 struct thread_struct thread;
66917 +/* thread_info moved to task_struct */
66918 +#ifdef CONFIG_X86
66919 + struct thread_info tinfo;
66920 +#endif
66921 /* filesystem information */
66922 struct fs_struct *fs;
66923 /* open file information */
66924 @@ -1471,6 +1500,10 @@ struct task_struct {
66925 gfp_t lockdep_reclaim_gfp;
66926 #endif
66927
66928 +/* process credentials */
66929 + const struct cred __rcu *real_cred; /* objective and real subjective task
66930 + * credentials (COW) */
66931 +
66932 /* journalling filesystem info */
66933 void *journal_info;
66934
66935 @@ -1509,6 +1542,10 @@ struct task_struct {
66936 /* cg_list protected by css_set_lock and tsk->alloc_lock */
66937 struct list_head cg_list;
66938 #endif
66939 +
66940 + const struct cred __rcu *cred; /* effective (overridable) subjective task
66941 + * credentials (COW) */
66942 +
66943 #ifdef CONFIG_FUTEX
66944 struct robust_list_head __user *robust_list;
66945 #ifdef CONFIG_COMPAT
66946 @@ -1605,8 +1642,74 @@ struct task_struct {
66947 #ifdef CONFIG_UPROBES
66948 struct uprobe_task *utask;
66949 #endif
66950 +
66951 +#ifdef CONFIG_GRKERNSEC
66952 + /* grsecurity */
66953 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66954 + u64 exec_id;
66955 +#endif
66956 +#ifdef CONFIG_GRKERNSEC_SETXID
66957 + const struct cred *delayed_cred;
66958 +#endif
66959 + struct dentry *gr_chroot_dentry;
66960 + struct acl_subject_label *acl;
66961 + struct acl_role_label *role;
66962 + struct file *exec_file;
66963 + unsigned long brute_expires;
66964 + u16 acl_role_id;
66965 + /* is this the task that authenticated to the special role */
66966 + u8 acl_sp_role;
66967 + u8 is_writable;
66968 + u8 brute;
66969 + u8 gr_is_chrooted;
66970 +#endif
66971 +
66972 };
66973
66974 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
66975 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
66976 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
66977 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
66978 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
66979 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
66980 +
66981 +#ifdef CONFIG_PAX_SOFTMODE
66982 +extern int pax_softmode;
66983 +#endif
66984 +
66985 +extern int pax_check_flags(unsigned long *);
66986 +
66987 +/* if tsk != current then task_lock must be held on it */
66988 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66989 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
66990 +{
66991 + if (likely(tsk->mm))
66992 + return tsk->mm->pax_flags;
66993 + else
66994 + return 0UL;
66995 +}
66996 +
66997 +/* if tsk != current then task_lock must be held on it */
66998 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
66999 +{
67000 + if (likely(tsk->mm)) {
67001 + tsk->mm->pax_flags = flags;
67002 + return 0;
67003 + }
67004 + return -EINVAL;
67005 +}
67006 +#endif
67007 +
67008 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
67009 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
67010 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
67011 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
67012 +#endif
67013 +
67014 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
67015 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
67016 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
67017 +
67018 /* Future-safe accessor for struct task_struct's cpus_allowed. */
67019 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
67020
67021 @@ -2155,7 +2258,9 @@ void yield(void);
67022 extern struct exec_domain default_exec_domain;
67023
67024 union thread_union {
67025 +#ifndef CONFIG_X86
67026 struct thread_info thread_info;
67027 +#endif
67028 unsigned long stack[THREAD_SIZE/sizeof(long)];
67029 };
67030
67031 @@ -2188,6 +2293,7 @@ extern struct pid_namespace init_pid_ns;
67032 */
67033
67034 extern struct task_struct *find_task_by_vpid(pid_t nr);
67035 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
67036 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
67037 struct pid_namespace *ns);
67038
67039 @@ -2344,7 +2450,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
67040 extern void exit_itimers(struct signal_struct *);
67041 extern void flush_itimer_signals(void);
67042
67043 -extern void do_group_exit(int);
67044 +extern __noreturn void do_group_exit(int);
67045
67046 extern int allow_signal(int);
67047 extern int disallow_signal(int);
67048 @@ -2545,9 +2651,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
67049
67050 #endif
67051
67052 -static inline int object_is_on_stack(void *obj)
67053 +static inline int object_starts_on_stack(void *obj)
67054 {
67055 - void *stack = task_stack_page(current);
67056 + const void *stack = task_stack_page(current);
67057
67058 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
67059 }
67060 diff --git a/include/linux/security.h b/include/linux/security.h
67061 index eee7478..290f7ba 100644
67062 --- a/include/linux/security.h
67063 +++ b/include/linux/security.h
67064 @@ -26,6 +26,7 @@
67065 #include <linux/capability.h>
67066 #include <linux/slab.h>
67067 #include <linux/err.h>
67068 +#include <linux/grsecurity.h>
67069
67070 struct linux_binprm;
67071 struct cred;
67072 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
67073 index 68a04a3..866e6a1 100644
67074 --- a/include/linux/seq_file.h
67075 +++ b/include/linux/seq_file.h
67076 @@ -26,6 +26,9 @@ struct seq_file {
67077 struct mutex lock;
67078 const struct seq_operations *op;
67079 int poll_event;
67080 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67081 + u64 exec_id;
67082 +#endif
67083 #ifdef CONFIG_USER_NS
67084 struct user_namespace *user_ns;
67085 #endif
67086 @@ -38,6 +41,7 @@ struct seq_operations {
67087 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
67088 int (*show) (struct seq_file *m, void *v);
67089 };
67090 +typedef struct seq_operations __no_const seq_operations_no_const;
67091
67092 #define SEQ_SKIP 1
67093
67094 diff --git a/include/linux/shm.h b/include/linux/shm.h
67095 index 429c199..4d42e38 100644
67096 --- a/include/linux/shm.h
67097 +++ b/include/linux/shm.h
67098 @@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
67099
67100 /* The task created the shm object. NULL if the task is dead. */
67101 struct task_struct *shm_creator;
67102 +#ifdef CONFIG_GRKERNSEC
67103 + time_t shm_createtime;
67104 + pid_t shm_lapid;
67105 +#endif
67106 };
67107
67108 /* shm_mode upper byte flags */
67109 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
67110 index 320e976..fd52553 100644
67111 --- a/include/linux/skbuff.h
67112 +++ b/include/linux/skbuff.h
67113 @@ -590,7 +590,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
67114 extern struct sk_buff *__alloc_skb(unsigned int size,
67115 gfp_t priority, int flags, int node);
67116 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
67117 -static inline struct sk_buff *alloc_skb(unsigned int size,
67118 +static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
67119 gfp_t priority)
67120 {
67121 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
67122 @@ -700,7 +700,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
67123 */
67124 static inline int skb_queue_empty(const struct sk_buff_head *list)
67125 {
67126 - return list->next == (struct sk_buff *)list;
67127 + return list->next == (const struct sk_buff *)list;
67128 }
67129
67130 /**
67131 @@ -713,7 +713,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
67132 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
67133 const struct sk_buff *skb)
67134 {
67135 - return skb->next == (struct sk_buff *)list;
67136 + return skb->next == (const struct sk_buff *)list;
67137 }
67138
67139 /**
67140 @@ -726,7 +726,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
67141 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
67142 const struct sk_buff *skb)
67143 {
67144 - return skb->prev == (struct sk_buff *)list;
67145 + return skb->prev == (const struct sk_buff *)list;
67146 }
67147
67148 /**
67149 @@ -1722,7 +1722,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
67150 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
67151 */
67152 #ifndef NET_SKB_PAD
67153 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
67154 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
67155 #endif
67156
67157 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
67158 @@ -2300,7 +2300,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
67159 int noblock, int *err);
67160 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
67161 struct poll_table_struct *wait);
67162 -extern int skb_copy_datagram_iovec(const struct sk_buff *from,
67163 +extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
67164 int offset, struct iovec *to,
67165 int size);
67166 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
67167 diff --git a/include/linux/slab.h b/include/linux/slab.h
67168 index 5d168d7..720bff3 100644
67169 --- a/include/linux/slab.h
67170 +++ b/include/linux/slab.h
67171 @@ -12,13 +12,20 @@
67172 #include <linux/gfp.h>
67173 #include <linux/types.h>
67174 #include <linux/workqueue.h>
67175 -
67176 +#include <linux/err.h>
67177
67178 /*
67179 * Flags to pass to kmem_cache_create().
67180 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
67181 */
67182 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
67183 +
67184 +#ifdef CONFIG_PAX_USERCOPY_SLABS
67185 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
67186 +#else
67187 +#define SLAB_USERCOPY 0x00000000UL
67188 +#endif
67189 +
67190 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
67191 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
67192 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
67193 @@ -89,10 +96,13 @@
67194 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
67195 * Both make kfree a no-op.
67196 */
67197 -#define ZERO_SIZE_PTR ((void *)16)
67198 +#define ZERO_SIZE_PTR \
67199 +({ \
67200 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
67201 + (void *)(-MAX_ERRNO-1L); \
67202 +})
67203
67204 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
67205 - (unsigned long)ZERO_SIZE_PTR)
67206 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
67207
67208 /*
67209 * Common fields provided in kmem_cache by all slab allocators
67210 @@ -112,7 +122,7 @@ struct kmem_cache {
67211 unsigned int align; /* Alignment as calculated */
67212 unsigned long flags; /* Active flags on the slab */
67213 const char *name; /* Slab name for sysfs */
67214 - int refcount; /* Use counter */
67215 + atomic_t refcount; /* Use counter */
67216 void (*ctor)(void *); /* Called on object slot creation */
67217 struct list_head list; /* List of all slab caches on the system */
67218 };
67219 @@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
67220 void kfree(const void *);
67221 void kzfree(const void *);
67222 size_t ksize(const void *);
67223 +const char *check_heap_object(const void *ptr, unsigned long n);
67224 +bool is_usercopy_object(const void *ptr);
67225
67226 /*
67227 * Allocator specific definitions. These are mainly used to establish optimized
67228 @@ -311,6 +323,7 @@ size_t ksize(const void *);
67229 * for general use, and so are not documented here. For a full list of
67230 * potential flags, always refer to linux/gfp.h.
67231 */
67232 +
67233 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
67234 {
67235 if (size != 0 && n > SIZE_MAX / size)
67236 @@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
67237 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
67238 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
67239 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
67240 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
67241 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
67242 #define kmalloc_track_caller(size, flags) \
67243 __kmalloc_track_caller(size, flags, _RET_IP_)
67244 #else
67245 @@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
67246 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
67247 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
67248 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
67249 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
67250 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
67251 #define kmalloc_node_track_caller(size, flags, node) \
67252 __kmalloc_node_track_caller(size, flags, node, \
67253 _RET_IP_)
67254 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
67255 index 8bb6e0e..8eb0dbe 100644
67256 --- a/include/linux/slab_def.h
67257 +++ b/include/linux/slab_def.h
67258 @@ -52,7 +52,7 @@ struct kmem_cache {
67259 /* 4) cache creation/removal */
67260 const char *name;
67261 struct list_head list;
67262 - int refcount;
67263 + atomic_t refcount;
67264 int object_size;
67265 int align;
67266
67267 @@ -68,10 +68,10 @@ struct kmem_cache {
67268 unsigned long node_allocs;
67269 unsigned long node_frees;
67270 unsigned long node_overflow;
67271 - atomic_t allochit;
67272 - atomic_t allocmiss;
67273 - atomic_t freehit;
67274 - atomic_t freemiss;
67275 + atomic_unchecked_t allochit;
67276 + atomic_unchecked_t allocmiss;
67277 + atomic_unchecked_t freehit;
67278 + atomic_unchecked_t freemiss;
67279
67280 /*
67281 * If debugging is enabled, then the allocator can add additional
67282 @@ -111,11 +111,16 @@ struct cache_sizes {
67283 #ifdef CONFIG_ZONE_DMA
67284 struct kmem_cache *cs_dmacachep;
67285 #endif
67286 +
67287 +#ifdef CONFIG_PAX_USERCOPY_SLABS
67288 + struct kmem_cache *cs_usercopycachep;
67289 +#endif
67290 +
67291 };
67292 extern struct cache_sizes malloc_sizes[];
67293
67294 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
67295 -void *__kmalloc(size_t size, gfp_t flags);
67296 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
67297
67298 #ifdef CONFIG_TRACING
67299 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
67300 @@ -152,6 +157,13 @@ found:
67301 cachep = malloc_sizes[i].cs_dmacachep;
67302 else
67303 #endif
67304 +
67305 +#ifdef CONFIG_PAX_USERCOPY_SLABS
67306 + if (flags & GFP_USERCOPY)
67307 + cachep = malloc_sizes[i].cs_usercopycachep;
67308 + else
67309 +#endif
67310 +
67311 cachep = malloc_sizes[i].cs_cachep;
67312
67313 ret = kmem_cache_alloc_trace(cachep, flags, size);
67314 @@ -162,7 +174,7 @@ found:
67315 }
67316
67317 #ifdef CONFIG_NUMA
67318 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
67319 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
67320 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
67321
67322 #ifdef CONFIG_TRACING
67323 @@ -205,6 +217,13 @@ found:
67324 cachep = malloc_sizes[i].cs_dmacachep;
67325 else
67326 #endif
67327 +
67328 +#ifdef CONFIG_PAX_USERCOPY_SLABS
67329 + if (flags & GFP_USERCOPY)
67330 + cachep = malloc_sizes[i].cs_usercopycachep;
67331 + else
67332 +#endif
67333 +
67334 cachep = malloc_sizes[i].cs_cachep;
67335
67336 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
67337 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
67338 index f28e14a..7831211 100644
67339 --- a/include/linux/slob_def.h
67340 +++ b/include/linux/slob_def.h
67341 @@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
67342 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
67343 }
67344
67345 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
67346 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
67347
67348 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
67349 {
67350 @@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
67351 return __kmalloc_node(size, flags, NUMA_NO_NODE);
67352 }
67353
67354 -static __always_inline void *__kmalloc(size_t size, gfp_t flags)
67355 +static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
67356 {
67357 return kmalloc(size, flags);
67358 }
67359 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
67360 index 9db4825..ed42fb5 100644
67361 --- a/include/linux/slub_def.h
67362 +++ b/include/linux/slub_def.h
67363 @@ -91,7 +91,7 @@ struct kmem_cache {
67364 struct kmem_cache_order_objects max;
67365 struct kmem_cache_order_objects min;
67366 gfp_t allocflags; /* gfp flags to use on each alloc */
67367 - int refcount; /* Refcount for slab cache destroy */
67368 + atomic_t refcount; /* Refcount for slab cache destroy */
67369 void (*ctor)(void *);
67370 int inuse; /* Offset to metadata */
67371 int align; /* Alignment */
67372 @@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
67373 * Sorry that the following has to be that ugly but some versions of GCC
67374 * have trouble with constant propagation and loops.
67375 */
67376 -static __always_inline int kmalloc_index(size_t size)
67377 +static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
67378 {
67379 if (!size)
67380 return 0;
67381 @@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
67382 }
67383
67384 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
67385 -void *__kmalloc(size_t size, gfp_t flags);
67386 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
67387
67388 static __always_inline void *
67389 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
67390 @@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
67391 }
67392 #endif
67393
67394 -static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
67395 +static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
67396 {
67397 unsigned int order = get_order(size);
67398 return kmalloc_order_trace(size, flags, order);
67399 @@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
67400 }
67401
67402 #ifdef CONFIG_NUMA
67403 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
67404 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
67405 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
67406
67407 #ifdef CONFIG_TRACING
67408 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
67409 index 680f9a3..f13aeb0 100644
67410 --- a/include/linux/sonet.h
67411 +++ b/include/linux/sonet.h
67412 @@ -7,7 +7,7 @@
67413 #include <uapi/linux/sonet.h>
67414
67415 struct k_sonet_stats {
67416 -#define __HANDLE_ITEM(i) atomic_t i
67417 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
67418 __SONET_ITEMS
67419 #undef __HANDLE_ITEM
67420 };
67421 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
67422 index 34206b8..f019e06 100644
67423 --- a/include/linux/sunrpc/clnt.h
67424 +++ b/include/linux/sunrpc/clnt.h
67425 @@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
67426 {
67427 switch (sap->sa_family) {
67428 case AF_INET:
67429 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
67430 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
67431 case AF_INET6:
67432 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
67433 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
67434 }
67435 return 0;
67436 }
67437 @@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
67438 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
67439 const struct sockaddr *src)
67440 {
67441 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
67442 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
67443 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
67444
67445 dsin->sin_family = ssin->sin_family;
67446 @@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
67447 if (sa->sa_family != AF_INET6)
67448 return 0;
67449
67450 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
67451 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
67452 }
67453
67454 #endif /* __KERNEL__ */
67455 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
67456 index 0b8e3e6..33e0a01 100644
67457 --- a/include/linux/sunrpc/svc_rdma.h
67458 +++ b/include/linux/sunrpc/svc_rdma.h
67459 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
67460 extern unsigned int svcrdma_max_requests;
67461 extern unsigned int svcrdma_max_req_size;
67462
67463 -extern atomic_t rdma_stat_recv;
67464 -extern atomic_t rdma_stat_read;
67465 -extern atomic_t rdma_stat_write;
67466 -extern atomic_t rdma_stat_sq_starve;
67467 -extern atomic_t rdma_stat_rq_starve;
67468 -extern atomic_t rdma_stat_rq_poll;
67469 -extern atomic_t rdma_stat_rq_prod;
67470 -extern atomic_t rdma_stat_sq_poll;
67471 -extern atomic_t rdma_stat_sq_prod;
67472 +extern atomic_unchecked_t rdma_stat_recv;
67473 +extern atomic_unchecked_t rdma_stat_read;
67474 +extern atomic_unchecked_t rdma_stat_write;
67475 +extern atomic_unchecked_t rdma_stat_sq_starve;
67476 +extern atomic_unchecked_t rdma_stat_rq_starve;
67477 +extern atomic_unchecked_t rdma_stat_rq_poll;
67478 +extern atomic_unchecked_t rdma_stat_rq_prod;
67479 +extern atomic_unchecked_t rdma_stat_sq_poll;
67480 +extern atomic_unchecked_t rdma_stat_sq_prod;
67481
67482 #define RPCRDMA_VERSION 1
67483
67484 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
67485 index 14a8ff2..21fe4c7 100644
67486 --- a/include/linux/sysctl.h
67487 +++ b/include/linux/sysctl.h
67488 @@ -41,6 +41,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
67489
67490 extern int proc_dostring(struct ctl_table *, int,
67491 void __user *, size_t *, loff_t *);
67492 +extern int proc_dostring_modpriv(struct ctl_table *, int,
67493 + void __user *, size_t *, loff_t *);
67494 extern int proc_dointvec(struct ctl_table *, int,
67495 void __user *, size_t *, loff_t *);
67496 extern int proc_dointvec_minmax(struct ctl_table *, int,
67497 diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
67498 index 7faf933..c1ad32c 100644
67499 --- a/include/linux/sysrq.h
67500 +++ b/include/linux/sysrq.h
67501 @@ -15,6 +15,7 @@
67502 #define _LINUX_SYSRQ_H
67503
67504 #include <linux/errno.h>
67505 +#include <linux/compiler.h>
67506 #include <linux/types.h>
67507
67508 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
67509 @@ -36,7 +37,7 @@ struct sysrq_key_op {
67510 char *help_msg;
67511 char *action_msg;
67512 int enable_mask;
67513 -};
67514 +} __do_const;
67515
67516 #ifdef CONFIG_MAGIC_SYSRQ
67517
67518 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
67519 index e7e0473..39b7b52 100644
67520 --- a/include/linux/thread_info.h
67521 +++ b/include/linux/thread_info.h
67522 @@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
67523 #error "no set_restore_sigmask() provided and default one won't work"
67524 #endif
67525
67526 +extern void __check_object_size(const void *ptr, unsigned long n, bool to);
67527 +static inline void check_object_size(const void *ptr, unsigned long n, bool to)
67528 +{
67529 +#ifndef CONFIG_PAX_USERCOPY_DEBUG
67530 + if (!__builtin_constant_p(n))
67531 +#endif
67532 + __check_object_size(ptr, n, to);
67533 +}
67534 +
67535 #endif /* __KERNEL__ */
67536
67537 #endif /* _LINUX_THREAD_INFO_H */
67538 diff --git a/include/linux/tty.h b/include/linux/tty.h
67539 index 8db1b56..c16a040 100644
67540 --- a/include/linux/tty.h
67541 +++ b/include/linux/tty.h
67542 @@ -194,7 +194,7 @@ struct tty_port {
67543 const struct tty_port_operations *ops; /* Port operations */
67544 spinlock_t lock; /* Lock protecting tty field */
67545 int blocked_open; /* Waiting to open */
67546 - int count; /* Usage count */
67547 + atomic_t count; /* Usage count */
67548 wait_queue_head_t open_wait; /* Open waiters */
67549 wait_queue_head_t close_wait; /* Close waiters */
67550 wait_queue_head_t delta_msr_wait; /* Modem status change */
67551 @@ -490,7 +490,7 @@ extern int tty_port_open(struct tty_port *port,
67552 struct tty_struct *tty, struct file *filp);
67553 static inline int tty_port_users(struct tty_port *port)
67554 {
67555 - return port->count + port->blocked_open;
67556 + return atomic_read(&port->count) + port->blocked_open;
67557 }
67558
67559 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
67560 diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
67561 index dd976cf..e272742 100644
67562 --- a/include/linux/tty_driver.h
67563 +++ b/include/linux/tty_driver.h
67564 @@ -284,7 +284,7 @@ struct tty_operations {
67565 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
67566 #endif
67567 const struct file_operations *proc_fops;
67568 -};
67569 +} __do_const;
67570
67571 struct tty_driver {
67572 int magic; /* magic number for this structure */
67573 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
67574 index fb79dd8d..07d4773 100644
67575 --- a/include/linux/tty_ldisc.h
67576 +++ b/include/linux/tty_ldisc.h
67577 @@ -149,7 +149,7 @@ struct tty_ldisc_ops {
67578
67579 struct module *owner;
67580
67581 - int refcount;
67582 + atomic_t refcount;
67583 };
67584
67585 struct tty_ldisc {
67586 diff --git a/include/linux/types.h b/include/linux/types.h
67587 index 4d118ba..c3ee9bf 100644
67588 --- a/include/linux/types.h
67589 +++ b/include/linux/types.h
67590 @@ -176,10 +176,26 @@ typedef struct {
67591 int counter;
67592 } atomic_t;
67593
67594 +#ifdef CONFIG_PAX_REFCOUNT
67595 +typedef struct {
67596 + int counter;
67597 +} atomic_unchecked_t;
67598 +#else
67599 +typedef atomic_t atomic_unchecked_t;
67600 +#endif
67601 +
67602 #ifdef CONFIG_64BIT
67603 typedef struct {
67604 long counter;
67605 } atomic64_t;
67606 +
67607 +#ifdef CONFIG_PAX_REFCOUNT
67608 +typedef struct {
67609 + long counter;
67610 +} atomic64_unchecked_t;
67611 +#else
67612 +typedef atomic64_t atomic64_unchecked_t;
67613 +#endif
67614 #endif
67615
67616 struct list_head {
67617 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
67618 index 5ca0951..ab496a5 100644
67619 --- a/include/linux/uaccess.h
67620 +++ b/include/linux/uaccess.h
67621 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
67622 long ret; \
67623 mm_segment_t old_fs = get_fs(); \
67624 \
67625 - set_fs(KERNEL_DS); \
67626 pagefault_disable(); \
67627 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
67628 - pagefault_enable(); \
67629 + set_fs(KERNEL_DS); \
67630 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
67631 set_fs(old_fs); \
67632 + pagefault_enable(); \
67633 ret; \
67634 })
67635
67636 diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
67637 index 8e522cbc..1b67af5 100644
67638 --- a/include/linux/uidgid.h
67639 +++ b/include/linux/uidgid.h
67640 @@ -197,4 +197,7 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
67641
67642 #endif /* CONFIG_USER_NS */
67643
67644 +#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
67645 +#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
67646 +
67647 #endif /* _LINUX_UIDGID_H */
67648 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
67649 index 99c1b4d..bb94261 100644
67650 --- a/include/linux/unaligned/access_ok.h
67651 +++ b/include/linux/unaligned/access_ok.h
67652 @@ -6,32 +6,32 @@
67653
67654 static inline u16 get_unaligned_le16(const void *p)
67655 {
67656 - return le16_to_cpup((__le16 *)p);
67657 + return le16_to_cpup((const __le16 *)p);
67658 }
67659
67660 static inline u32 get_unaligned_le32(const void *p)
67661 {
67662 - return le32_to_cpup((__le32 *)p);
67663 + return le32_to_cpup((const __le32 *)p);
67664 }
67665
67666 static inline u64 get_unaligned_le64(const void *p)
67667 {
67668 - return le64_to_cpup((__le64 *)p);
67669 + return le64_to_cpup((const __le64 *)p);
67670 }
67671
67672 static inline u16 get_unaligned_be16(const void *p)
67673 {
67674 - return be16_to_cpup((__be16 *)p);
67675 + return be16_to_cpup((const __be16 *)p);
67676 }
67677
67678 static inline u32 get_unaligned_be32(const void *p)
67679 {
67680 - return be32_to_cpup((__be32 *)p);
67681 + return be32_to_cpup((const __be32 *)p);
67682 }
67683
67684 static inline u64 get_unaligned_be64(const void *p)
67685 {
67686 - return be64_to_cpup((__be64 *)p);
67687 + return be64_to_cpup((const __be64 *)p);
67688 }
67689
67690 static inline void put_unaligned_le16(u16 val, void *p)
67691 diff --git a/include/linux/usb.h b/include/linux/usb.h
67692 index 4d22d0f..ac43c2f 100644
67693 --- a/include/linux/usb.h
67694 +++ b/include/linux/usb.h
67695 @@ -554,7 +554,7 @@ struct usb_device {
67696 int maxchild;
67697
67698 u32 quirks;
67699 - atomic_t urbnum;
67700 + atomic_unchecked_t urbnum;
67701
67702 unsigned long active_duration;
67703
67704 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
67705 index c5d36c6..108f4f9 100644
67706 --- a/include/linux/usb/renesas_usbhs.h
67707 +++ b/include/linux/usb/renesas_usbhs.h
67708 @@ -39,7 +39,7 @@ enum {
67709 */
67710 struct renesas_usbhs_driver_callback {
67711 int (*notify_hotplug)(struct platform_device *pdev);
67712 -};
67713 +} __no_const;
67714
67715 /*
67716 * callback functions for platform
67717 diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
67718 index b9bd2e6..4ce0093 100644
67719 --- a/include/linux/user_namespace.h
67720 +++ b/include/linux/user_namespace.h
67721 @@ -21,7 +21,7 @@ struct user_namespace {
67722 struct uid_gid_map uid_map;
67723 struct uid_gid_map gid_map;
67724 struct uid_gid_map projid_map;
67725 - struct kref kref;
67726 + atomic_t count;
67727 struct user_namespace *parent;
67728 kuid_t owner;
67729 kgid_t group;
67730 @@ -35,18 +35,18 @@ extern struct user_namespace init_user_ns;
67731 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
67732 {
67733 if (ns)
67734 - kref_get(&ns->kref);
67735 + atomic_inc(&ns->count);
67736 return ns;
67737 }
67738
67739 extern int create_user_ns(struct cred *new);
67740 extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
67741 -extern void free_user_ns(struct kref *kref);
67742 +extern void free_user_ns(struct user_namespace *ns);
67743
67744 static inline void put_user_ns(struct user_namespace *ns)
67745 {
67746 - if (ns)
67747 - kref_put(&ns->kref, free_user_ns);
67748 + if (ns && atomic_dec_and_test(&ns->count))
67749 + free_user_ns(ns);
67750 }
67751
67752 struct seq_operations;
67753 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
67754 index 6f8fbcf..8259001 100644
67755 --- a/include/linux/vermagic.h
67756 +++ b/include/linux/vermagic.h
67757 @@ -25,9 +25,35 @@
67758 #define MODULE_ARCH_VERMAGIC ""
67759 #endif
67760
67761 +#ifdef CONFIG_PAX_REFCOUNT
67762 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
67763 +#else
67764 +#define MODULE_PAX_REFCOUNT ""
67765 +#endif
67766 +
67767 +#ifdef CONSTIFY_PLUGIN
67768 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
67769 +#else
67770 +#define MODULE_CONSTIFY_PLUGIN ""
67771 +#endif
67772 +
67773 +#ifdef STACKLEAK_PLUGIN
67774 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
67775 +#else
67776 +#define MODULE_STACKLEAK_PLUGIN ""
67777 +#endif
67778 +
67779 +#ifdef CONFIG_GRKERNSEC
67780 +#define MODULE_GRSEC "GRSEC "
67781 +#else
67782 +#define MODULE_GRSEC ""
67783 +#endif
67784 +
67785 #define VERMAGIC_STRING \
67786 UTS_RELEASE " " \
67787 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
67788 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
67789 - MODULE_ARCH_VERMAGIC
67790 + MODULE_ARCH_VERMAGIC \
67791 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
67792 + MODULE_GRSEC
67793
67794 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
67795 index 6071e91..ca6a489 100644
67796 --- a/include/linux/vmalloc.h
67797 +++ b/include/linux/vmalloc.h
67798 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
67799 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
67800 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
67801 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
67802 +
67803 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67804 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
67805 +#endif
67806 +
67807 /* bits [20..32] reserved for arch specific ioremap internals */
67808
67809 /*
67810 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
67811 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
67812 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
67813 unsigned long start, unsigned long end, gfp_t gfp_mask,
67814 - pgprot_t prot, int node, const void *caller);
67815 + pgprot_t prot, int node, const void *caller) __size_overflow(1);
67816 extern void vfree(const void *addr);
67817
67818 extern void *vmap(struct page **pages, unsigned int count,
67819 @@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
67820 extern void free_vm_area(struct vm_struct *area);
67821
67822 /* for /dev/kmem */
67823 -extern long vread(char *buf, char *addr, unsigned long count);
67824 -extern long vwrite(char *buf, char *addr, unsigned long count);
67825 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
67826 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
67827
67828 /*
67829 * Internals. Dont't use..
67830 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
67831 index a13291f..af51fa3 100644
67832 --- a/include/linux/vmstat.h
67833 +++ b/include/linux/vmstat.h
67834 @@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
67835 /*
67836 * Zone based page accounting with per cpu differentials.
67837 */
67838 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67839 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67840
67841 static inline void zone_page_state_add(long x, struct zone *zone,
67842 enum zone_stat_item item)
67843 {
67844 - atomic_long_add(x, &zone->vm_stat[item]);
67845 - atomic_long_add(x, &vm_stat[item]);
67846 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
67847 + atomic_long_add_unchecked(x, &vm_stat[item]);
67848 }
67849
67850 static inline unsigned long global_page_state(enum zone_stat_item item)
67851 {
67852 - long x = atomic_long_read(&vm_stat[item]);
67853 + long x = atomic_long_read_unchecked(&vm_stat[item]);
67854 #ifdef CONFIG_SMP
67855 if (x < 0)
67856 x = 0;
67857 @@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
67858 static inline unsigned long zone_page_state(struct zone *zone,
67859 enum zone_stat_item item)
67860 {
67861 - long x = atomic_long_read(&zone->vm_stat[item]);
67862 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
67863 #ifdef CONFIG_SMP
67864 if (x < 0)
67865 x = 0;
67866 @@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
67867 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
67868 enum zone_stat_item item)
67869 {
67870 - long x = atomic_long_read(&zone->vm_stat[item]);
67871 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
67872
67873 #ifdef CONFIG_SMP
67874 int cpu;
67875 @@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
67876
67877 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
67878 {
67879 - atomic_long_inc(&zone->vm_stat[item]);
67880 - atomic_long_inc(&vm_stat[item]);
67881 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
67882 + atomic_long_inc_unchecked(&vm_stat[item]);
67883 }
67884
67885 static inline void __inc_zone_page_state(struct page *page,
67886 @@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
67887
67888 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
67889 {
67890 - atomic_long_dec(&zone->vm_stat[item]);
67891 - atomic_long_dec(&vm_stat[item]);
67892 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
67893 + atomic_long_dec_unchecked(&vm_stat[item]);
67894 }
67895
67896 static inline void __dec_zone_page_state(struct page *page,
67897 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
67898 index 95d1c91..6798cca 100644
67899 --- a/include/media/v4l2-dev.h
67900 +++ b/include/media/v4l2-dev.h
67901 @@ -76,7 +76,7 @@ struct v4l2_file_operations {
67902 int (*mmap) (struct file *, struct vm_area_struct *);
67903 int (*open) (struct file *);
67904 int (*release) (struct file *);
67905 -};
67906 +} __do_const;
67907
67908 /*
67909 * Newer version of video_device, handled by videodev2.c
67910 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
67911 index 4118ad1..cb7e25f 100644
67912 --- a/include/media/v4l2-ioctl.h
67913 +++ b/include/media/v4l2-ioctl.h
67914 @@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
67915 bool valid_prio, int cmd, void *arg);
67916 };
67917
67918 -
67919 /* v4l debugging and diagnostics */
67920
67921 /* Debug bitmask flags to be used on V4L2 */
67922 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
67923 index 9e5425b..8136ffc 100644
67924 --- a/include/net/caif/cfctrl.h
67925 +++ b/include/net/caif/cfctrl.h
67926 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
67927 void (*radioset_rsp)(void);
67928 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
67929 struct cflayer *client_layer);
67930 -};
67931 +} __no_const;
67932
67933 /* Link Setup Parameters for CAIF-Links. */
67934 struct cfctrl_link_param {
67935 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
67936 struct cfctrl {
67937 struct cfsrvl serv;
67938 struct cfctrl_rsp res;
67939 - atomic_t req_seq_no;
67940 - atomic_t rsp_seq_no;
67941 + atomic_unchecked_t req_seq_no;
67942 + atomic_unchecked_t rsp_seq_no;
67943 struct list_head list;
67944 /* Protects from simultaneous access to first_req list */
67945 spinlock_t info_list_lock;
67946 diff --git a/include/net/flow.h b/include/net/flow.h
67947 index 628e11b..4c475df 100644
67948 --- a/include/net/flow.h
67949 +++ b/include/net/flow.h
67950 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
67951
67952 extern void flow_cache_flush(void);
67953 extern void flow_cache_flush_deferred(void);
67954 -extern atomic_t flow_cache_genid;
67955 +extern atomic_unchecked_t flow_cache_genid;
67956
67957 #endif
67958 diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
67959 index e5062c9..48a9a4b 100644
67960 --- a/include/net/gro_cells.h
67961 +++ b/include/net/gro_cells.h
67962 @@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
67963 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
67964
67965 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
67966 - atomic_long_inc(&dev->rx_dropped);
67967 + atomic_long_inc_unchecked(&dev->rx_dropped);
67968 kfree_skb(skb);
67969 return;
67970 }
67971 @@ -73,8 +73,8 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
67972 int i;
67973
67974 gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
67975 - gcells->cells = kcalloc(sizeof(struct gro_cell),
67976 - gcells->gro_cells_mask + 1,
67977 + gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
67978 + sizeof(struct gro_cell),
67979 GFP_KERNEL);
67980 if (!gcells->cells)
67981 return -ENOMEM;
67982 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
67983 index 1832927..ce39aea 100644
67984 --- a/include/net/inet_connection_sock.h
67985 +++ b/include/net/inet_connection_sock.h
67986 @@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
67987 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
67988 int (*bind_conflict)(const struct sock *sk,
67989 const struct inet_bind_bucket *tb, bool relax);
67990 -};
67991 +} __do_const;
67992
67993 /** inet_connection_sock - INET connection oriented sock
67994 *
67995 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
67996 index 53f464d..ba76aaa 100644
67997 --- a/include/net/inetpeer.h
67998 +++ b/include/net/inetpeer.h
67999 @@ -47,8 +47,8 @@ struct inet_peer {
68000 */
68001 union {
68002 struct {
68003 - atomic_t rid; /* Frag reception counter */
68004 - atomic_t ip_id_count; /* IP ID for the next packet */
68005 + atomic_unchecked_t rid; /* Frag reception counter */
68006 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
68007 };
68008 struct rcu_head rcu;
68009 struct inet_peer *gc_next;
68010 @@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
68011 more++;
68012 inet_peer_refcheck(p);
68013 do {
68014 - old = atomic_read(&p->ip_id_count);
68015 + old = atomic_read_unchecked(&p->ip_id_count);
68016 new = old + more;
68017 if (!new)
68018 new = 1;
68019 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
68020 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
68021 return new;
68022 }
68023
68024 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
68025 index 9497be1..5a4fafe 100644
68026 --- a/include/net/ip_fib.h
68027 +++ b/include/net/ip_fib.h
68028 @@ -169,7 +169,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
68029
68030 #define FIB_RES_SADDR(net, res) \
68031 ((FIB_RES_NH(res).nh_saddr_genid == \
68032 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
68033 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
68034 FIB_RES_NH(res).nh_saddr : \
68035 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
68036 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
68037 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
68038 index 68c69d5..2ee192b 100644
68039 --- a/include/net/ip_vs.h
68040 +++ b/include/net/ip_vs.h
68041 @@ -599,7 +599,7 @@ struct ip_vs_conn {
68042 struct ip_vs_conn *control; /* Master control connection */
68043 atomic_t n_control; /* Number of controlled ones */
68044 struct ip_vs_dest *dest; /* real server */
68045 - atomic_t in_pkts; /* incoming packet counter */
68046 + atomic_unchecked_t in_pkts; /* incoming packet counter */
68047
68048 /* packet transmitter for different forwarding methods. If it
68049 mangles the packet, it must return NF_DROP or better NF_STOLEN,
68050 @@ -737,7 +737,7 @@ struct ip_vs_dest {
68051 __be16 port; /* port number of the server */
68052 union nf_inet_addr addr; /* IP address of the server */
68053 volatile unsigned int flags; /* dest status flags */
68054 - atomic_t conn_flags; /* flags to copy to conn */
68055 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
68056 atomic_t weight; /* server weight */
68057
68058 atomic_t refcnt; /* reference counter */
68059 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
68060 index 80ffde3..968b0f4 100644
68061 --- a/include/net/irda/ircomm_tty.h
68062 +++ b/include/net/irda/ircomm_tty.h
68063 @@ -35,6 +35,7 @@
68064 #include <linux/termios.h>
68065 #include <linux/timer.h>
68066 #include <linux/tty.h> /* struct tty_struct */
68067 +#include <asm/local.h>
68068
68069 #include <net/irda/irias_object.h>
68070 #include <net/irda/ircomm_core.h>
68071 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
68072 index cc7c197..9f2da2a 100644
68073 --- a/include/net/iucv/af_iucv.h
68074 +++ b/include/net/iucv/af_iucv.h
68075 @@ -141,7 +141,7 @@ struct iucv_sock {
68076 struct iucv_sock_list {
68077 struct hlist_head head;
68078 rwlock_t lock;
68079 - atomic_t autobind_name;
68080 + atomic_unchecked_t autobind_name;
68081 };
68082
68083 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
68084 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
68085 index 0dab173..1b76af0 100644
68086 --- a/include/net/neighbour.h
68087 +++ b/include/net/neighbour.h
68088 @@ -123,7 +123,7 @@ struct neigh_ops {
68089 void (*error_report)(struct neighbour *, struct sk_buff *);
68090 int (*output)(struct neighbour *, struct sk_buff *);
68091 int (*connected_output)(struct neighbour *, struct sk_buff *);
68092 -};
68093 +} __do_const;
68094
68095 struct pneigh_entry {
68096 struct pneigh_entry *next;
68097 diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
68098 index de644bc..666aed3 100644
68099 --- a/include/net/net_namespace.h
68100 +++ b/include/net/net_namespace.h
68101 @@ -115,7 +115,7 @@ struct net {
68102 #endif
68103 struct netns_ipvs *ipvs;
68104 struct sock *diag_nlsk;
68105 - atomic_t rt_genid;
68106 + atomic_unchecked_t rt_genid;
68107 };
68108
68109 /*
68110 @@ -330,12 +330,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
68111
68112 static inline int rt_genid(struct net *net)
68113 {
68114 - return atomic_read(&net->rt_genid);
68115 + return atomic_read_unchecked(&net->rt_genid);
68116 }
68117
68118 static inline void rt_genid_bump(struct net *net)
68119 {
68120 - atomic_inc(&net->rt_genid);
68121 + atomic_inc_unchecked(&net->rt_genid);
68122 }
68123
68124 #endif /* __NET_NET_NAMESPACE_H */
68125 diff --git a/include/net/netdma.h b/include/net/netdma.h
68126 index 8ba8ce2..99b7fff 100644
68127 --- a/include/net/netdma.h
68128 +++ b/include/net/netdma.h
68129 @@ -24,7 +24,7 @@
68130 #include <linux/dmaengine.h>
68131 #include <linux/skbuff.h>
68132
68133 -int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
68134 +int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
68135 struct sk_buff *skb, int offset, struct iovec *to,
68136 size_t len, struct dma_pinned_list *pinned_list);
68137
68138 diff --git a/include/net/netlink.h b/include/net/netlink.h
68139 index 9690b0f..87aded7 100644
68140 --- a/include/net/netlink.h
68141 +++ b/include/net/netlink.h
68142 @@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
68143 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
68144 {
68145 if (mark)
68146 - skb_trim(skb, (unsigned char *) mark - skb->data);
68147 + skb_trim(skb, (const unsigned char *) mark - skb->data);
68148 }
68149
68150 /**
68151 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
68152 index 2ae2b83..dbdc85e 100644
68153 --- a/include/net/netns/ipv4.h
68154 +++ b/include/net/netns/ipv4.h
68155 @@ -64,7 +64,7 @@ struct netns_ipv4 {
68156 kgid_t sysctl_ping_group_range[2];
68157 long sysctl_tcp_mem[3];
68158
68159 - atomic_t dev_addr_genid;
68160 + atomic_unchecked_t dev_addr_genid;
68161
68162 #ifdef CONFIG_IP_MROUTE
68163 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
68164 diff --git a/include/net/protocol.h b/include/net/protocol.h
68165 index 047c047..b9dad15 100644
68166 --- a/include/net/protocol.h
68167 +++ b/include/net/protocol.h
68168 @@ -44,7 +44,7 @@ struct net_protocol {
68169 void (*err_handler)(struct sk_buff *skb, u32 info);
68170 unsigned int no_policy:1,
68171 netns_ok:1;
68172 -};
68173 +} __do_const;
68174
68175 #if IS_ENABLED(CONFIG_IPV6)
68176 struct inet6_protocol {
68177 @@ -57,7 +57,7 @@ struct inet6_protocol {
68178 u8 type, u8 code, int offset,
68179 __be32 info);
68180 unsigned int flags; /* INET6_PROTO_xxx */
68181 -};
68182 +} __do_const;
68183
68184 #define INET6_PROTO_NOPOLICY 0x1
68185 #define INET6_PROTO_FINAL 0x2
68186 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
68187 index 7fdf298..197e9f7 100644
68188 --- a/include/net/sctp/sctp.h
68189 +++ b/include/net/sctp/sctp.h
68190 @@ -330,9 +330,9 @@ do { \
68191
68192 #else /* SCTP_DEBUG */
68193
68194 -#define SCTP_DEBUG_PRINTK(whatever...)
68195 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
68196 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
68197 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
68198 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
68199 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
68200 #define SCTP_ENABLE_DEBUG
68201 #define SCTP_DISABLE_DEBUG
68202 #define SCTP_ASSERT(expr, str, func)
68203 diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
68204 index fdeb85a..0c554d5 100644
68205 --- a/include/net/sctp/structs.h
68206 +++ b/include/net/sctp/structs.h
68207 @@ -497,7 +497,7 @@ struct sctp_af {
68208 int sockaddr_len;
68209 sa_family_t sa_family;
68210 struct list_head list;
68211 -};
68212 +} __do_const;
68213
68214 struct sctp_af *sctp_get_af_specific(sa_family_t);
68215 int sctp_register_af(struct sctp_af *);
68216 @@ -517,7 +517,7 @@ struct sctp_pf {
68217 struct sctp_association *asoc);
68218 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
68219 struct sctp_af *af;
68220 -};
68221 +} __do_const;
68222
68223
68224 /* Structure to track chunk fragments that have been acked, but peer
68225 diff --git a/include/net/sock.h b/include/net/sock.h
68226 index 25afaa0..8bb0070 100644
68227 --- a/include/net/sock.h
68228 +++ b/include/net/sock.h
68229 @@ -322,7 +322,7 @@ struct sock {
68230 #ifdef CONFIG_RPS
68231 __u32 sk_rxhash;
68232 #endif
68233 - atomic_t sk_drops;
68234 + atomic_unchecked_t sk_drops;
68235 int sk_rcvbuf;
68236
68237 struct sk_filter __rcu *sk_filter;
68238 @@ -1781,7 +1781,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
68239 }
68240
68241 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
68242 - char __user *from, char *to,
68243 + char __user *from, unsigned char *to,
68244 int copy, int offset)
68245 {
68246 if (skb->ip_summed == CHECKSUM_NONE) {
68247 @@ -2040,7 +2040,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
68248 }
68249 }
68250
68251 -struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
68252 +struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
68253
68254 /**
68255 * sk_page_frag - return an appropriate page_frag
68256 diff --git a/include/net/tcp.h b/include/net/tcp.h
68257 index aed42c7..43890c6 100644
68258 --- a/include/net/tcp.h
68259 +++ b/include/net/tcp.h
68260 @@ -530,7 +530,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
68261 extern void tcp_xmit_retransmit_queue(struct sock *);
68262 extern void tcp_simple_retransmit(struct sock *);
68263 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
68264 -extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
68265 +extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
68266
68267 extern void tcp_send_probe0(struct sock *);
68268 extern void tcp_send_partial(struct sock *);
68269 @@ -701,8 +701,8 @@ struct tcp_skb_cb {
68270 struct inet6_skb_parm h6;
68271 #endif
68272 } header; /* For incoming frames */
68273 - __u32 seq; /* Starting sequence number */
68274 - __u32 end_seq; /* SEQ + FIN + SYN + datalen */
68275 + __u32 seq __intentional_overflow(0); /* Starting sequence number */
68276 + __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
68277 __u32 when; /* used to compute rtt's */
68278 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
68279
68280 @@ -716,7 +716,7 @@ struct tcp_skb_cb {
68281
68282 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
68283 /* 1 byte hole */
68284 - __u32 ack_seq; /* Sequence number ACK'd */
68285 + __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
68286 };
68287
68288 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
68289 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
68290 index 63445ed..74ef61d 100644
68291 --- a/include/net/xfrm.h
68292 +++ b/include/net/xfrm.h
68293 @@ -423,7 +423,7 @@ struct xfrm_mode {
68294 struct module *owner;
68295 unsigned int encap;
68296 int flags;
68297 -};
68298 +} __do_const;
68299
68300 /* Flags for xfrm_mode. */
68301 enum {
68302 @@ -514,7 +514,7 @@ struct xfrm_policy {
68303 struct timer_list timer;
68304
68305 struct flow_cache_object flo;
68306 - atomic_t genid;
68307 + atomic_unchecked_t genid;
68308 u32 priority;
68309 u32 index;
68310 struct xfrm_mark mark;
68311 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
68312 index 1a046b1..ee0bef0 100644
68313 --- a/include/rdma/iw_cm.h
68314 +++ b/include/rdma/iw_cm.h
68315 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
68316 int backlog);
68317
68318 int (*destroy_listen)(struct iw_cm_id *cm_id);
68319 -};
68320 +} __no_const;
68321
68322 /**
68323 * iw_create_cm_id - Create an IW CM identifier.
68324 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
68325 index 399162b..b337f1a 100644
68326 --- a/include/scsi/libfc.h
68327 +++ b/include/scsi/libfc.h
68328 @@ -762,6 +762,7 @@ struct libfc_function_template {
68329 */
68330 void (*disc_stop_final) (struct fc_lport *);
68331 };
68332 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
68333
68334 /**
68335 * struct fc_disc - Discovery context
68336 @@ -866,7 +867,7 @@ struct fc_lport {
68337 struct fc_vport *vport;
68338
68339 /* Operational Information */
68340 - struct libfc_function_template tt;
68341 + libfc_function_template_no_const tt;
68342 u8 link_up;
68343 u8 qfull;
68344 enum fc_lport_state state;
68345 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
68346 index e65c62e..aa2e5a2 100644
68347 --- a/include/scsi/scsi_device.h
68348 +++ b/include/scsi/scsi_device.h
68349 @@ -170,9 +170,9 @@ struct scsi_device {
68350 unsigned int max_device_blocked; /* what device_blocked counts down from */
68351 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
68352
68353 - atomic_t iorequest_cnt;
68354 - atomic_t iodone_cnt;
68355 - atomic_t ioerr_cnt;
68356 + atomic_unchecked_t iorequest_cnt;
68357 + atomic_unchecked_t iodone_cnt;
68358 + atomic_unchecked_t ioerr_cnt;
68359
68360 struct device sdev_gendev,
68361 sdev_dev;
68362 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
68363 index b797e8f..8e2c3aa 100644
68364 --- a/include/scsi/scsi_transport_fc.h
68365 +++ b/include/scsi/scsi_transport_fc.h
68366 @@ -751,7 +751,8 @@ struct fc_function_template {
68367 unsigned long show_host_system_hostname:1;
68368
68369 unsigned long disable_target_scan:1;
68370 -};
68371 +} __do_const;
68372 +typedef struct fc_function_template __no_const fc_function_template_no_const;
68373
68374
68375 /**
68376 diff --git a/include/sound/soc.h b/include/sound/soc.h
68377 index bc56738..a4be132 100644
68378 --- a/include/sound/soc.h
68379 +++ b/include/sound/soc.h
68380 @@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
68381 /* probe ordering - for components with runtime dependencies */
68382 int probe_order;
68383 int remove_order;
68384 -};
68385 +} __do_const;
68386
68387 /* SoC platform interface */
68388 struct snd_soc_platform_driver {
68389 @@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
68390 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
68391 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
68392 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
68393 -};
68394 +} __do_const;
68395
68396 struct snd_soc_platform {
68397 const char *name;
68398 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
68399 index 663e34a..91b306a 100644
68400 --- a/include/target/target_core_base.h
68401 +++ b/include/target/target_core_base.h
68402 @@ -654,7 +654,7 @@ struct se_device {
68403 spinlock_t stats_lock;
68404 /* Active commands on this virtual SE device */
68405 atomic_t simple_cmds;
68406 - atomic_t dev_ordered_id;
68407 + atomic_unchecked_t dev_ordered_id;
68408 atomic_t dev_ordered_sync;
68409 atomic_t dev_qf_count;
68410 int export_count;
68411 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
68412 new file mode 100644
68413 index 0000000..fb634b7
68414 --- /dev/null
68415 +++ b/include/trace/events/fs.h
68416 @@ -0,0 +1,53 @@
68417 +#undef TRACE_SYSTEM
68418 +#define TRACE_SYSTEM fs
68419 +
68420 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
68421 +#define _TRACE_FS_H
68422 +
68423 +#include <linux/fs.h>
68424 +#include <linux/tracepoint.h>
68425 +
68426 +TRACE_EVENT(do_sys_open,
68427 +
68428 + TP_PROTO(const char *filename, int flags, int mode),
68429 +
68430 + TP_ARGS(filename, flags, mode),
68431 +
68432 + TP_STRUCT__entry(
68433 + __string( filename, filename )
68434 + __field( int, flags )
68435 + __field( int, mode )
68436 + ),
68437 +
68438 + TP_fast_assign(
68439 + __assign_str(filename, filename);
68440 + __entry->flags = flags;
68441 + __entry->mode = mode;
68442 + ),
68443 +
68444 + TP_printk("\"%s\" %x %o",
68445 + __get_str(filename), __entry->flags, __entry->mode)
68446 +);
68447 +
68448 +TRACE_EVENT(open_exec,
68449 +
68450 + TP_PROTO(const char *filename),
68451 +
68452 + TP_ARGS(filename),
68453 +
68454 + TP_STRUCT__entry(
68455 + __string( filename, filename )
68456 + ),
68457 +
68458 + TP_fast_assign(
68459 + __assign_str(filename, filename);
68460 + ),
68461 +
68462 + TP_printk("\"%s\"",
68463 + __get_str(filename))
68464 +);
68465 +
68466 +#endif /* _TRACE_FS_H */
68467 +
68468 +/* This part must be outside protection */
68469 +#include <trace/define_trace.h>
68470 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
68471 index 1c09820..7f5ec79 100644
68472 --- a/include/trace/events/irq.h
68473 +++ b/include/trace/events/irq.h
68474 @@ -36,7 +36,7 @@ struct softirq_action;
68475 */
68476 TRACE_EVENT(irq_handler_entry,
68477
68478 - TP_PROTO(int irq, struct irqaction *action),
68479 + TP_PROTO(int irq, const struct irqaction *action),
68480
68481 TP_ARGS(irq, action),
68482
68483 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
68484 */
68485 TRACE_EVENT(irq_handler_exit,
68486
68487 - TP_PROTO(int irq, struct irqaction *action, int ret),
68488 + TP_PROTO(int irq, const struct irqaction *action, int ret),
68489
68490 TP_ARGS(irq, action, ret),
68491
68492 diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
68493 index 7caf44c..23c6f27 100644
68494 --- a/include/uapi/linux/a.out.h
68495 +++ b/include/uapi/linux/a.out.h
68496 @@ -39,6 +39,14 @@ enum machine_type {
68497 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
68498 };
68499
68500 +/* Constants for the N_FLAGS field */
68501 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
68502 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
68503 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
68504 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
68505 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
68506 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
68507 +
68508 #if !defined (N_MAGIC)
68509 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
68510 #endif
68511 diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
68512 index d876736..b36014e 100644
68513 --- a/include/uapi/linux/byteorder/little_endian.h
68514 +++ b/include/uapi/linux/byteorder/little_endian.h
68515 @@ -42,51 +42,51 @@
68516
68517 static inline __le64 __cpu_to_le64p(const __u64 *p)
68518 {
68519 - return (__force __le64)*p;
68520 + return (__force const __le64)*p;
68521 }
68522 static inline __u64 __le64_to_cpup(const __le64 *p)
68523 {
68524 - return (__force __u64)*p;
68525 + return (__force const __u64)*p;
68526 }
68527 static inline __le32 __cpu_to_le32p(const __u32 *p)
68528 {
68529 - return (__force __le32)*p;
68530 + return (__force const __le32)*p;
68531 }
68532 static inline __u32 __le32_to_cpup(const __le32 *p)
68533 {
68534 - return (__force __u32)*p;
68535 + return (__force const __u32)*p;
68536 }
68537 static inline __le16 __cpu_to_le16p(const __u16 *p)
68538 {
68539 - return (__force __le16)*p;
68540 + return (__force const __le16)*p;
68541 }
68542 static inline __u16 __le16_to_cpup(const __le16 *p)
68543 {
68544 - return (__force __u16)*p;
68545 + return (__force const __u16)*p;
68546 }
68547 static inline __be64 __cpu_to_be64p(const __u64 *p)
68548 {
68549 - return (__force __be64)__swab64p(p);
68550 + return (__force const __be64)__swab64p(p);
68551 }
68552 static inline __u64 __be64_to_cpup(const __be64 *p)
68553 {
68554 - return __swab64p((__u64 *)p);
68555 + return __swab64p((const __u64 *)p);
68556 }
68557 static inline __be32 __cpu_to_be32p(const __u32 *p)
68558 {
68559 - return (__force __be32)__swab32p(p);
68560 + return (__force const __be32)__swab32p(p);
68561 }
68562 static inline __u32 __be32_to_cpup(const __be32 *p)
68563 {
68564 - return __swab32p((__u32 *)p);
68565 + return __swab32p((const __u32 *)p);
68566 }
68567 static inline __be16 __cpu_to_be16p(const __u16 *p)
68568 {
68569 - return (__force __be16)__swab16p(p);
68570 + return (__force const __be16)__swab16p(p);
68571 }
68572 static inline __u16 __be16_to_cpup(const __be16 *p)
68573 {
68574 - return __swab16p((__u16 *)p);
68575 + return __swab16p((const __u16 *)p);
68576 }
68577 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
68578 #define __le64_to_cpus(x) do { (void)(x); } while (0)
68579 diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
68580 index 126a817..d522bd1 100644
68581 --- a/include/uapi/linux/elf.h
68582 +++ b/include/uapi/linux/elf.h
68583 @@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
68584 #define PT_GNU_EH_FRAME 0x6474e550
68585
68586 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
68587 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
68588 +
68589 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
68590 +
68591 +/* Constants for the e_flags field */
68592 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
68593 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
68594 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
68595 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
68596 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
68597 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
68598
68599 /*
68600 * Extended Numbering
68601 @@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
68602 #define DT_DEBUG 21
68603 #define DT_TEXTREL 22
68604 #define DT_JMPREL 23
68605 +#define DT_FLAGS 30
68606 + #define DF_TEXTREL 0x00000004
68607 #define DT_ENCODING 32
68608 #define OLD_DT_LOOS 0x60000000
68609 #define DT_LOOS 0x6000000d
68610 @@ -240,6 +253,19 @@ typedef struct elf64_hdr {
68611 #define PF_W 0x2
68612 #define PF_X 0x1
68613
68614 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
68615 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
68616 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
68617 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
68618 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
68619 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
68620 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
68621 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
68622 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
68623 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
68624 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
68625 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
68626 +
68627 typedef struct elf32_phdr{
68628 Elf32_Word p_type;
68629 Elf32_Off p_offset;
68630 @@ -332,6 +358,8 @@ typedef struct elf64_shdr {
68631 #define EI_OSABI 7
68632 #define EI_PAD 8
68633
68634 +#define EI_PAX 14
68635 +
68636 #define ELFMAG0 0x7f /* EI_MAG */
68637 #define ELFMAG1 'E'
68638 #define ELFMAG2 'L'
68639 diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
68640 index aa169c4..6a2771d 100644
68641 --- a/include/uapi/linux/personality.h
68642 +++ b/include/uapi/linux/personality.h
68643 @@ -30,6 +30,7 @@ enum {
68644 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
68645 ADDR_NO_RANDOMIZE | \
68646 ADDR_COMPAT_LAYOUT | \
68647 + ADDR_LIMIT_3GB | \
68648 MMAP_PAGE_ZERO)
68649
68650 /*
68651 diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
68652 index 7530e74..e714828 100644
68653 --- a/include/uapi/linux/screen_info.h
68654 +++ b/include/uapi/linux/screen_info.h
68655 @@ -43,7 +43,8 @@ struct screen_info {
68656 __u16 pages; /* 0x32 */
68657 __u16 vesa_attributes; /* 0x34 */
68658 __u32 capabilities; /* 0x36 */
68659 - __u8 _reserved[6]; /* 0x3a */
68660 + __u16 vesapm_size; /* 0x3a */
68661 + __u8 _reserved[4]; /* 0x3c */
68662 } __attribute__((packed));
68663
68664 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
68665 diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
68666 index 6d67213..8dab561 100644
68667 --- a/include/uapi/linux/sysctl.h
68668 +++ b/include/uapi/linux/sysctl.h
68669 @@ -155,7 +155,11 @@ enum
68670 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
68671 };
68672
68673 -
68674 +#ifdef CONFIG_PAX_SOFTMODE
68675 +enum {
68676 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
68677 +};
68678 +#endif
68679
68680 /* CTL_VM names: */
68681 enum
68682 diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
68683 index 26607bd..588b65f 100644
68684 --- a/include/uapi/linux/xattr.h
68685 +++ b/include/uapi/linux/xattr.h
68686 @@ -60,5 +60,9 @@
68687 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
68688 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
68689
68690 +/* User namespace */
68691 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
68692 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
68693 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
68694
68695 #endif /* _UAPI_LINUX_XATTR_H */
68696 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
68697 index f9466fa..f4e2b81 100644
68698 --- a/include/video/udlfb.h
68699 +++ b/include/video/udlfb.h
68700 @@ -53,10 +53,10 @@ struct dlfb_data {
68701 u32 pseudo_palette[256];
68702 int blank_mode; /*one of FB_BLANK_ */
68703 /* blit-only rendering path metrics, exposed through sysfs */
68704 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
68705 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
68706 - atomic_t bytes_sent; /* to usb, after compression including overhead */
68707 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
68708 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
68709 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
68710 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
68711 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
68712 };
68713
68714 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
68715 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
68716 index 0993a22..32ba2fe 100644
68717 --- a/include/video/uvesafb.h
68718 +++ b/include/video/uvesafb.h
68719 @@ -177,6 +177,7 @@ struct uvesafb_par {
68720 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
68721 u8 pmi_setpal; /* PMI for palette changes */
68722 u16 *pmi_base; /* protected mode interface location */
68723 + u8 *pmi_code; /* protected mode code location */
68724 void *pmi_start;
68725 void *pmi_pal;
68726 u8 *vbe_state_orig; /*
68727 diff --git a/init/Kconfig b/init/Kconfig
68728 index be8b7f5..1eeca9b 100644
68729 --- a/init/Kconfig
68730 +++ b/init/Kconfig
68731 @@ -990,6 +990,7 @@ endif # CGROUPS
68732
68733 config CHECKPOINT_RESTORE
68734 bool "Checkpoint/restore support" if EXPERT
68735 + depends on !GRKERNSEC
68736 default n
68737 help
68738 Enables additional kernel features in a sake of checkpoint/restore.
68739 @@ -1468,7 +1469,7 @@ config SLUB_DEBUG
68740
68741 config COMPAT_BRK
68742 bool "Disable heap randomization"
68743 - default y
68744 + default n
68745 help
68746 Randomizing heap placement makes heap exploits harder, but it
68747 also breaks ancient binaries (including anything libc5 based).
68748 @@ -1711,7 +1712,7 @@ config INIT_ALL_POSSIBLE
68749 config STOP_MACHINE
68750 bool
68751 default y
68752 - depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
68753 + depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
68754 help
68755 Need stop_machine() primitive.
68756
68757 diff --git a/init/Makefile b/init/Makefile
68758 index 7bc47ee..6da2dc7 100644
68759 --- a/init/Makefile
68760 +++ b/init/Makefile
68761 @@ -2,6 +2,9 @@
68762 # Makefile for the linux kernel.
68763 #
68764
68765 +ccflags-y := $(GCC_PLUGINS_CFLAGS)
68766 +asflags-y := $(GCC_PLUGINS_AFLAGS)
68767 +
68768 obj-y := main.o version.o mounts.o
68769 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
68770 obj-y += noinitramfs.o
68771 diff --git a/init/do_mounts.c b/init/do_mounts.c
68772 index 1d1b634..a1c810f 100644
68773 --- a/init/do_mounts.c
68774 +++ b/init/do_mounts.c
68775 @@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
68776 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
68777 {
68778 struct super_block *s;
68779 - int err = sys_mount(name, "/root", fs, flags, data);
68780 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
68781 if (err)
68782 return err;
68783
68784 - sys_chdir("/root");
68785 + sys_chdir((const char __force_user *)"/root");
68786 s = current->fs->pwd.dentry->d_sb;
68787 ROOT_DEV = s->s_dev;
68788 printk(KERN_INFO
68789 @@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
68790 va_start(args, fmt);
68791 vsprintf(buf, fmt, args);
68792 va_end(args);
68793 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
68794 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
68795 if (fd >= 0) {
68796 sys_ioctl(fd, FDEJECT, 0);
68797 sys_close(fd);
68798 }
68799 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
68800 - fd = sys_open("/dev/console", O_RDWR, 0);
68801 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
68802 if (fd >= 0) {
68803 sys_ioctl(fd, TCGETS, (long)&termios);
68804 termios.c_lflag &= ~ICANON;
68805 sys_ioctl(fd, TCSETSF, (long)&termios);
68806 - sys_read(fd, &c, 1);
68807 + sys_read(fd, (char __user *)&c, 1);
68808 termios.c_lflag |= ICANON;
68809 sys_ioctl(fd, TCSETSF, (long)&termios);
68810 sys_close(fd);
68811 @@ -585,6 +585,6 @@ void __init prepare_namespace(void)
68812 mount_root();
68813 out:
68814 devtmpfs_mount("dev");
68815 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
68816 - sys_chroot(".");
68817 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
68818 + sys_chroot((const char __force_user *)".");
68819 }
68820 diff --git a/init/do_mounts.h b/init/do_mounts.h
68821 index f5b978a..69dbfe8 100644
68822 --- a/init/do_mounts.h
68823 +++ b/init/do_mounts.h
68824 @@ -15,15 +15,15 @@ extern int root_mountflags;
68825
68826 static inline int create_dev(char *name, dev_t dev)
68827 {
68828 - sys_unlink(name);
68829 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
68830 + sys_unlink((char __force_user *)name);
68831 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
68832 }
68833
68834 #if BITS_PER_LONG == 32
68835 static inline u32 bstat(char *name)
68836 {
68837 struct stat64 stat;
68838 - if (sys_stat64(name, &stat) != 0)
68839 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
68840 return 0;
68841 if (!S_ISBLK(stat.st_mode))
68842 return 0;
68843 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
68844 static inline u32 bstat(char *name)
68845 {
68846 struct stat stat;
68847 - if (sys_newstat(name, &stat) != 0)
68848 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
68849 return 0;
68850 if (!S_ISBLK(stat.st_mode))
68851 return 0;
68852 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
68853 index f9acf71..1e19144 100644
68854 --- a/init/do_mounts_initrd.c
68855 +++ b/init/do_mounts_initrd.c
68856 @@ -58,8 +58,8 @@ static void __init handle_initrd(void)
68857 create_dev("/dev/root.old", Root_RAM0);
68858 /* mount initrd on rootfs' /root */
68859 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
68860 - sys_mkdir("/old", 0700);
68861 - sys_chdir("/old");
68862 + sys_mkdir((const char __force_user *)"/old", 0700);
68863 + sys_chdir((const char __force_user *)"/old");
68864
68865 /*
68866 * In case that a resume from disk is carried out by linuxrc or one of
68867 @@ -73,31 +73,31 @@ static void __init handle_initrd(void)
68868 current->flags &= ~PF_FREEZER_SKIP;
68869
68870 /* move initrd to rootfs' /old */
68871 - sys_mount("..", ".", NULL, MS_MOVE, NULL);
68872 + sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
68873 /* switch root and cwd back to / of rootfs */
68874 - sys_chroot("..");
68875 + sys_chroot((const char __force_user *)"..");
68876
68877 if (new_decode_dev(real_root_dev) == Root_RAM0) {
68878 - sys_chdir("/old");
68879 + sys_chdir((const char __force_user *)"/old");
68880 return;
68881 }
68882
68883 - sys_chdir("/");
68884 + sys_chdir((const char __force_user *)"/");
68885 ROOT_DEV = new_decode_dev(real_root_dev);
68886 mount_root();
68887
68888 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
68889 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
68890 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
68891 if (!error)
68892 printk("okay\n");
68893 else {
68894 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
68895 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
68896 if (error == -ENOENT)
68897 printk("/initrd does not exist. Ignored.\n");
68898 else
68899 printk("failed\n");
68900 printk(KERN_NOTICE "Unmounting old root\n");
68901 - sys_umount("/old", MNT_DETACH);
68902 + sys_umount((char __force_user *)"/old", MNT_DETACH);
68903 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
68904 if (fd < 0) {
68905 error = fd;
68906 @@ -120,11 +120,11 @@ int __init initrd_load(void)
68907 * mounted in the normal path.
68908 */
68909 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
68910 - sys_unlink("/initrd.image");
68911 + sys_unlink((const char __force_user *)"/initrd.image");
68912 handle_initrd();
68913 return 1;
68914 }
68915 }
68916 - sys_unlink("/initrd.image");
68917 + sys_unlink((const char __force_user *)"/initrd.image");
68918 return 0;
68919 }
68920 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
68921 index 8cb6db5..d729f50 100644
68922 --- a/init/do_mounts_md.c
68923 +++ b/init/do_mounts_md.c
68924 @@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
68925 partitioned ? "_d" : "", minor,
68926 md_setup_args[ent].device_names);
68927
68928 - fd = sys_open(name, 0, 0);
68929 + fd = sys_open((char __force_user *)name, 0, 0);
68930 if (fd < 0) {
68931 printk(KERN_ERR "md: open failed - cannot start "
68932 "array %s\n", name);
68933 @@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
68934 * array without it
68935 */
68936 sys_close(fd);
68937 - fd = sys_open(name, 0, 0);
68938 + fd = sys_open((char __force_user *)name, 0, 0);
68939 sys_ioctl(fd, BLKRRPART, 0);
68940 }
68941 sys_close(fd);
68942 @@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
68943
68944 wait_for_device_probe();
68945
68946 - fd = sys_open("/dev/md0", 0, 0);
68947 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
68948 if (fd >= 0) {
68949 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
68950 sys_close(fd);
68951 diff --git a/init/init_task.c b/init/init_task.c
68952 index 8b2f399..f0797c9 100644
68953 --- a/init/init_task.c
68954 +++ b/init/init_task.c
68955 @@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
68956 * Initial thread structure. Alignment of this is handled by a special
68957 * linker map entry.
68958 */
68959 +#ifdef CONFIG_X86
68960 +union thread_union init_thread_union __init_task_data;
68961 +#else
68962 union thread_union init_thread_union __init_task_data =
68963 { INIT_THREAD_INFO(init_task) };
68964 +#endif
68965 diff --git a/init/initramfs.c b/init/initramfs.c
68966 index 84c6bf1..8899338 100644
68967 --- a/init/initramfs.c
68968 +++ b/init/initramfs.c
68969 @@ -84,7 +84,7 @@ static void __init free_hash(void)
68970 }
68971 }
68972
68973 -static long __init do_utime(char *filename, time_t mtime)
68974 +static long __init do_utime(char __force_user *filename, time_t mtime)
68975 {
68976 struct timespec t[2];
68977
68978 @@ -119,7 +119,7 @@ static void __init dir_utime(void)
68979 struct dir_entry *de, *tmp;
68980 list_for_each_entry_safe(de, tmp, &dir_list, list) {
68981 list_del(&de->list);
68982 - do_utime(de->name, de->mtime);
68983 + do_utime((char __force_user *)de->name, de->mtime);
68984 kfree(de->name);
68985 kfree(de);
68986 }
68987 @@ -281,7 +281,7 @@ static int __init maybe_link(void)
68988 if (nlink >= 2) {
68989 char *old = find_link(major, minor, ino, mode, collected);
68990 if (old)
68991 - return (sys_link(old, collected) < 0) ? -1 : 1;
68992 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
68993 }
68994 return 0;
68995 }
68996 @@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
68997 {
68998 struct stat st;
68999
69000 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
69001 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
69002 if (S_ISDIR(st.st_mode))
69003 - sys_rmdir(path);
69004 + sys_rmdir((char __force_user *)path);
69005 else
69006 - sys_unlink(path);
69007 + sys_unlink((char __force_user *)path);
69008 }
69009 }
69010
69011 @@ -315,7 +315,7 @@ static int __init do_name(void)
69012 int openflags = O_WRONLY|O_CREAT;
69013 if (ml != 1)
69014 openflags |= O_TRUNC;
69015 - wfd = sys_open(collected, openflags, mode);
69016 + wfd = sys_open((char __force_user *)collected, openflags, mode);
69017
69018 if (wfd >= 0) {
69019 sys_fchown(wfd, uid, gid);
69020 @@ -327,17 +327,17 @@ static int __init do_name(void)
69021 }
69022 }
69023 } else if (S_ISDIR(mode)) {
69024 - sys_mkdir(collected, mode);
69025 - sys_chown(collected, uid, gid);
69026 - sys_chmod(collected, mode);
69027 + sys_mkdir((char __force_user *)collected, mode);
69028 + sys_chown((char __force_user *)collected, uid, gid);
69029 + sys_chmod((char __force_user *)collected, mode);
69030 dir_add(collected, mtime);
69031 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
69032 S_ISFIFO(mode) || S_ISSOCK(mode)) {
69033 if (maybe_link() == 0) {
69034 - sys_mknod(collected, mode, rdev);
69035 - sys_chown(collected, uid, gid);
69036 - sys_chmod(collected, mode);
69037 - do_utime(collected, mtime);
69038 + sys_mknod((char __force_user *)collected, mode, rdev);
69039 + sys_chown((char __force_user *)collected, uid, gid);
69040 + sys_chmod((char __force_user *)collected, mode);
69041 + do_utime((char __force_user *)collected, mtime);
69042 }
69043 }
69044 return 0;
69045 @@ -346,15 +346,15 @@ static int __init do_name(void)
69046 static int __init do_copy(void)
69047 {
69048 if (count >= body_len) {
69049 - sys_write(wfd, victim, body_len);
69050 + sys_write(wfd, (char __force_user *)victim, body_len);
69051 sys_close(wfd);
69052 - do_utime(vcollected, mtime);
69053 + do_utime((char __force_user *)vcollected, mtime);
69054 kfree(vcollected);
69055 eat(body_len);
69056 state = SkipIt;
69057 return 0;
69058 } else {
69059 - sys_write(wfd, victim, count);
69060 + sys_write(wfd, (char __force_user *)victim, count);
69061 body_len -= count;
69062 eat(count);
69063 return 1;
69064 @@ -365,9 +365,9 @@ static int __init do_symlink(void)
69065 {
69066 collected[N_ALIGN(name_len) + body_len] = '\0';
69067 clean_path(collected, 0);
69068 - sys_symlink(collected + N_ALIGN(name_len), collected);
69069 - sys_lchown(collected, uid, gid);
69070 - do_utime(collected, mtime);
69071 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
69072 + sys_lchown((char __force_user *)collected, uid, gid);
69073 + do_utime((char __force_user *)collected, mtime);
69074 state = SkipIt;
69075 next_state = Reset;
69076 return 0;
69077 diff --git a/init/main.c b/init/main.c
69078 index cee4b5c..9c267d9 100644
69079 --- a/init/main.c
69080 +++ b/init/main.c
69081 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
69082 extern void tc_init(void);
69083 #endif
69084
69085 +extern void grsecurity_init(void);
69086 +
69087 /*
69088 * Debug helper: via this flag we know that we are in 'early bootup code'
69089 * where only the boot processor is running with IRQ disabled. This means
69090 @@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
69091
69092 __setup("reset_devices", set_reset_devices);
69093
69094 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
69095 +kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
69096 +static int __init setup_grsec_proc_gid(char *str)
69097 +{
69098 + grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
69099 + return 1;
69100 +}
69101 +__setup("grsec_proc_gid=", setup_grsec_proc_gid);
69102 +#endif
69103 +
69104 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
69105 +extern char pax_enter_kernel_user[];
69106 +extern char pax_exit_kernel_user[];
69107 +extern pgdval_t clone_pgd_mask;
69108 +#endif
69109 +
69110 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
69111 +static int __init setup_pax_nouderef(char *str)
69112 +{
69113 +#ifdef CONFIG_X86_32
69114 + unsigned int cpu;
69115 + struct desc_struct *gdt;
69116 +
69117 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
69118 + gdt = get_cpu_gdt_table(cpu);
69119 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
69120 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
69121 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
69122 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
69123 + }
69124 + loadsegment(ds, __KERNEL_DS);
69125 + loadsegment(es, __KERNEL_DS);
69126 + loadsegment(ss, __KERNEL_DS);
69127 +#else
69128 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
69129 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
69130 + clone_pgd_mask = ~(pgdval_t)0UL;
69131 +#endif
69132 +
69133 + return 0;
69134 +}
69135 +early_param("pax_nouderef", setup_pax_nouderef);
69136 +#endif
69137 +
69138 +#ifdef CONFIG_PAX_SOFTMODE
69139 +int pax_softmode;
69140 +
69141 +static int __init setup_pax_softmode(char *str)
69142 +{
69143 + get_option(&str, &pax_softmode);
69144 + return 1;
69145 +}
69146 +__setup("pax_softmode=", setup_pax_softmode);
69147 +#endif
69148 +
69149 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
69150 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
69151 static const char *panic_later, *panic_param;
69152 @@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
69153 {
69154 int count = preempt_count();
69155 int ret;
69156 + const char *msg1 = "", *msg2 = "";
69157
69158 if (initcall_debug)
69159 ret = do_one_initcall_debug(fn);
69160 @@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
69161 sprintf(msgbuf, "error code %d ", ret);
69162
69163 if (preempt_count() != count) {
69164 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
69165 + msg1 = " preemption imbalance";
69166 preempt_count() = count;
69167 }
69168 if (irqs_disabled()) {
69169 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
69170 + msg2 = " disabled interrupts";
69171 local_irq_enable();
69172 }
69173 - if (msgbuf[0]) {
69174 - printk("initcall %pF returned with %s\n", fn, msgbuf);
69175 + if (msgbuf[0] || *msg1 || *msg2) {
69176 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
69177 }
69178
69179 return ret;
69180 @@ -743,6 +801,10 @@ static char *initcall_level_names[] __initdata = {
69181 "late",
69182 };
69183
69184 +#ifdef CONFIG_PAX_LATENT_ENTROPY
69185 +u64 latent_entropy;
69186 +#endif
69187 +
69188 static void __init do_initcall_level(int level)
69189 {
69190 extern const struct kernel_param __start___param[], __stop___param[];
69191 @@ -755,8 +817,14 @@ static void __init do_initcall_level(int level)
69192 level, level,
69193 &repair_env_string);
69194
69195 - for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
69196 + for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
69197 do_one_initcall(*fn);
69198 +
69199 +#ifdef CONFIG_PAX_LATENT_ENTROPY
69200 + add_device_randomness(&latent_entropy, sizeof(latent_entropy));
69201 +#endif
69202 +
69203 + }
69204 }
69205
69206 static void __init do_initcalls(void)
69207 @@ -790,8 +858,14 @@ static void __init do_pre_smp_initcalls(void)
69208 {
69209 initcall_t *fn;
69210
69211 - for (fn = __initcall_start; fn < __initcall0_start; fn++)
69212 + for (fn = __initcall_start; fn < __initcall0_start; fn++) {
69213 do_one_initcall(*fn);
69214 +
69215 +#ifdef CONFIG_PAX_LATENT_ENTROPY
69216 + add_device_randomness(&latent_entropy, sizeof(latent_entropy));
69217 +#endif
69218 +
69219 + }
69220 }
69221
69222 static int run_init_process(const char *init_filename)
69223 @@ -877,7 +951,7 @@ static noinline void __init kernel_init_freeable(void)
69224 do_basic_setup();
69225
69226 /* Open the /dev/console on the rootfs, this should never fail */
69227 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
69228 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
69229 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
69230
69231 (void) sys_dup(0);
69232 @@ -890,11 +964,13 @@ static noinline void __init kernel_init_freeable(void)
69233 if (!ramdisk_execute_command)
69234 ramdisk_execute_command = "/init";
69235
69236 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
69237 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
69238 ramdisk_execute_command = NULL;
69239 prepare_namespace();
69240 }
69241
69242 + grsecurity_init();
69243 +
69244 /*
69245 * Ok, we have completed the initial bootup, and
69246 * we're essentially up and running. Get rid of the
69247 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
69248 index 71a3ca1..cc330ee 100644
69249 --- a/ipc/mqueue.c
69250 +++ b/ipc/mqueue.c
69251 @@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
69252 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
69253 info->attr.mq_msgsize);
69254
69255 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
69256 spin_lock(&mq_lock);
69257 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
69258 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
69259 diff --git a/ipc/msg.c b/ipc/msg.c
69260 index 950572f..266c15f 100644
69261 --- a/ipc/msg.c
69262 +++ b/ipc/msg.c
69263 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
69264 return security_msg_queue_associate(msq, msgflg);
69265 }
69266
69267 +static struct ipc_ops msg_ops = {
69268 + .getnew = newque,
69269 + .associate = msg_security,
69270 + .more_checks = NULL
69271 +};
69272 +
69273 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
69274 {
69275 struct ipc_namespace *ns;
69276 - struct ipc_ops msg_ops;
69277 struct ipc_params msg_params;
69278
69279 ns = current->nsproxy->ipc_ns;
69280
69281 - msg_ops.getnew = newque;
69282 - msg_ops.associate = msg_security;
69283 - msg_ops.more_checks = NULL;
69284 -
69285 msg_params.key = key;
69286 msg_params.flg = msgflg;
69287
69288 diff --git a/ipc/sem.c b/ipc/sem.c
69289 index 58d31f1..cce7a55 100644
69290 --- a/ipc/sem.c
69291 +++ b/ipc/sem.c
69292 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
69293 return 0;
69294 }
69295
69296 +static struct ipc_ops sem_ops = {
69297 + .getnew = newary,
69298 + .associate = sem_security,
69299 + .more_checks = sem_more_checks
69300 +};
69301 +
69302 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
69303 {
69304 struct ipc_namespace *ns;
69305 - struct ipc_ops sem_ops;
69306 struct ipc_params sem_params;
69307
69308 ns = current->nsproxy->ipc_ns;
69309 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
69310 if (nsems < 0 || nsems > ns->sc_semmsl)
69311 return -EINVAL;
69312
69313 - sem_ops.getnew = newary;
69314 - sem_ops.associate = sem_security;
69315 - sem_ops.more_checks = sem_more_checks;
69316 -
69317 sem_params.key = key;
69318 sem_params.flg = semflg;
69319 sem_params.u.nsems = nsems;
69320 diff --git a/ipc/shm.c b/ipc/shm.c
69321 index 4fa6d8f..55cff14 100644
69322 --- a/ipc/shm.c
69323 +++ b/ipc/shm.c
69324 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
69325 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
69326 #endif
69327
69328 +#ifdef CONFIG_GRKERNSEC
69329 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
69330 + const time_t shm_createtime, const kuid_t cuid,
69331 + const int shmid);
69332 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
69333 + const time_t shm_createtime);
69334 +#endif
69335 +
69336 void shm_init_ns(struct ipc_namespace *ns)
69337 {
69338 ns->shm_ctlmax = SHMMAX;
69339 @@ -521,6 +529,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
69340 shp->shm_lprid = 0;
69341 shp->shm_atim = shp->shm_dtim = 0;
69342 shp->shm_ctim = get_seconds();
69343 +#ifdef CONFIG_GRKERNSEC
69344 + {
69345 + struct timespec timeval;
69346 + do_posix_clock_monotonic_gettime(&timeval);
69347 +
69348 + shp->shm_createtime = timeval.tv_sec;
69349 + }
69350 +#endif
69351 shp->shm_segsz = size;
69352 shp->shm_nattch = 0;
69353 shp->shm_file = file;
69354 @@ -572,18 +588,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
69355 return 0;
69356 }
69357
69358 +static struct ipc_ops shm_ops = {
69359 + .getnew = newseg,
69360 + .associate = shm_security,
69361 + .more_checks = shm_more_checks
69362 +};
69363 +
69364 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
69365 {
69366 struct ipc_namespace *ns;
69367 - struct ipc_ops shm_ops;
69368 struct ipc_params shm_params;
69369
69370 ns = current->nsproxy->ipc_ns;
69371
69372 - shm_ops.getnew = newseg;
69373 - shm_ops.associate = shm_security;
69374 - shm_ops.more_checks = shm_more_checks;
69375 -
69376 shm_params.key = key;
69377 shm_params.flg = shmflg;
69378 shm_params.u.size = size;
69379 @@ -1004,6 +1021,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
69380 f_mode = FMODE_READ | FMODE_WRITE;
69381 }
69382 if (shmflg & SHM_EXEC) {
69383 +
69384 +#ifdef CONFIG_PAX_MPROTECT
69385 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
69386 + goto out;
69387 +#endif
69388 +
69389 prot |= PROT_EXEC;
69390 acc_mode |= S_IXUGO;
69391 }
69392 @@ -1027,9 +1050,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
69393 if (err)
69394 goto out_unlock;
69395
69396 +#ifdef CONFIG_GRKERNSEC
69397 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
69398 + shp->shm_perm.cuid, shmid) ||
69399 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
69400 + err = -EACCES;
69401 + goto out_unlock;
69402 + }
69403 +#endif
69404 +
69405 path = shp->shm_file->f_path;
69406 path_get(&path);
69407 shp->shm_nattch++;
69408 +#ifdef CONFIG_GRKERNSEC
69409 + shp->shm_lapid = current->pid;
69410 +#endif
69411 size = i_size_read(path.dentry->d_inode);
69412 shm_unlock(shp);
69413
69414 diff --git a/kernel/acct.c b/kernel/acct.c
69415 index 051e071..15e0920 100644
69416 --- a/kernel/acct.c
69417 +++ b/kernel/acct.c
69418 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
69419 */
69420 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
69421 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
69422 - file->f_op->write(file, (char *)&ac,
69423 + file->f_op->write(file, (char __force_user *)&ac,
69424 sizeof(acct_t), &file->f_pos);
69425 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
69426 set_fs(fs);
69427 diff --git a/kernel/audit.c b/kernel/audit.c
69428 index d596e53..dbef3c3 100644
69429 --- a/kernel/audit.c
69430 +++ b/kernel/audit.c
69431 @@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
69432 3) suppressed due to audit_rate_limit
69433 4) suppressed due to audit_backlog_limit
69434 */
69435 -static atomic_t audit_lost = ATOMIC_INIT(0);
69436 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
69437
69438 /* The netlink socket. */
69439 static struct sock *audit_sock;
69440 @@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
69441 unsigned long now;
69442 int print;
69443
69444 - atomic_inc(&audit_lost);
69445 + atomic_inc_unchecked(&audit_lost);
69446
69447 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
69448
69449 @@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
69450 printk(KERN_WARNING
69451 "audit: audit_lost=%d audit_rate_limit=%d "
69452 "audit_backlog_limit=%d\n",
69453 - atomic_read(&audit_lost),
69454 + atomic_read_unchecked(&audit_lost),
69455 audit_rate_limit,
69456 audit_backlog_limit);
69457 audit_panic(message);
69458 @@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
69459 status_set.pid = audit_pid;
69460 status_set.rate_limit = audit_rate_limit;
69461 status_set.backlog_limit = audit_backlog_limit;
69462 - status_set.lost = atomic_read(&audit_lost);
69463 + status_set.lost = atomic_read_unchecked(&audit_lost);
69464 status_set.backlog = skb_queue_len(&audit_skb_queue);
69465 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
69466 &status_set, sizeof(status_set));
69467 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
69468 index a371f85..da826c1 100644
69469 --- a/kernel/auditsc.c
69470 +++ b/kernel/auditsc.c
69471 @@ -2292,7 +2292,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
69472 }
69473
69474 /* global counter which is incremented every time something logs in */
69475 -static atomic_t session_id = ATOMIC_INIT(0);
69476 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
69477
69478 /**
69479 * audit_set_loginuid - set current task's audit_context loginuid
69480 @@ -2316,7 +2316,7 @@ int audit_set_loginuid(kuid_t loginuid)
69481 return -EPERM;
69482 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
69483
69484 - sessionid = atomic_inc_return(&session_id);
69485 + sessionid = atomic_inc_return_unchecked(&session_id);
69486 if (context && context->in_syscall) {
69487 struct audit_buffer *ab;
69488
69489 diff --git a/kernel/capability.c b/kernel/capability.c
69490 index 493d972..f87dfbd 100644
69491 --- a/kernel/capability.c
69492 +++ b/kernel/capability.c
69493 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
69494 * before modification is attempted and the application
69495 * fails.
69496 */
69497 + if (tocopy > ARRAY_SIZE(kdata))
69498 + return -EFAULT;
69499 +
69500 if (copy_to_user(dataptr, kdata, tocopy
69501 * sizeof(struct __user_cap_data_struct))) {
69502 return -EFAULT;
69503 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
69504 int ret;
69505
69506 rcu_read_lock();
69507 - ret = security_capable(__task_cred(t), ns, cap);
69508 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
69509 + gr_task_is_capable(t, __task_cred(t), cap);
69510 rcu_read_unlock();
69511
69512 - return (ret == 0);
69513 + return ret;
69514 }
69515
69516 /**
69517 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
69518 int ret;
69519
69520 rcu_read_lock();
69521 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
69522 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
69523 rcu_read_unlock();
69524
69525 - return (ret == 0);
69526 + return ret;
69527 }
69528
69529 /**
69530 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
69531 BUG();
69532 }
69533
69534 - if (security_capable(current_cred(), ns, cap) == 0) {
69535 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
69536 current->flags |= PF_SUPERPRIV;
69537 return true;
69538 }
69539 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
69540 }
69541 EXPORT_SYMBOL(ns_capable);
69542
69543 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
69544 +{
69545 + if (unlikely(!cap_valid(cap))) {
69546 + printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
69547 + BUG();
69548 + }
69549 +
69550 + if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
69551 + current->flags |= PF_SUPERPRIV;
69552 + return true;
69553 + }
69554 + return false;
69555 +}
69556 +EXPORT_SYMBOL(ns_capable_nolog);
69557 +
69558 /**
69559 * capable - Determine if the current task has a superior capability in effect
69560 * @cap: The capability to be tested for
69561 @@ -408,6 +427,12 @@ bool capable(int cap)
69562 }
69563 EXPORT_SYMBOL(capable);
69564
69565 +bool capable_nolog(int cap)
69566 +{
69567 + return ns_capable_nolog(&init_user_ns, cap);
69568 +}
69569 +EXPORT_SYMBOL(capable_nolog);
69570 +
69571 /**
69572 * nsown_capable - Check superior capability to one's own user_ns
69573 * @cap: The capability in question
69574 @@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
69575
69576 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
69577 }
69578 +
69579 +bool inode_capable_nolog(const struct inode *inode, int cap)
69580 +{
69581 + struct user_namespace *ns = current_user_ns();
69582 +
69583 + return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
69584 +}
69585 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
69586 index 1e23664..570a83d 100644
69587 --- a/kernel/cgroup.c
69588 +++ b/kernel/cgroup.c
69589 @@ -5543,7 +5543,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
69590 struct css_set *cg = link->cg;
69591 struct task_struct *task;
69592 int count = 0;
69593 - seq_printf(seq, "css_set %p\n", cg);
69594 + seq_printf(seq, "css_set %pK\n", cg);
69595 list_for_each_entry(task, &cg->tasks, cg_list) {
69596 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
69597 seq_puts(seq, " ...\n");
69598 diff --git a/kernel/compat.c b/kernel/compat.c
69599 index 36700e9..73d770c 100644
69600 --- a/kernel/compat.c
69601 +++ b/kernel/compat.c
69602 @@ -13,6 +13,7 @@
69603
69604 #include <linux/linkage.h>
69605 #include <linux/compat.h>
69606 +#include <linux/module.h>
69607 #include <linux/errno.h>
69608 #include <linux/time.h>
69609 #include <linux/signal.h>
69610 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
69611 mm_segment_t oldfs;
69612 long ret;
69613
69614 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
69615 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
69616 oldfs = get_fs();
69617 set_fs(KERNEL_DS);
69618 ret = hrtimer_nanosleep_restart(restart);
69619 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
69620 oldfs = get_fs();
69621 set_fs(KERNEL_DS);
69622 ret = hrtimer_nanosleep(&tu,
69623 - rmtp ? (struct timespec __user *)&rmt : NULL,
69624 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
69625 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
69626 set_fs(oldfs);
69627
69628 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
69629 mm_segment_t old_fs = get_fs();
69630
69631 set_fs(KERNEL_DS);
69632 - ret = sys_sigpending((old_sigset_t __user *) &s);
69633 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
69634 set_fs(old_fs);
69635 if (ret == 0)
69636 ret = put_user(s, set);
69637 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
69638 mm_segment_t old_fs = get_fs();
69639
69640 set_fs(KERNEL_DS);
69641 - ret = sys_old_getrlimit(resource, &r);
69642 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
69643 set_fs(old_fs);
69644
69645 if (!ret) {
69646 @@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
69647 mm_segment_t old_fs = get_fs();
69648
69649 set_fs(KERNEL_DS);
69650 - ret = sys_getrusage(who, (struct rusage __user *) &r);
69651 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
69652 set_fs(old_fs);
69653
69654 if (ret)
69655 @@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
69656 set_fs (KERNEL_DS);
69657 ret = sys_wait4(pid,
69658 (stat_addr ?
69659 - (unsigned int __user *) &status : NULL),
69660 - options, (struct rusage __user *) &r);
69661 + (unsigned int __force_user *) &status : NULL),
69662 + options, (struct rusage __force_user *) &r);
69663 set_fs (old_fs);
69664
69665 if (ret > 0) {
69666 @@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
69667 memset(&info, 0, sizeof(info));
69668
69669 set_fs(KERNEL_DS);
69670 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
69671 - uru ? (struct rusage __user *)&ru : NULL);
69672 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
69673 + uru ? (struct rusage __force_user *)&ru : NULL);
69674 set_fs(old_fs);
69675
69676 if ((ret < 0) || (info.si_signo == 0))
69677 @@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
69678 oldfs = get_fs();
69679 set_fs(KERNEL_DS);
69680 err = sys_timer_settime(timer_id, flags,
69681 - (struct itimerspec __user *) &newts,
69682 - (struct itimerspec __user *) &oldts);
69683 + (struct itimerspec __force_user *) &newts,
69684 + (struct itimerspec __force_user *) &oldts);
69685 set_fs(oldfs);
69686 if (!err && old && put_compat_itimerspec(old, &oldts))
69687 return -EFAULT;
69688 @@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
69689 oldfs = get_fs();
69690 set_fs(KERNEL_DS);
69691 err = sys_timer_gettime(timer_id,
69692 - (struct itimerspec __user *) &ts);
69693 + (struct itimerspec __force_user *) &ts);
69694 set_fs(oldfs);
69695 if (!err && put_compat_itimerspec(setting, &ts))
69696 return -EFAULT;
69697 @@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
69698 oldfs = get_fs();
69699 set_fs(KERNEL_DS);
69700 err = sys_clock_settime(which_clock,
69701 - (struct timespec __user *) &ts);
69702 + (struct timespec __force_user *) &ts);
69703 set_fs(oldfs);
69704 return err;
69705 }
69706 @@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
69707 oldfs = get_fs();
69708 set_fs(KERNEL_DS);
69709 err = sys_clock_gettime(which_clock,
69710 - (struct timespec __user *) &ts);
69711 + (struct timespec __force_user *) &ts);
69712 set_fs(oldfs);
69713 if (!err && put_compat_timespec(&ts, tp))
69714 return -EFAULT;
69715 @@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
69716
69717 oldfs = get_fs();
69718 set_fs(KERNEL_DS);
69719 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
69720 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
69721 set_fs(oldfs);
69722
69723 err = compat_put_timex(utp, &txc);
69724 @@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
69725 oldfs = get_fs();
69726 set_fs(KERNEL_DS);
69727 err = sys_clock_getres(which_clock,
69728 - (struct timespec __user *) &ts);
69729 + (struct timespec __force_user *) &ts);
69730 set_fs(oldfs);
69731 if (!err && tp && put_compat_timespec(&ts, tp))
69732 return -EFAULT;
69733 @@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
69734 long err;
69735 mm_segment_t oldfs;
69736 struct timespec tu;
69737 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
69738 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
69739
69740 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
69741 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
69742 oldfs = get_fs();
69743 set_fs(KERNEL_DS);
69744 err = clock_nanosleep_restart(restart);
69745 @@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
69746 oldfs = get_fs();
69747 set_fs(KERNEL_DS);
69748 err = sys_clock_nanosleep(which_clock, flags,
69749 - (struct timespec __user *) &in,
69750 - (struct timespec __user *) &out);
69751 + (struct timespec __force_user *) &in,
69752 + (struct timespec __force_user *) &out);
69753 set_fs(oldfs);
69754
69755 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
69756 diff --git a/kernel/configs.c b/kernel/configs.c
69757 index 42e8fa0..9e7406b 100644
69758 --- a/kernel/configs.c
69759 +++ b/kernel/configs.c
69760 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
69761 struct proc_dir_entry *entry;
69762
69763 /* create the current config file */
69764 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
69765 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
69766 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
69767 + &ikconfig_file_ops);
69768 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69769 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
69770 + &ikconfig_file_ops);
69771 +#endif
69772 +#else
69773 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
69774 &ikconfig_file_ops);
69775 +#endif
69776 +
69777 if (!entry)
69778 return -ENOMEM;
69779
69780 diff --git a/kernel/cred.c b/kernel/cred.c
69781 index e0573a4..3874e41 100644
69782 --- a/kernel/cred.c
69783 +++ b/kernel/cred.c
69784 @@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
69785 validate_creds(cred);
69786 alter_cred_subscribers(cred, -1);
69787 put_cred(cred);
69788 +
69789 +#ifdef CONFIG_GRKERNSEC_SETXID
69790 + cred = (struct cred *) tsk->delayed_cred;
69791 + if (cred != NULL) {
69792 + tsk->delayed_cred = NULL;
69793 + validate_creds(cred);
69794 + alter_cred_subscribers(cred, -1);
69795 + put_cred(cred);
69796 + }
69797 +#endif
69798 }
69799
69800 /**
69801 @@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
69802 * Always returns 0 thus allowing this function to be tail-called at the end
69803 * of, say, sys_setgid().
69804 */
69805 -int commit_creds(struct cred *new)
69806 +static int __commit_creds(struct cred *new)
69807 {
69808 struct task_struct *task = current;
69809 const struct cred *old = task->real_cred;
69810 @@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
69811
69812 get_cred(new); /* we will require a ref for the subj creds too */
69813
69814 + gr_set_role_label(task, new->uid, new->gid);
69815 +
69816 /* dumpability changes */
69817 if (!uid_eq(old->euid, new->euid) ||
69818 !gid_eq(old->egid, new->egid) ||
69819 @@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
69820 put_cred(old);
69821 return 0;
69822 }
69823 +#ifdef CONFIG_GRKERNSEC_SETXID
69824 +extern int set_user(struct cred *new);
69825 +
69826 +void gr_delayed_cred_worker(void)
69827 +{
69828 + const struct cred *new = current->delayed_cred;
69829 + struct cred *ncred;
69830 +
69831 + current->delayed_cred = NULL;
69832 +
69833 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
69834 + // from doing get_cred on it when queueing this
69835 + put_cred(new);
69836 + return;
69837 + } else if (new == NULL)
69838 + return;
69839 +
69840 + ncred = prepare_creds();
69841 + if (!ncred)
69842 + goto die;
69843 + // uids
69844 + ncred->uid = new->uid;
69845 + ncred->euid = new->euid;
69846 + ncred->suid = new->suid;
69847 + ncred->fsuid = new->fsuid;
69848 + // gids
69849 + ncred->gid = new->gid;
69850 + ncred->egid = new->egid;
69851 + ncred->sgid = new->sgid;
69852 + ncred->fsgid = new->fsgid;
69853 + // groups
69854 + if (set_groups(ncred, new->group_info) < 0) {
69855 + abort_creds(ncred);
69856 + goto die;
69857 + }
69858 + // caps
69859 + ncred->securebits = new->securebits;
69860 + ncred->cap_inheritable = new->cap_inheritable;
69861 + ncred->cap_permitted = new->cap_permitted;
69862 + ncred->cap_effective = new->cap_effective;
69863 + ncred->cap_bset = new->cap_bset;
69864 +
69865 + if (set_user(ncred)) {
69866 + abort_creds(ncred);
69867 + goto die;
69868 + }
69869 +
69870 + // from doing get_cred on it when queueing this
69871 + put_cred(new);
69872 +
69873 + __commit_creds(ncred);
69874 + return;
69875 +die:
69876 + // from doing get_cred on it when queueing this
69877 + put_cred(new);
69878 + do_group_exit(SIGKILL);
69879 +}
69880 +#endif
69881 +
69882 +int commit_creds(struct cred *new)
69883 +{
69884 +#ifdef CONFIG_GRKERNSEC_SETXID
69885 + int ret;
69886 + int schedule_it = 0;
69887 + struct task_struct *t;
69888 +
69889 + /* we won't get called with tasklist_lock held for writing
69890 + and interrupts disabled as the cred struct in that case is
69891 + init_cred
69892 + */
69893 + if (grsec_enable_setxid && !current_is_single_threaded() &&
69894 + uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
69895 + !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
69896 + schedule_it = 1;
69897 + }
69898 + ret = __commit_creds(new);
69899 + if (schedule_it) {
69900 + rcu_read_lock();
69901 + read_lock(&tasklist_lock);
69902 + for (t = next_thread(current); t != current;
69903 + t = next_thread(t)) {
69904 + if (t->delayed_cred == NULL) {
69905 + t->delayed_cred = get_cred(new);
69906 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
69907 + set_tsk_need_resched(t);
69908 + }
69909 + }
69910 + read_unlock(&tasklist_lock);
69911 + rcu_read_unlock();
69912 + }
69913 + return ret;
69914 +#else
69915 + return __commit_creds(new);
69916 +#endif
69917 +}
69918 +
69919 EXPORT_SYMBOL(commit_creds);
69920
69921 /**
69922 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
69923 index 9a61738..c5c8f3a 100644
69924 --- a/kernel/debug/debug_core.c
69925 +++ b/kernel/debug/debug_core.c
69926 @@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
69927 */
69928 static atomic_t masters_in_kgdb;
69929 static atomic_t slaves_in_kgdb;
69930 -static atomic_t kgdb_break_tasklet_var;
69931 +static atomic_unchecked_t kgdb_break_tasklet_var;
69932 atomic_t kgdb_setting_breakpoint;
69933
69934 struct task_struct *kgdb_usethread;
69935 @@ -132,7 +132,7 @@ int kgdb_single_step;
69936 static pid_t kgdb_sstep_pid;
69937
69938 /* to keep track of the CPU which is doing the single stepping*/
69939 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
69940 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
69941
69942 /*
69943 * If you are debugging a problem where roundup (the collection of
69944 @@ -540,7 +540,7 @@ return_normal:
69945 * kernel will only try for the value of sstep_tries before
69946 * giving up and continuing on.
69947 */
69948 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
69949 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
69950 (kgdb_info[cpu].task &&
69951 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
69952 atomic_set(&kgdb_active, -1);
69953 @@ -634,8 +634,8 @@ cpu_master_loop:
69954 }
69955
69956 kgdb_restore:
69957 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
69958 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
69959 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
69960 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
69961 if (kgdb_info[sstep_cpu].task)
69962 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
69963 else
69964 @@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
69965 static void kgdb_tasklet_bpt(unsigned long ing)
69966 {
69967 kgdb_breakpoint();
69968 - atomic_set(&kgdb_break_tasklet_var, 0);
69969 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
69970 }
69971
69972 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
69973
69974 void kgdb_schedule_breakpoint(void)
69975 {
69976 - if (atomic_read(&kgdb_break_tasklet_var) ||
69977 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
69978 atomic_read(&kgdb_active) != -1 ||
69979 atomic_read(&kgdb_setting_breakpoint))
69980 return;
69981 - atomic_inc(&kgdb_break_tasklet_var);
69982 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
69983 tasklet_schedule(&kgdb_tasklet_breakpoint);
69984 }
69985 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
69986 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
69987 index 8875254..7cf4928 100644
69988 --- a/kernel/debug/kdb/kdb_main.c
69989 +++ b/kernel/debug/kdb/kdb_main.c
69990 @@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
69991 continue;
69992
69993 kdb_printf("%-20s%8u 0x%p ", mod->name,
69994 - mod->core_size, (void *)mod);
69995 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
69996 #ifdef CONFIG_MODULE_UNLOAD
69997 kdb_printf("%4ld ", module_refcount(mod));
69998 #endif
69999 @@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
70000 kdb_printf(" (Loading)");
70001 else
70002 kdb_printf(" (Live)");
70003 - kdb_printf(" 0x%p", mod->module_core);
70004 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
70005
70006 #ifdef CONFIG_MODULE_UNLOAD
70007 {
70008 diff --git a/kernel/events/core.c b/kernel/events/core.c
70009 index 7b6646a..3cb1135 100644
70010 --- a/kernel/events/core.c
70011 +++ b/kernel/events/core.c
70012 @@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
70013 return 0;
70014 }
70015
70016 -static atomic64_t perf_event_id;
70017 +static atomic64_unchecked_t perf_event_id;
70018
70019 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
70020 enum event_type_t event_type);
70021 @@ -2677,7 +2677,7 @@ static void __perf_event_read(void *info)
70022
70023 static inline u64 perf_event_count(struct perf_event *event)
70024 {
70025 - return local64_read(&event->count) + atomic64_read(&event->child_count);
70026 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
70027 }
70028
70029 static u64 perf_event_read(struct perf_event *event)
70030 @@ -3007,9 +3007,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
70031 mutex_lock(&event->child_mutex);
70032 total += perf_event_read(event);
70033 *enabled += event->total_time_enabled +
70034 - atomic64_read(&event->child_total_time_enabled);
70035 + atomic64_read_unchecked(&event->child_total_time_enabled);
70036 *running += event->total_time_running +
70037 - atomic64_read(&event->child_total_time_running);
70038 + atomic64_read_unchecked(&event->child_total_time_running);
70039
70040 list_for_each_entry(child, &event->child_list, child_list) {
70041 total += perf_event_read(child);
70042 @@ -3412,10 +3412,10 @@ void perf_event_update_userpage(struct perf_event *event)
70043 userpg->offset -= local64_read(&event->hw.prev_count);
70044
70045 userpg->time_enabled = enabled +
70046 - atomic64_read(&event->child_total_time_enabled);
70047 + atomic64_read_unchecked(&event->child_total_time_enabled);
70048
70049 userpg->time_running = running +
70050 - atomic64_read(&event->child_total_time_running);
70051 + atomic64_read_unchecked(&event->child_total_time_running);
70052
70053 arch_perf_update_userpage(userpg, now);
70054
70055 @@ -3974,11 +3974,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
70056 values[n++] = perf_event_count(event);
70057 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
70058 values[n++] = enabled +
70059 - atomic64_read(&event->child_total_time_enabled);
70060 + atomic64_read_unchecked(&event->child_total_time_enabled);
70061 }
70062 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
70063 values[n++] = running +
70064 - atomic64_read(&event->child_total_time_running);
70065 + atomic64_read_unchecked(&event->child_total_time_running);
70066 }
70067 if (read_format & PERF_FORMAT_ID)
70068 values[n++] = primary_event_id(event);
70069 @@ -4721,12 +4721,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
70070 * need to add enough zero bytes after the string to handle
70071 * the 64bit alignment we do later.
70072 */
70073 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
70074 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
70075 if (!buf) {
70076 name = strncpy(tmp, "//enomem", sizeof(tmp));
70077 goto got_name;
70078 }
70079 - name = d_path(&file->f_path, buf, PATH_MAX);
70080 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
70081 if (IS_ERR(name)) {
70082 name = strncpy(tmp, "//toolong", sizeof(tmp));
70083 goto got_name;
70084 @@ -6165,7 +6165,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
70085 event->parent = parent_event;
70086
70087 event->ns = get_pid_ns(task_active_pid_ns(current));
70088 - event->id = atomic64_inc_return(&perf_event_id);
70089 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
70090
70091 event->state = PERF_EVENT_STATE_INACTIVE;
70092
70093 @@ -6790,10 +6790,10 @@ static void sync_child_event(struct perf_event *child_event,
70094 /*
70095 * Add back the child's count to the parent's count:
70096 */
70097 - atomic64_add(child_val, &parent_event->child_count);
70098 - atomic64_add(child_event->total_time_enabled,
70099 + atomic64_add_unchecked(child_val, &parent_event->child_count);
70100 + atomic64_add_unchecked(child_event->total_time_enabled,
70101 &parent_event->child_total_time_enabled);
70102 - atomic64_add(child_event->total_time_running,
70103 + atomic64_add_unchecked(child_event->total_time_running,
70104 &parent_event->child_total_time_running);
70105
70106 /*
70107 diff --git a/kernel/exit.c b/kernel/exit.c
70108 index b4df219..f13c02d 100644
70109 --- a/kernel/exit.c
70110 +++ b/kernel/exit.c
70111 @@ -170,6 +170,10 @@ void release_task(struct task_struct * p)
70112 struct task_struct *leader;
70113 int zap_leader;
70114 repeat:
70115 +#ifdef CONFIG_NET
70116 + gr_del_task_from_ip_table(p);
70117 +#endif
70118 +
70119 /* don't need to get the RCU readlock here - the process is dead and
70120 * can't be modifying its own credentials. But shut RCU-lockdep up */
70121 rcu_read_lock();
70122 @@ -338,7 +342,7 @@ int allow_signal(int sig)
70123 * know it'll be handled, so that they don't get converted to
70124 * SIGKILL or just silently dropped.
70125 */
70126 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
70127 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
70128 recalc_sigpending();
70129 spin_unlock_irq(&current->sighand->siglock);
70130 return 0;
70131 @@ -708,6 +712,8 @@ void do_exit(long code)
70132 struct task_struct *tsk = current;
70133 int group_dead;
70134
70135 + set_fs(USER_DS);
70136 +
70137 profile_task_exit(tsk);
70138
70139 WARN_ON(blk_needs_flush_plug(tsk));
70140 @@ -724,7 +730,6 @@ void do_exit(long code)
70141 * mm_release()->clear_child_tid() from writing to a user-controlled
70142 * kernel address.
70143 */
70144 - set_fs(USER_DS);
70145
70146 ptrace_event(PTRACE_EVENT_EXIT, code);
70147
70148 @@ -783,6 +788,9 @@ void do_exit(long code)
70149 tsk->exit_code = code;
70150 taskstats_exit(tsk, group_dead);
70151
70152 + gr_acl_handle_psacct(tsk, code);
70153 + gr_acl_handle_exit();
70154 +
70155 exit_mm(tsk);
70156
70157 if (group_dead)
70158 @@ -903,7 +911,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
70159 * Take down every thread in the group. This is called by fatal signals
70160 * as well as by sys_exit_group (below).
70161 */
70162 -void
70163 +__noreturn void
70164 do_group_exit(int exit_code)
70165 {
70166 struct signal_struct *sig = current->signal;
70167 diff --git a/kernel/fork.c b/kernel/fork.c
70168 index c535f33..1d768f9 100644
70169 --- a/kernel/fork.c
70170 +++ b/kernel/fork.c
70171 @@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
70172 *stackend = STACK_END_MAGIC; /* for overflow detection */
70173
70174 #ifdef CONFIG_CC_STACKPROTECTOR
70175 - tsk->stack_canary = get_random_int();
70176 + tsk->stack_canary = pax_get_random_long();
70177 #endif
70178
70179 /*
70180 @@ -344,13 +344,81 @@ free_tsk:
70181 }
70182
70183 #ifdef CONFIG_MMU
70184 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
70185 +{
70186 + struct vm_area_struct *tmp;
70187 + unsigned long charge;
70188 + struct mempolicy *pol;
70189 + struct file *file;
70190 +
70191 + charge = 0;
70192 + if (mpnt->vm_flags & VM_ACCOUNT) {
70193 + unsigned long len = vma_pages(mpnt);
70194 +
70195 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
70196 + goto fail_nomem;
70197 + charge = len;
70198 + }
70199 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
70200 + if (!tmp)
70201 + goto fail_nomem;
70202 + *tmp = *mpnt;
70203 + tmp->vm_mm = mm;
70204 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
70205 + pol = mpol_dup(vma_policy(mpnt));
70206 + if (IS_ERR(pol))
70207 + goto fail_nomem_policy;
70208 + vma_set_policy(tmp, pol);
70209 + if (anon_vma_fork(tmp, mpnt))
70210 + goto fail_nomem_anon_vma_fork;
70211 + tmp->vm_flags &= ~VM_LOCKED;
70212 + tmp->vm_next = tmp->vm_prev = NULL;
70213 + tmp->vm_mirror = NULL;
70214 + file = tmp->vm_file;
70215 + if (file) {
70216 + struct inode *inode = file->f_path.dentry->d_inode;
70217 + struct address_space *mapping = file->f_mapping;
70218 +
70219 + get_file(file);
70220 + if (tmp->vm_flags & VM_DENYWRITE)
70221 + atomic_dec(&inode->i_writecount);
70222 + mutex_lock(&mapping->i_mmap_mutex);
70223 + if (tmp->vm_flags & VM_SHARED)
70224 + mapping->i_mmap_writable++;
70225 + flush_dcache_mmap_lock(mapping);
70226 + /* insert tmp into the share list, just after mpnt */
70227 + if (unlikely(tmp->vm_flags & VM_NONLINEAR))
70228 + vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
70229 + else
70230 + vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
70231 + flush_dcache_mmap_unlock(mapping);
70232 + mutex_unlock(&mapping->i_mmap_mutex);
70233 + }
70234 +
70235 + /*
70236 + * Clear hugetlb-related page reserves for children. This only
70237 + * affects MAP_PRIVATE mappings. Faults generated by the child
70238 + * are not guaranteed to succeed, even if read-only
70239 + */
70240 + if (is_vm_hugetlb_page(tmp))
70241 + reset_vma_resv_huge_pages(tmp);
70242 +
70243 + return tmp;
70244 +
70245 +fail_nomem_anon_vma_fork:
70246 + mpol_put(pol);
70247 +fail_nomem_policy:
70248 + kmem_cache_free(vm_area_cachep, tmp);
70249 +fail_nomem:
70250 + vm_unacct_memory(charge);
70251 + return NULL;
70252 +}
70253 +
70254 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70255 {
70256 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
70257 struct rb_node **rb_link, *rb_parent;
70258 int retval;
70259 - unsigned long charge;
70260 - struct mempolicy *pol;
70261
70262 uprobe_start_dup_mmap();
70263 down_write(&oldmm->mmap_sem);
70264 @@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70265 mm->locked_vm = 0;
70266 mm->mmap = NULL;
70267 mm->mmap_cache = NULL;
70268 - mm->free_area_cache = oldmm->mmap_base;
70269 - mm->cached_hole_size = ~0UL;
70270 + mm->free_area_cache = oldmm->free_area_cache;
70271 + mm->cached_hole_size = oldmm->cached_hole_size;
70272 mm->map_count = 0;
70273 cpumask_clear(mm_cpumask(mm));
70274 mm->mm_rb = RB_ROOT;
70275 @@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70276
70277 prev = NULL;
70278 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
70279 - struct file *file;
70280 -
70281 if (mpnt->vm_flags & VM_DONTCOPY) {
70282 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
70283 -vma_pages(mpnt));
70284 continue;
70285 }
70286 - charge = 0;
70287 - if (mpnt->vm_flags & VM_ACCOUNT) {
70288 - unsigned long len = vma_pages(mpnt);
70289 -
70290 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
70291 - goto fail_nomem;
70292 - charge = len;
70293 - }
70294 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
70295 - if (!tmp)
70296 - goto fail_nomem;
70297 - *tmp = *mpnt;
70298 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
70299 - pol = mpol_dup(vma_policy(mpnt));
70300 - retval = PTR_ERR(pol);
70301 - if (IS_ERR(pol))
70302 - goto fail_nomem_policy;
70303 - vma_set_policy(tmp, pol);
70304 - tmp->vm_mm = mm;
70305 - if (anon_vma_fork(tmp, mpnt))
70306 - goto fail_nomem_anon_vma_fork;
70307 - tmp->vm_flags &= ~VM_LOCKED;
70308 - tmp->vm_next = tmp->vm_prev = NULL;
70309 - file = tmp->vm_file;
70310 - if (file) {
70311 - struct inode *inode = file->f_path.dentry->d_inode;
70312 - struct address_space *mapping = file->f_mapping;
70313 -
70314 - get_file(file);
70315 - if (tmp->vm_flags & VM_DENYWRITE)
70316 - atomic_dec(&inode->i_writecount);
70317 - mutex_lock(&mapping->i_mmap_mutex);
70318 - if (tmp->vm_flags & VM_SHARED)
70319 - mapping->i_mmap_writable++;
70320 - flush_dcache_mmap_lock(mapping);
70321 - /* insert tmp into the share list, just after mpnt */
70322 - if (unlikely(tmp->vm_flags & VM_NONLINEAR))
70323 - vma_nonlinear_insert(tmp,
70324 - &mapping->i_mmap_nonlinear);
70325 - else
70326 - vma_interval_tree_insert_after(tmp, mpnt,
70327 - &mapping->i_mmap);
70328 - flush_dcache_mmap_unlock(mapping);
70329 - mutex_unlock(&mapping->i_mmap_mutex);
70330 + tmp = dup_vma(mm, oldmm, mpnt);
70331 + if (!tmp) {
70332 + retval = -ENOMEM;
70333 + goto out;
70334 }
70335
70336 /*
70337 @@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70338 if (retval)
70339 goto out;
70340 }
70341 +
70342 +#ifdef CONFIG_PAX_SEGMEXEC
70343 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
70344 + struct vm_area_struct *mpnt_m;
70345 +
70346 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
70347 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
70348 +
70349 + if (!mpnt->vm_mirror)
70350 + continue;
70351 +
70352 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
70353 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
70354 + mpnt->vm_mirror = mpnt_m;
70355 + } else {
70356 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
70357 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
70358 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
70359 + mpnt->vm_mirror->vm_mirror = mpnt;
70360 + }
70361 + }
70362 + BUG_ON(mpnt_m);
70363 + }
70364 +#endif
70365 +
70366 /* a new mm has just been created */
70367 arch_dup_mmap(oldmm, mm);
70368 retval = 0;
70369 @@ -472,14 +523,6 @@ out:
70370 up_write(&oldmm->mmap_sem);
70371 uprobe_end_dup_mmap();
70372 return retval;
70373 -fail_nomem_anon_vma_fork:
70374 - mpol_put(pol);
70375 -fail_nomem_policy:
70376 - kmem_cache_free(vm_area_cachep, tmp);
70377 -fail_nomem:
70378 - retval = -ENOMEM;
70379 - vm_unacct_memory(charge);
70380 - goto out;
70381 }
70382
70383 static inline int mm_alloc_pgd(struct mm_struct *mm)
70384 @@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
70385 return ERR_PTR(err);
70386
70387 mm = get_task_mm(task);
70388 - if (mm && mm != current->mm &&
70389 - !ptrace_may_access(task, mode)) {
70390 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
70391 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
70392 mmput(mm);
70393 mm = ERR_PTR(-EACCES);
70394 }
70395 @@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
70396 spin_unlock(&fs->lock);
70397 return -EAGAIN;
70398 }
70399 - fs->users++;
70400 + atomic_inc(&fs->users);
70401 spin_unlock(&fs->lock);
70402 return 0;
70403 }
70404 tsk->fs = copy_fs_struct(fs);
70405 if (!tsk->fs)
70406 return -ENOMEM;
70407 + /* Carry through gr_chroot_dentry and is_chrooted instead
70408 + of recomputing it here. Already copied when the task struct
70409 + is duplicated. This allows pivot_root to not be treated as
70410 + a chroot
70411 + */
70412 + //gr_set_chroot_entries(tsk, &tsk->fs->root);
70413 +
70414 return 0;
70415 }
70416
70417 @@ -1193,6 +1243,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
70418 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
70419 #endif
70420 retval = -EAGAIN;
70421 +
70422 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
70423 +
70424 if (atomic_read(&p->real_cred->user->processes) >=
70425 task_rlimit(p, RLIMIT_NPROC)) {
70426 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
70427 @@ -1432,6 +1485,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
70428 goto bad_fork_free_pid;
70429 }
70430
70431 + /* synchronizes with gr_set_acls()
70432 + we need to call this past the point of no return for fork()
70433 + */
70434 + gr_copy_label(p);
70435 +
70436 if (clone_flags & CLONE_THREAD) {
70437 current->signal->nr_threads++;
70438 atomic_inc(&current->signal->live);
70439 @@ -1515,6 +1573,8 @@ bad_fork_cleanup_count:
70440 bad_fork_free:
70441 free_task(p);
70442 fork_out:
70443 + gr_log_forkfail(retval);
70444 +
70445 return ERR_PTR(retval);
70446 }
70447
70448 @@ -1565,6 +1625,23 @@ long do_fork(unsigned long clone_flags,
70449 return -EINVAL;
70450 }
70451
70452 +#ifdef CONFIG_GRKERNSEC
70453 + if (clone_flags & CLONE_NEWUSER) {
70454 + /*
70455 + * This doesn't really inspire confidence:
70456 + * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
70457 + * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
70458 + * Increases kernel attack surface in areas developers
70459 + * previously cared little about ("low importance due
70460 + * to requiring "root" capability")
70461 + * To be removed when this code receives *proper* review
70462 + */
70463 + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
70464 + !capable(CAP_SETGID))
70465 + return -EPERM;
70466 + }
70467 +#endif
70468 +
70469 /*
70470 * Determine whether and which event to report to ptracer. When
70471 * called from kernel_thread or CLONE_UNTRACED is explicitly
70472 @@ -1599,6 +1676,8 @@ long do_fork(unsigned long clone_flags,
70473 if (clone_flags & CLONE_PARENT_SETTID)
70474 put_user(nr, parent_tidptr);
70475
70476 + gr_handle_brute_check();
70477 +
70478 if (clone_flags & CLONE_VFORK) {
70479 p->vfork_done = &vfork;
70480 init_completion(&vfork);
70481 @@ -1752,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
70482 return 0;
70483
70484 /* don't need lock here; in the worst case we'll do useless copy */
70485 - if (fs->users == 1)
70486 + if (atomic_read(&fs->users) == 1)
70487 return 0;
70488
70489 *new_fsp = copy_fs_struct(fs);
70490 @@ -1866,7 +1945,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
70491 fs = current->fs;
70492 spin_lock(&fs->lock);
70493 current->fs = new_fs;
70494 - if (--fs->users)
70495 + gr_set_chroot_entries(current, &current->fs->root);
70496 + if (atomic_dec_return(&fs->users))
70497 new_fs = NULL;
70498 else
70499 new_fs = fs;
70500 diff --git a/kernel/futex.c b/kernel/futex.c
70501 index 8879430..31696f1 100644
70502 --- a/kernel/futex.c
70503 +++ b/kernel/futex.c
70504 @@ -54,6 +54,7 @@
70505 #include <linux/mount.h>
70506 #include <linux/pagemap.h>
70507 #include <linux/syscalls.h>
70508 +#include <linux/ptrace.h>
70509 #include <linux/signal.h>
70510 #include <linux/export.h>
70511 #include <linux/magic.h>
70512 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
70513 struct page *page, *page_head;
70514 int err, ro = 0;
70515
70516 +#ifdef CONFIG_PAX_SEGMEXEC
70517 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
70518 + return -EFAULT;
70519 +#endif
70520 +
70521 /*
70522 * The futex address must be "naturally" aligned.
70523 */
70524 @@ -2731,6 +2737,7 @@ static int __init futex_init(void)
70525 {
70526 u32 curval;
70527 int i;
70528 + mm_segment_t oldfs;
70529
70530 /*
70531 * This will fail and we want it. Some arch implementations do
70532 @@ -2742,8 +2749,11 @@ static int __init futex_init(void)
70533 * implementation, the non-functional ones will return
70534 * -ENOSYS.
70535 */
70536 + oldfs = get_fs();
70537 + set_fs(USER_DS);
70538 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
70539 futex_cmpxchg_enabled = 1;
70540 + set_fs(oldfs);
70541
70542 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
70543 plist_head_init(&futex_queues[i].chain);
70544 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
70545 index 9b22d03..6295b62 100644
70546 --- a/kernel/gcov/base.c
70547 +++ b/kernel/gcov/base.c
70548 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
70549 }
70550
70551 #ifdef CONFIG_MODULES
70552 -static inline int within(void *addr, void *start, unsigned long size)
70553 -{
70554 - return ((addr >= start) && (addr < start + size));
70555 -}
70556 -
70557 /* Update list and generate events when modules are unloaded. */
70558 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
70559 void *data)
70560 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
70561 prev = NULL;
70562 /* Remove entries located in module from linked list. */
70563 for (info = gcov_info_head; info; info = info->next) {
70564 - if (within(info, mod->module_core, mod->core_size)) {
70565 + if (within_module_core_rw((unsigned long)info, mod)) {
70566 if (prev)
70567 prev->next = info->next;
70568 else
70569 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
70570 index cdd5607..c3fc919 100644
70571 --- a/kernel/hrtimer.c
70572 +++ b/kernel/hrtimer.c
70573 @@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
70574 local_irq_restore(flags);
70575 }
70576
70577 -static void run_hrtimer_softirq(struct softirq_action *h)
70578 +static void run_hrtimer_softirq(void)
70579 {
70580 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
70581
70582 @@ -1751,7 +1751,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
70583 return NOTIFY_OK;
70584 }
70585
70586 -static struct notifier_block __cpuinitdata hrtimers_nb = {
70587 +static struct notifier_block hrtimers_nb = {
70588 .notifier_call = hrtimer_cpu_notify,
70589 };
70590
70591 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
70592 index 60f48fa..7f3a770 100644
70593 --- a/kernel/jump_label.c
70594 +++ b/kernel/jump_label.c
70595 @@ -13,6 +13,7 @@
70596 #include <linux/sort.h>
70597 #include <linux/err.h>
70598 #include <linux/static_key.h>
70599 +#include <linux/mm.h>
70600
70601 #ifdef HAVE_JUMP_LABEL
70602
70603 @@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
70604
70605 size = (((unsigned long)stop - (unsigned long)start)
70606 / sizeof(struct jump_entry));
70607 + pax_open_kernel();
70608 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
70609 + pax_close_kernel();
70610 }
70611
70612 static void jump_label_update(struct static_key *key, int enable);
70613 @@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
70614 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
70615 struct jump_entry *iter;
70616
70617 + pax_open_kernel();
70618 for (iter = iter_start; iter < iter_stop; iter++) {
70619 if (within_module_init(iter->code, mod))
70620 iter->code = 0;
70621 }
70622 + pax_close_kernel();
70623 }
70624
70625 static int
70626 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
70627 index 2169fee..706ccca 100644
70628 --- a/kernel/kallsyms.c
70629 +++ b/kernel/kallsyms.c
70630 @@ -11,6 +11,9 @@
70631 * Changed the compression method from stem compression to "table lookup"
70632 * compression (see scripts/kallsyms.c for a more complete description)
70633 */
70634 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70635 +#define __INCLUDED_BY_HIDESYM 1
70636 +#endif
70637 #include <linux/kallsyms.h>
70638 #include <linux/module.h>
70639 #include <linux/init.h>
70640 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
70641
70642 static inline int is_kernel_inittext(unsigned long addr)
70643 {
70644 + if (system_state != SYSTEM_BOOTING)
70645 + return 0;
70646 +
70647 if (addr >= (unsigned long)_sinittext
70648 && addr <= (unsigned long)_einittext)
70649 return 1;
70650 return 0;
70651 }
70652
70653 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70654 +#ifdef CONFIG_MODULES
70655 +static inline int is_module_text(unsigned long addr)
70656 +{
70657 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
70658 + return 1;
70659 +
70660 + addr = ktla_ktva(addr);
70661 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
70662 +}
70663 +#else
70664 +static inline int is_module_text(unsigned long addr)
70665 +{
70666 + return 0;
70667 +}
70668 +#endif
70669 +#endif
70670 +
70671 static inline int is_kernel_text(unsigned long addr)
70672 {
70673 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
70674 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
70675
70676 static inline int is_kernel(unsigned long addr)
70677 {
70678 +
70679 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70680 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
70681 + return 1;
70682 +
70683 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
70684 +#else
70685 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
70686 +#endif
70687 +
70688 return 1;
70689 return in_gate_area_no_mm(addr);
70690 }
70691
70692 static int is_ksym_addr(unsigned long addr)
70693 {
70694 +
70695 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70696 + if (is_module_text(addr))
70697 + return 0;
70698 +#endif
70699 +
70700 if (all_var)
70701 return is_kernel(addr);
70702
70703 @@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
70704
70705 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
70706 {
70707 - iter->name[0] = '\0';
70708 iter->nameoff = get_symbol_offset(new_pos);
70709 iter->pos = new_pos;
70710 }
70711 @@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
70712 {
70713 struct kallsym_iter *iter = m->private;
70714
70715 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70716 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
70717 + return 0;
70718 +#endif
70719 +
70720 /* Some debugging symbols have no name. Ignore them. */
70721 if (!iter->name[0])
70722 return 0;
70723 @@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
70724 */
70725 type = iter->exported ? toupper(iter->type) :
70726 tolower(iter->type);
70727 +
70728 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
70729 type, iter->name, iter->module_name);
70730 } else
70731 @@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
70732 struct kallsym_iter *iter;
70733 int ret;
70734
70735 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
70736 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
70737 if (!iter)
70738 return -ENOMEM;
70739 reset_iter(iter, 0);
70740 diff --git a/kernel/kcmp.c b/kernel/kcmp.c
70741 index e30ac0f..3528cac 100644
70742 --- a/kernel/kcmp.c
70743 +++ b/kernel/kcmp.c
70744 @@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
70745 struct task_struct *task1, *task2;
70746 int ret;
70747
70748 +#ifdef CONFIG_GRKERNSEC
70749 + return -ENOSYS;
70750 +#endif
70751 +
70752 rcu_read_lock();
70753
70754 /*
70755 diff --git a/kernel/kexec.c b/kernel/kexec.c
70756 index 5e4bd78..00c5b91 100644
70757 --- a/kernel/kexec.c
70758 +++ b/kernel/kexec.c
70759 @@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
70760 unsigned long flags)
70761 {
70762 struct compat_kexec_segment in;
70763 - struct kexec_segment out, __user *ksegments;
70764 + struct kexec_segment out;
70765 + struct kexec_segment __user *ksegments;
70766 unsigned long i, result;
70767
70768 /* Don't allow clients that don't understand the native
70769 diff --git a/kernel/kmod.c b/kernel/kmod.c
70770 index 0023a87..b893e79 100644
70771 --- a/kernel/kmod.c
70772 +++ b/kernel/kmod.c
70773 @@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
70774 kfree(info->argv);
70775 }
70776
70777 -static int call_modprobe(char *module_name, int wait)
70778 +static int call_modprobe(char *module_name, char *module_param, int wait)
70779 {
70780 static char *envp[] = {
70781 "HOME=/",
70782 @@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
70783 NULL
70784 };
70785
70786 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
70787 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
70788 if (!argv)
70789 goto out;
70790
70791 @@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
70792 argv[1] = "-q";
70793 argv[2] = "--";
70794 argv[3] = module_name; /* check free_modprobe_argv() */
70795 - argv[4] = NULL;
70796 + argv[4] = module_param;
70797 + argv[5] = NULL;
70798
70799 return call_usermodehelper_fns(modprobe_path, argv, envp,
70800 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
70801 @@ -120,9 +121,8 @@ out:
70802 * If module auto-loading support is disabled then this function
70803 * becomes a no-operation.
70804 */
70805 -int __request_module(bool wait, const char *fmt, ...)
70806 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
70807 {
70808 - va_list args;
70809 char module_name[MODULE_NAME_LEN];
70810 unsigned int max_modprobes;
70811 int ret;
70812 @@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
70813 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
70814 static int kmod_loop_msg;
70815
70816 - va_start(args, fmt);
70817 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
70818 - va_end(args);
70819 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
70820 if (ret >= MODULE_NAME_LEN)
70821 return -ENAMETOOLONG;
70822
70823 @@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
70824 if (ret)
70825 return ret;
70826
70827 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
70828 + if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
70829 + /* hack to workaround consolekit/udisks stupidity */
70830 + read_lock(&tasklist_lock);
70831 + if (!strcmp(current->comm, "mount") &&
70832 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
70833 + read_unlock(&tasklist_lock);
70834 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
70835 + return -EPERM;
70836 + }
70837 + read_unlock(&tasklist_lock);
70838 + }
70839 +#endif
70840 +
70841 /* If modprobe needs a service that is in a module, we get a recursive
70842 * loop. Limit the number of running kmod threads to max_threads/2 or
70843 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
70844 @@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
70845
70846 trace_module_request(module_name, wait, _RET_IP_);
70847
70848 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
70849 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
70850
70851 atomic_dec(&kmod_concurrent);
70852 return ret;
70853 }
70854 +
70855 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
70856 +{
70857 + va_list args;
70858 + int ret;
70859 +
70860 + va_start(args, fmt);
70861 + ret = ____request_module(wait, module_param, fmt, args);
70862 + va_end(args);
70863 +
70864 + return ret;
70865 +}
70866 +
70867 +int __request_module(bool wait, const char *fmt, ...)
70868 +{
70869 + va_list args;
70870 + int ret;
70871 +
70872 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
70873 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
70874 + char module_param[MODULE_NAME_LEN];
70875 +
70876 + memset(module_param, 0, sizeof(module_param));
70877 +
70878 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
70879 +
70880 + va_start(args, fmt);
70881 + ret = ____request_module(wait, module_param, fmt, args);
70882 + va_end(args);
70883 +
70884 + return ret;
70885 + }
70886 +#endif
70887 +
70888 + va_start(args, fmt);
70889 + ret = ____request_module(wait, NULL, fmt, args);
70890 + va_end(args);
70891 +
70892 + return ret;
70893 +}
70894 +
70895 EXPORT_SYMBOL(__request_module);
70896 #endif /* CONFIG_MODULES */
70897
70898 @@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
70899 *
70900 * Thus the __user pointer cast is valid here.
70901 */
70902 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
70903 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
70904
70905 /*
70906 * If ret is 0, either ____call_usermodehelper failed and the
70907 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
70908 index 098f396..fe85ff1 100644
70909 --- a/kernel/kprobes.c
70910 +++ b/kernel/kprobes.c
70911 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
70912 * kernel image and loaded module images reside. This is required
70913 * so x86_64 can correctly handle the %rip-relative fixups.
70914 */
70915 - kip->insns = module_alloc(PAGE_SIZE);
70916 + kip->insns = module_alloc_exec(PAGE_SIZE);
70917 if (!kip->insns) {
70918 kfree(kip);
70919 return NULL;
70920 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
70921 */
70922 if (!list_is_singular(&kip->list)) {
70923 list_del(&kip->list);
70924 - module_free(NULL, kip->insns);
70925 + module_free_exec(NULL, kip->insns);
70926 kfree(kip);
70927 }
70928 return 1;
70929 @@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
70930 {
70931 int i, err = 0;
70932 unsigned long offset = 0, size = 0;
70933 - char *modname, namebuf[128];
70934 + char *modname, namebuf[KSYM_NAME_LEN];
70935 const char *symbol_name;
70936 void *addr;
70937 struct kprobe_blackpoint *kb;
70938 @@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
70939 kprobe_type = "k";
70940
70941 if (sym)
70942 - seq_printf(pi, "%p %s %s+0x%x %s ",
70943 + seq_printf(pi, "%pK %s %s+0x%x %s ",
70944 p->addr, kprobe_type, sym, offset,
70945 (modname ? modname : " "));
70946 else
70947 - seq_printf(pi, "%p %s %p ",
70948 + seq_printf(pi, "%pK %s %pK ",
70949 p->addr, kprobe_type, p->addr);
70950
70951 if (!pp)
70952 @@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
70953 const char *sym = NULL;
70954 unsigned int i = *(loff_t *) v;
70955 unsigned long offset = 0;
70956 - char *modname, namebuf[128];
70957 + char *modname, namebuf[KSYM_NAME_LEN];
70958
70959 head = &kprobe_table[i];
70960 preempt_disable();
70961 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
70962 index 6ada93c..55baf4d 100644
70963 --- a/kernel/ksysfs.c
70964 +++ b/kernel/ksysfs.c
70965 @@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
70966 {
70967 if (count+1 > UEVENT_HELPER_PATH_LEN)
70968 return -ENOENT;
70969 + if (!capable(CAP_SYS_ADMIN))
70970 + return -EPERM;
70971 memcpy(uevent_helper, buf, count);
70972 uevent_helper[count] = '\0';
70973 if (count && uevent_helper[count-1] == '\n')
70974 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
70975 index 7981e5b..7f2105c 100644
70976 --- a/kernel/lockdep.c
70977 +++ b/kernel/lockdep.c
70978 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
70979 end = (unsigned long) &_end,
70980 addr = (unsigned long) obj;
70981
70982 +#ifdef CONFIG_PAX_KERNEXEC
70983 + start = ktla_ktva(start);
70984 +#endif
70985 +
70986 /*
70987 * static variable?
70988 */
70989 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
70990 if (!static_obj(lock->key)) {
70991 debug_locks_off();
70992 printk("INFO: trying to register non-static key.\n");
70993 + printk("lock:%pS key:%pS.\n", lock, lock->key);
70994 printk("the code is fine but needs lockdep annotation.\n");
70995 printk("turning off the locking correctness validator.\n");
70996 dump_stack();
70997 @@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
70998 if (!class)
70999 return 0;
71000 }
71001 - atomic_inc((atomic_t *)&class->ops);
71002 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
71003 if (very_verbose(class)) {
71004 printk("\nacquire class [%p] %s", class->key, class->name);
71005 if (class->name_version > 1)
71006 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
71007 index b2c71c5..7b88d63 100644
71008 --- a/kernel/lockdep_proc.c
71009 +++ b/kernel/lockdep_proc.c
71010 @@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
71011 return 0;
71012 }
71013
71014 - seq_printf(m, "%p", class->key);
71015 + seq_printf(m, "%pK", class->key);
71016 #ifdef CONFIG_DEBUG_LOCKDEP
71017 seq_printf(m, " OPS:%8ld", class->ops);
71018 #endif
71019 @@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
71020
71021 list_for_each_entry(entry, &class->locks_after, entry) {
71022 if (entry->distance == 1) {
71023 - seq_printf(m, " -> [%p] ", entry->class->key);
71024 + seq_printf(m, " -> [%pK] ", entry->class->key);
71025 print_name(m, entry->class);
71026 seq_puts(m, "\n");
71027 }
71028 @@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
71029 if (!class->key)
71030 continue;
71031
71032 - seq_printf(m, "[%p] ", class->key);
71033 + seq_printf(m, "[%pK] ", class->key);
71034 print_name(m, class);
71035 seq_puts(m, "\n");
71036 }
71037 @@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
71038 if (!i)
71039 seq_line(m, '-', 40-namelen, namelen);
71040
71041 - snprintf(ip, sizeof(ip), "[<%p>]",
71042 + snprintf(ip, sizeof(ip), "[<%pK>]",
71043 (void *)class->contention_point[i]);
71044 seq_printf(m, "%40s %14lu %29s %pS\n",
71045 name, stats->contention_point[i],
71046 @@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
71047 if (!i)
71048 seq_line(m, '-', 40-namelen, namelen);
71049
71050 - snprintf(ip, sizeof(ip), "[<%p>]",
71051 + snprintf(ip, sizeof(ip), "[<%pK>]",
71052 (void *)class->contending_point[i]);
71053 seq_printf(m, "%40s %14lu %29s %pS\n",
71054 name, stats->contending_point[i],
71055 diff --git a/kernel/module.c b/kernel/module.c
71056 index eab0827..75ede66 100644
71057 --- a/kernel/module.c
71058 +++ b/kernel/module.c
71059 @@ -61,6 +61,7 @@
71060 #include <linux/pfn.h>
71061 #include <linux/bsearch.h>
71062 #include <linux/fips.h>
71063 +#include <linux/grsecurity.h>
71064 #include <uapi/linux/module.h>
71065 #include "module-internal.h"
71066
71067 @@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
71068
71069 /* Bounds of module allocation, for speeding __module_address.
71070 * Protected by module_mutex. */
71071 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
71072 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
71073 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
71074
71075 int register_module_notifier(struct notifier_block * nb)
71076 {
71077 @@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
71078 return true;
71079
71080 list_for_each_entry_rcu(mod, &modules, list) {
71081 - struct symsearch arr[] = {
71082 + struct symsearch modarr[] = {
71083 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
71084 NOT_GPL_ONLY, false },
71085 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
71086 @@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
71087 if (mod->state == MODULE_STATE_UNFORMED)
71088 continue;
71089
71090 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
71091 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
71092 return true;
71093 }
71094 return false;
71095 @@ -484,7 +486,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
71096 static int percpu_modalloc(struct module *mod,
71097 unsigned long size, unsigned long align)
71098 {
71099 - if (align > PAGE_SIZE) {
71100 + if (align-1 >= PAGE_SIZE) {
71101 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
71102 mod->name, align, PAGE_SIZE);
71103 align = PAGE_SIZE;
71104 @@ -1088,7 +1090,7 @@ struct module_attribute module_uevent =
71105 static ssize_t show_coresize(struct module_attribute *mattr,
71106 struct module_kobject *mk, char *buffer)
71107 {
71108 - return sprintf(buffer, "%u\n", mk->mod->core_size);
71109 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
71110 }
71111
71112 static struct module_attribute modinfo_coresize =
71113 @@ -1097,7 +1099,7 @@ static struct module_attribute modinfo_coresize =
71114 static ssize_t show_initsize(struct module_attribute *mattr,
71115 struct module_kobject *mk, char *buffer)
71116 {
71117 - return sprintf(buffer, "%u\n", mk->mod->init_size);
71118 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
71119 }
71120
71121 static struct module_attribute modinfo_initsize =
71122 @@ -1311,7 +1313,7 @@ resolve_symbol_wait(struct module *mod,
71123 */
71124 #ifdef CONFIG_SYSFS
71125
71126 -#ifdef CONFIG_KALLSYMS
71127 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71128 static inline bool sect_empty(const Elf_Shdr *sect)
71129 {
71130 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
71131 @@ -1777,21 +1779,21 @@ static void set_section_ro_nx(void *base,
71132
71133 static void unset_module_core_ro_nx(struct module *mod)
71134 {
71135 - set_page_attributes(mod->module_core + mod->core_text_size,
71136 - mod->module_core + mod->core_size,
71137 + set_page_attributes(mod->module_core_rw,
71138 + mod->module_core_rw + mod->core_size_rw,
71139 set_memory_x);
71140 - set_page_attributes(mod->module_core,
71141 - mod->module_core + mod->core_ro_size,
71142 + set_page_attributes(mod->module_core_rx,
71143 + mod->module_core_rx + mod->core_size_rx,
71144 set_memory_rw);
71145 }
71146
71147 static void unset_module_init_ro_nx(struct module *mod)
71148 {
71149 - set_page_attributes(mod->module_init + mod->init_text_size,
71150 - mod->module_init + mod->init_size,
71151 + set_page_attributes(mod->module_init_rw,
71152 + mod->module_init_rw + mod->init_size_rw,
71153 set_memory_x);
71154 - set_page_attributes(mod->module_init,
71155 - mod->module_init + mod->init_ro_size,
71156 + set_page_attributes(mod->module_init_rx,
71157 + mod->module_init_rx + mod->init_size_rx,
71158 set_memory_rw);
71159 }
71160
71161 @@ -1804,14 +1806,14 @@ void set_all_modules_text_rw(void)
71162 list_for_each_entry_rcu(mod, &modules, list) {
71163 if (mod->state == MODULE_STATE_UNFORMED)
71164 continue;
71165 - if ((mod->module_core) && (mod->core_text_size)) {
71166 - set_page_attributes(mod->module_core,
71167 - mod->module_core + mod->core_text_size,
71168 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
71169 + set_page_attributes(mod->module_core_rx,
71170 + mod->module_core_rx + mod->core_size_rx,
71171 set_memory_rw);
71172 }
71173 - if ((mod->module_init) && (mod->init_text_size)) {
71174 - set_page_attributes(mod->module_init,
71175 - mod->module_init + mod->init_text_size,
71176 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
71177 + set_page_attributes(mod->module_init_rx,
71178 + mod->module_init_rx + mod->init_size_rx,
71179 set_memory_rw);
71180 }
71181 }
71182 @@ -1827,14 +1829,14 @@ void set_all_modules_text_ro(void)
71183 list_for_each_entry_rcu(mod, &modules, list) {
71184 if (mod->state == MODULE_STATE_UNFORMED)
71185 continue;
71186 - if ((mod->module_core) && (mod->core_text_size)) {
71187 - set_page_attributes(mod->module_core,
71188 - mod->module_core + mod->core_text_size,
71189 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
71190 + set_page_attributes(mod->module_core_rx,
71191 + mod->module_core_rx + mod->core_size_rx,
71192 set_memory_ro);
71193 }
71194 - if ((mod->module_init) && (mod->init_text_size)) {
71195 - set_page_attributes(mod->module_init,
71196 - mod->module_init + mod->init_text_size,
71197 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
71198 + set_page_attributes(mod->module_init_rx,
71199 + mod->module_init_rx + mod->init_size_rx,
71200 set_memory_ro);
71201 }
71202 }
71203 @@ -1880,16 +1882,19 @@ static void free_module(struct module *mod)
71204
71205 /* This may be NULL, but that's OK */
71206 unset_module_init_ro_nx(mod);
71207 - module_free(mod, mod->module_init);
71208 + module_free(mod, mod->module_init_rw);
71209 + module_free_exec(mod, mod->module_init_rx);
71210 kfree(mod->args);
71211 percpu_modfree(mod);
71212
71213 /* Free lock-classes: */
71214 - lockdep_free_key_range(mod->module_core, mod->core_size);
71215 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
71216 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
71217
71218 /* Finally, free the core (containing the module structure) */
71219 unset_module_core_ro_nx(mod);
71220 - module_free(mod, mod->module_core);
71221 + module_free_exec(mod, mod->module_core_rx);
71222 + module_free(mod, mod->module_core_rw);
71223
71224 #ifdef CONFIG_MPU
71225 update_protections(current->mm);
71226 @@ -1959,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
71227 int ret = 0;
71228 const struct kernel_symbol *ksym;
71229
71230 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71231 + int is_fs_load = 0;
71232 + int register_filesystem_found = 0;
71233 + char *p;
71234 +
71235 + p = strstr(mod->args, "grsec_modharden_fs");
71236 + if (p) {
71237 + char *endptr = p + sizeof("grsec_modharden_fs") - 1;
71238 + /* copy \0 as well */
71239 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
71240 + is_fs_load = 1;
71241 + }
71242 +#endif
71243 +
71244 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
71245 const char *name = info->strtab + sym[i].st_name;
71246
71247 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71248 + /* it's a real shame this will never get ripped and copied
71249 + upstream! ;(
71250 + */
71251 + if (is_fs_load && !strcmp(name, "register_filesystem"))
71252 + register_filesystem_found = 1;
71253 +#endif
71254 +
71255 switch (sym[i].st_shndx) {
71256 case SHN_COMMON:
71257 /* We compiled with -fno-common. These are not
71258 @@ -1982,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
71259 ksym = resolve_symbol_wait(mod, info, name);
71260 /* Ok if resolved. */
71261 if (ksym && !IS_ERR(ksym)) {
71262 + pax_open_kernel();
71263 sym[i].st_value = ksym->value;
71264 + pax_close_kernel();
71265 break;
71266 }
71267
71268 @@ -2001,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
71269 secbase = (unsigned long)mod_percpu(mod);
71270 else
71271 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
71272 + pax_open_kernel();
71273 sym[i].st_value += secbase;
71274 + pax_close_kernel();
71275 break;
71276 }
71277 }
71278
71279 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71280 + if (is_fs_load && !register_filesystem_found) {
71281 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
71282 + ret = -EPERM;
71283 + }
71284 +#endif
71285 +
71286 return ret;
71287 }
71288
71289 @@ -2089,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
71290 || s->sh_entsize != ~0UL
71291 || strstarts(sname, ".init"))
71292 continue;
71293 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
71294 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
71295 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
71296 + else
71297 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
71298 pr_debug("\t%s\n", sname);
71299 }
71300 - switch (m) {
71301 - case 0: /* executable */
71302 - mod->core_size = debug_align(mod->core_size);
71303 - mod->core_text_size = mod->core_size;
71304 - break;
71305 - case 1: /* RO: text and ro-data */
71306 - mod->core_size = debug_align(mod->core_size);
71307 - mod->core_ro_size = mod->core_size;
71308 - break;
71309 - case 3: /* whole core */
71310 - mod->core_size = debug_align(mod->core_size);
71311 - break;
71312 - }
71313 }
71314
71315 pr_debug("Init section allocation order:\n");
71316 @@ -2118,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
71317 || s->sh_entsize != ~0UL
71318 || !strstarts(sname, ".init"))
71319 continue;
71320 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
71321 - | INIT_OFFSET_MASK);
71322 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
71323 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
71324 + else
71325 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
71326 + s->sh_entsize |= INIT_OFFSET_MASK;
71327 pr_debug("\t%s\n", sname);
71328 }
71329 - switch (m) {
71330 - case 0: /* executable */
71331 - mod->init_size = debug_align(mod->init_size);
71332 - mod->init_text_size = mod->init_size;
71333 - break;
71334 - case 1: /* RO: text and ro-data */
71335 - mod->init_size = debug_align(mod->init_size);
71336 - mod->init_ro_size = mod->init_size;
71337 - break;
71338 - case 3: /* whole init */
71339 - mod->init_size = debug_align(mod->init_size);
71340 - break;
71341 - }
71342 }
71343 }
71344
71345 @@ -2306,7 +2324,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
71346
71347 /* Put symbol section at end of init part of module. */
71348 symsect->sh_flags |= SHF_ALLOC;
71349 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
71350 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
71351 info->index.sym) | INIT_OFFSET_MASK;
71352 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
71353
71354 @@ -2323,13 +2341,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
71355 }
71356
71357 /* Append room for core symbols at end of core part. */
71358 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
71359 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
71360 - mod->core_size += strtab_size;
71361 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
71362 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
71363 + mod->core_size_rx += strtab_size;
71364
71365 /* Put string table section at end of init part of module. */
71366 strsect->sh_flags |= SHF_ALLOC;
71367 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
71368 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
71369 info->index.str) | INIT_OFFSET_MASK;
71370 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
71371 }
71372 @@ -2347,12 +2365,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
71373 /* Make sure we get permanent strtab: don't use info->strtab. */
71374 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
71375
71376 + pax_open_kernel();
71377 +
71378 /* Set types up while we still have access to sections. */
71379 for (i = 0; i < mod->num_symtab; i++)
71380 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
71381
71382 - mod->core_symtab = dst = mod->module_core + info->symoffs;
71383 - mod->core_strtab = s = mod->module_core + info->stroffs;
71384 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
71385 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
71386 src = mod->symtab;
71387 for (ndst = i = 0; i < mod->num_symtab; i++) {
71388 if (i == 0 ||
71389 @@ -2364,6 +2384,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
71390 }
71391 }
71392 mod->core_num_syms = ndst;
71393 +
71394 + pax_close_kernel();
71395 }
71396 #else
71397 static inline void layout_symtab(struct module *mod, struct load_info *info)
71398 @@ -2397,17 +2419,33 @@ void * __weak module_alloc(unsigned long size)
71399 return vmalloc_exec(size);
71400 }
71401
71402 -static void *module_alloc_update_bounds(unsigned long size)
71403 +static void *module_alloc_update_bounds_rw(unsigned long size)
71404 {
71405 void *ret = module_alloc(size);
71406
71407 if (ret) {
71408 mutex_lock(&module_mutex);
71409 /* Update module bounds. */
71410 - if ((unsigned long)ret < module_addr_min)
71411 - module_addr_min = (unsigned long)ret;
71412 - if ((unsigned long)ret + size > module_addr_max)
71413 - module_addr_max = (unsigned long)ret + size;
71414 + if ((unsigned long)ret < module_addr_min_rw)
71415 + module_addr_min_rw = (unsigned long)ret;
71416 + if ((unsigned long)ret + size > module_addr_max_rw)
71417 + module_addr_max_rw = (unsigned long)ret + size;
71418 + mutex_unlock(&module_mutex);
71419 + }
71420 + return ret;
71421 +}
71422 +
71423 +static void *module_alloc_update_bounds_rx(unsigned long size)
71424 +{
71425 + void *ret = module_alloc_exec(size);
71426 +
71427 + if (ret) {
71428 + mutex_lock(&module_mutex);
71429 + /* Update module bounds. */
71430 + if ((unsigned long)ret < module_addr_min_rx)
71431 + module_addr_min_rx = (unsigned long)ret;
71432 + if ((unsigned long)ret + size > module_addr_max_rx)
71433 + module_addr_max_rx = (unsigned long)ret + size;
71434 mutex_unlock(&module_mutex);
71435 }
71436 return ret;
71437 @@ -2683,8 +2721,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
71438 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
71439 {
71440 const char *modmagic = get_modinfo(info, "vermagic");
71441 + const char *license = get_modinfo(info, "license");
71442 int err;
71443
71444 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
71445 + if (!license || !license_is_gpl_compatible(license))
71446 + return -ENOEXEC;
71447 +#endif
71448 +
71449 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
71450 modmagic = NULL;
71451
71452 @@ -2710,7 +2754,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
71453 }
71454
71455 /* Set up license info based on the info section */
71456 - set_license(mod, get_modinfo(info, "license"));
71457 + set_license(mod, license);
71458
71459 return 0;
71460 }
71461 @@ -2804,7 +2848,7 @@ static int move_module(struct module *mod, struct load_info *info)
71462 void *ptr;
71463
71464 /* Do the allocs. */
71465 - ptr = module_alloc_update_bounds(mod->core_size);
71466 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
71467 /*
71468 * The pointer to this block is stored in the module structure
71469 * which is inside the block. Just mark it as not being a
71470 @@ -2814,11 +2858,11 @@ static int move_module(struct module *mod, struct load_info *info)
71471 if (!ptr)
71472 return -ENOMEM;
71473
71474 - memset(ptr, 0, mod->core_size);
71475 - mod->module_core = ptr;
71476 + memset(ptr, 0, mod->core_size_rw);
71477 + mod->module_core_rw = ptr;
71478
71479 - if (mod->init_size) {
71480 - ptr = module_alloc_update_bounds(mod->init_size);
71481 + if (mod->init_size_rw) {
71482 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
71483 /*
71484 * The pointer to this block is stored in the module structure
71485 * which is inside the block. This block doesn't need to be
71486 @@ -2827,13 +2871,45 @@ static int move_module(struct module *mod, struct load_info *info)
71487 */
71488 kmemleak_ignore(ptr);
71489 if (!ptr) {
71490 - module_free(mod, mod->module_core);
71491 + module_free(mod, mod->module_core_rw);
71492 return -ENOMEM;
71493 }
71494 - memset(ptr, 0, mod->init_size);
71495 - mod->module_init = ptr;
71496 + memset(ptr, 0, mod->init_size_rw);
71497 + mod->module_init_rw = ptr;
71498 } else
71499 - mod->module_init = NULL;
71500 + mod->module_init_rw = NULL;
71501 +
71502 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
71503 + kmemleak_not_leak(ptr);
71504 + if (!ptr) {
71505 + if (mod->module_init_rw)
71506 + module_free(mod, mod->module_init_rw);
71507 + module_free(mod, mod->module_core_rw);
71508 + return -ENOMEM;
71509 + }
71510 +
71511 + pax_open_kernel();
71512 + memset(ptr, 0, mod->core_size_rx);
71513 + pax_close_kernel();
71514 + mod->module_core_rx = ptr;
71515 +
71516 + if (mod->init_size_rx) {
71517 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
71518 + kmemleak_ignore(ptr);
71519 + if (!ptr && mod->init_size_rx) {
71520 + module_free_exec(mod, mod->module_core_rx);
71521 + if (mod->module_init_rw)
71522 + module_free(mod, mod->module_init_rw);
71523 + module_free(mod, mod->module_core_rw);
71524 + return -ENOMEM;
71525 + }
71526 +
71527 + pax_open_kernel();
71528 + memset(ptr, 0, mod->init_size_rx);
71529 + pax_close_kernel();
71530 + mod->module_init_rx = ptr;
71531 + } else
71532 + mod->module_init_rx = NULL;
71533
71534 /* Transfer each section which specifies SHF_ALLOC */
71535 pr_debug("final section addresses:\n");
71536 @@ -2844,16 +2920,45 @@ static int move_module(struct module *mod, struct load_info *info)
71537 if (!(shdr->sh_flags & SHF_ALLOC))
71538 continue;
71539
71540 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
71541 - dest = mod->module_init
71542 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
71543 - else
71544 - dest = mod->module_core + shdr->sh_entsize;
71545 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
71546 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
71547 + dest = mod->module_init_rw
71548 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
71549 + else
71550 + dest = mod->module_init_rx
71551 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
71552 + } else {
71553 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
71554 + dest = mod->module_core_rw + shdr->sh_entsize;
71555 + else
71556 + dest = mod->module_core_rx + shdr->sh_entsize;
71557 + }
71558 +
71559 + if (shdr->sh_type != SHT_NOBITS) {
71560 +
71561 +#ifdef CONFIG_PAX_KERNEXEC
71562 +#ifdef CONFIG_X86_64
71563 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
71564 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
71565 +#endif
71566 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
71567 + pax_open_kernel();
71568 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
71569 + pax_close_kernel();
71570 + } else
71571 +#endif
71572
71573 - if (shdr->sh_type != SHT_NOBITS)
71574 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
71575 + }
71576 /* Update sh_addr to point to copy in image. */
71577 - shdr->sh_addr = (unsigned long)dest;
71578 +
71579 +#ifdef CONFIG_PAX_KERNEXEC
71580 + if (shdr->sh_flags & SHF_EXECINSTR)
71581 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
71582 + else
71583 +#endif
71584 +
71585 + shdr->sh_addr = (unsigned long)dest;
71586 pr_debug("\t0x%lx %s\n",
71587 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
71588 }
71589 @@ -2908,12 +3013,12 @@ static void flush_module_icache(const struct module *mod)
71590 * Do it before processing of module parameters, so the module
71591 * can provide parameter accessor functions of its own.
71592 */
71593 - if (mod->module_init)
71594 - flush_icache_range((unsigned long)mod->module_init,
71595 - (unsigned long)mod->module_init
71596 - + mod->init_size);
71597 - flush_icache_range((unsigned long)mod->module_core,
71598 - (unsigned long)mod->module_core + mod->core_size);
71599 + if (mod->module_init_rx)
71600 + flush_icache_range((unsigned long)mod->module_init_rx,
71601 + (unsigned long)mod->module_init_rx
71602 + + mod->init_size_rx);
71603 + flush_icache_range((unsigned long)mod->module_core_rx,
71604 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
71605
71606 set_fs(old_fs);
71607 }
71608 @@ -2983,8 +3088,10 @@ out:
71609 static void module_deallocate(struct module *mod, struct load_info *info)
71610 {
71611 percpu_modfree(mod);
71612 - module_free(mod, mod->module_init);
71613 - module_free(mod, mod->module_core);
71614 + module_free_exec(mod, mod->module_init_rx);
71615 + module_free_exec(mod, mod->module_core_rx);
71616 + module_free(mod, mod->module_init_rw);
71617 + module_free(mod, mod->module_core_rw);
71618 }
71619
71620 int __weak module_finalize(const Elf_Ehdr *hdr,
71621 @@ -2997,7 +3104,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
71622 static int post_relocation(struct module *mod, const struct load_info *info)
71623 {
71624 /* Sort exception table now relocations are done. */
71625 + pax_open_kernel();
71626 sort_extable(mod->extable, mod->extable + mod->num_exentries);
71627 + pax_close_kernel();
71628
71629 /* Copy relocated percpu area over. */
71630 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
71631 @@ -3051,16 +3160,16 @@ static int do_init_module(struct module *mod)
71632 MODULE_STATE_COMING, mod);
71633
71634 /* Set RO and NX regions for core */
71635 - set_section_ro_nx(mod->module_core,
71636 - mod->core_text_size,
71637 - mod->core_ro_size,
71638 - mod->core_size);
71639 + set_section_ro_nx(mod->module_core_rx,
71640 + mod->core_size_rx,
71641 + mod->core_size_rx,
71642 + mod->core_size_rx);
71643
71644 /* Set RO and NX regions for init */
71645 - set_section_ro_nx(mod->module_init,
71646 - mod->init_text_size,
71647 - mod->init_ro_size,
71648 - mod->init_size);
71649 + set_section_ro_nx(mod->module_init_rx,
71650 + mod->init_size_rx,
71651 + mod->init_size_rx,
71652 + mod->init_size_rx);
71653
71654 do_mod_ctors(mod);
71655 /* Start the module */
71656 @@ -3122,11 +3231,12 @@ static int do_init_module(struct module *mod)
71657 mod->strtab = mod->core_strtab;
71658 #endif
71659 unset_module_init_ro_nx(mod);
71660 - module_free(mod, mod->module_init);
71661 - mod->module_init = NULL;
71662 - mod->init_size = 0;
71663 - mod->init_ro_size = 0;
71664 - mod->init_text_size = 0;
71665 + module_free(mod, mod->module_init_rw);
71666 + module_free_exec(mod, mod->module_init_rx);
71667 + mod->module_init_rw = NULL;
71668 + mod->module_init_rx = NULL;
71669 + mod->init_size_rw = 0;
71670 + mod->init_size_rx = 0;
71671 mutex_unlock(&module_mutex);
71672 wake_up_all(&module_wq);
71673
71674 @@ -3209,9 +3319,38 @@ again:
71675 if (err)
71676 goto free_unload;
71677
71678 + /* Now copy in args */
71679 + mod->args = strndup_user(uargs, ~0UL >> 1);
71680 + if (IS_ERR(mod->args)) {
71681 + err = PTR_ERR(mod->args);
71682 + goto free_unload;
71683 + }
71684 +
71685 /* Set up MODINFO_ATTR fields */
71686 setup_modinfo(mod, info);
71687
71688 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71689 + {
71690 + char *p, *p2;
71691 +
71692 + if (strstr(mod->args, "grsec_modharden_netdev")) {
71693 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
71694 + err = -EPERM;
71695 + goto free_modinfo;
71696 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
71697 + p += sizeof("grsec_modharden_normal") - 1;
71698 + p2 = strstr(p, "_");
71699 + if (p2) {
71700 + *p2 = '\0';
71701 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
71702 + *p2 = '_';
71703 + }
71704 + err = -EPERM;
71705 + goto free_modinfo;
71706 + }
71707 + }
71708 +#endif
71709 +
71710 /* Fix up syms, so that st_value is a pointer to location. */
71711 err = simplify_symbols(mod, info);
71712 if (err < 0)
71713 @@ -3227,13 +3366,6 @@ again:
71714
71715 flush_module_icache(mod);
71716
71717 - /* Now copy in args */
71718 - mod->args = strndup_user(uargs, ~0UL >> 1);
71719 - if (IS_ERR(mod->args)) {
71720 - err = PTR_ERR(mod->args);
71721 - goto free_arch_cleanup;
71722 - }
71723 -
71724 dynamic_debug_setup(info->debug, info->num_debug);
71725
71726 mutex_lock(&module_mutex);
71727 @@ -3278,11 +3410,10 @@ again:
71728 mutex_unlock(&module_mutex);
71729 dynamic_debug_remove(info->debug);
71730 synchronize_sched();
71731 - kfree(mod->args);
71732 - free_arch_cleanup:
71733 module_arch_cleanup(mod);
71734 free_modinfo:
71735 free_modinfo(mod);
71736 + kfree(mod->args);
71737 free_unload:
71738 module_unload_free(mod);
71739 unlink_mod:
71740 @@ -3365,10 +3496,16 @@ static const char *get_ksymbol(struct module *mod,
71741 unsigned long nextval;
71742
71743 /* At worse, next value is at end of module */
71744 - if (within_module_init(addr, mod))
71745 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
71746 + if (within_module_init_rx(addr, mod))
71747 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
71748 + else if (within_module_init_rw(addr, mod))
71749 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
71750 + else if (within_module_core_rx(addr, mod))
71751 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
71752 + else if (within_module_core_rw(addr, mod))
71753 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
71754 else
71755 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
71756 + return NULL;
71757
71758 /* Scan for closest preceding symbol, and next symbol. (ELF
71759 starts real symbols at 1). */
71760 @@ -3621,7 +3758,7 @@ static int m_show(struct seq_file *m, void *p)
71761 return 0;
71762
71763 seq_printf(m, "%s %u",
71764 - mod->name, mod->init_size + mod->core_size);
71765 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
71766 print_unload_info(m, mod);
71767
71768 /* Informative for users. */
71769 @@ -3630,7 +3767,7 @@ static int m_show(struct seq_file *m, void *p)
71770 mod->state == MODULE_STATE_COMING ? "Loading":
71771 "Live");
71772 /* Used by oprofile and other similar tools. */
71773 - seq_printf(m, " 0x%pK", mod->module_core);
71774 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
71775
71776 /* Taints info */
71777 if (mod->taints)
71778 @@ -3666,7 +3803,17 @@ static const struct file_operations proc_modules_operations = {
71779
71780 static int __init proc_modules_init(void)
71781 {
71782 +#ifndef CONFIG_GRKERNSEC_HIDESYM
71783 +#ifdef CONFIG_GRKERNSEC_PROC_USER
71784 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
71785 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71786 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
71787 +#else
71788 proc_create("modules", 0, NULL, &proc_modules_operations);
71789 +#endif
71790 +#else
71791 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
71792 +#endif
71793 return 0;
71794 }
71795 module_init(proc_modules_init);
71796 @@ -3727,14 +3874,14 @@ struct module *__module_address(unsigned long addr)
71797 {
71798 struct module *mod;
71799
71800 - if (addr < module_addr_min || addr > module_addr_max)
71801 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
71802 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
71803 return NULL;
71804
71805 list_for_each_entry_rcu(mod, &modules, list) {
71806 if (mod->state == MODULE_STATE_UNFORMED)
71807 continue;
71808 - if (within_module_core(addr, mod)
71809 - || within_module_init(addr, mod))
71810 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
71811 return mod;
71812 }
71813 return NULL;
71814 @@ -3769,11 +3916,20 @@ bool is_module_text_address(unsigned long addr)
71815 */
71816 struct module *__module_text_address(unsigned long addr)
71817 {
71818 - struct module *mod = __module_address(addr);
71819 + struct module *mod;
71820 +
71821 +#ifdef CONFIG_X86_32
71822 + addr = ktla_ktva(addr);
71823 +#endif
71824 +
71825 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
71826 + return NULL;
71827 +
71828 + mod = __module_address(addr);
71829 +
71830 if (mod) {
71831 /* Make sure it's within the text section. */
71832 - if (!within(addr, mod->module_init, mod->init_text_size)
71833 - && !within(addr, mod->module_core, mod->core_text_size))
71834 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
71835 mod = NULL;
71836 }
71837 return mod;
71838 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
71839 index 7e3443f..b2a1e6b 100644
71840 --- a/kernel/mutex-debug.c
71841 +++ b/kernel/mutex-debug.c
71842 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
71843 }
71844
71845 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
71846 - struct thread_info *ti)
71847 + struct task_struct *task)
71848 {
71849 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
71850
71851 /* Mark the current thread as blocked on the lock: */
71852 - ti->task->blocked_on = waiter;
71853 + task->blocked_on = waiter;
71854 }
71855
71856 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
71857 - struct thread_info *ti)
71858 + struct task_struct *task)
71859 {
71860 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
71861 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
71862 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
71863 - ti->task->blocked_on = NULL;
71864 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
71865 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
71866 + task->blocked_on = NULL;
71867
71868 list_del_init(&waiter->list);
71869 waiter->task = NULL;
71870 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
71871 index 0799fd3..d06ae3b 100644
71872 --- a/kernel/mutex-debug.h
71873 +++ b/kernel/mutex-debug.h
71874 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
71875 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
71876 extern void debug_mutex_add_waiter(struct mutex *lock,
71877 struct mutex_waiter *waiter,
71878 - struct thread_info *ti);
71879 + struct task_struct *task);
71880 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
71881 - struct thread_info *ti);
71882 + struct task_struct *task);
71883 extern void debug_mutex_unlock(struct mutex *lock);
71884 extern void debug_mutex_init(struct mutex *lock, const char *name,
71885 struct lock_class_key *key);
71886 diff --git a/kernel/mutex.c b/kernel/mutex.c
71887 index a307cc9..27fd2e9 100644
71888 --- a/kernel/mutex.c
71889 +++ b/kernel/mutex.c
71890 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
71891 spin_lock_mutex(&lock->wait_lock, flags);
71892
71893 debug_mutex_lock_common(lock, &waiter);
71894 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
71895 + debug_mutex_add_waiter(lock, &waiter, task);
71896
71897 /* add waiting tasks to the end of the waitqueue (FIFO): */
71898 list_add_tail(&waiter.list, &lock->wait_list);
71899 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
71900 * TASK_UNINTERRUPTIBLE case.)
71901 */
71902 if (unlikely(signal_pending_state(state, task))) {
71903 - mutex_remove_waiter(lock, &waiter,
71904 - task_thread_info(task));
71905 + mutex_remove_waiter(lock, &waiter, task);
71906 mutex_release(&lock->dep_map, 1, ip);
71907 spin_unlock_mutex(&lock->wait_lock, flags);
71908
71909 @@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
71910 done:
71911 lock_acquired(&lock->dep_map, ip);
71912 /* got the lock - rejoice! */
71913 - mutex_remove_waiter(lock, &waiter, current_thread_info());
71914 + mutex_remove_waiter(lock, &waiter, task);
71915 mutex_set_owner(lock);
71916
71917 /* set it to 0 if there are no waiters left: */
71918 diff --git a/kernel/notifier.c b/kernel/notifier.c
71919 index 2d5cc4c..d9ea600 100644
71920 --- a/kernel/notifier.c
71921 +++ b/kernel/notifier.c
71922 @@ -5,6 +5,7 @@
71923 #include <linux/rcupdate.h>
71924 #include <linux/vmalloc.h>
71925 #include <linux/reboot.h>
71926 +#include <linux/mm.h>
71927
71928 /*
71929 * Notifier list for kernel code which wants to be called
71930 @@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
71931 while ((*nl) != NULL) {
71932 if (n->priority > (*nl)->priority)
71933 break;
71934 - nl = &((*nl)->next);
71935 + nl = (struct notifier_block **)&((*nl)->next);
71936 }
71937 - n->next = *nl;
71938 + pax_open_kernel();
71939 + *(const void **)&n->next = *nl;
71940 rcu_assign_pointer(*nl, n);
71941 + pax_close_kernel();
71942 return 0;
71943 }
71944
71945 @@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
71946 return 0;
71947 if (n->priority > (*nl)->priority)
71948 break;
71949 - nl = &((*nl)->next);
71950 + nl = (struct notifier_block **)&((*nl)->next);
71951 }
71952 - n->next = *nl;
71953 + pax_open_kernel();
71954 + *(const void **)&n->next = *nl;
71955 rcu_assign_pointer(*nl, n);
71956 + pax_close_kernel();
71957 return 0;
71958 }
71959
71960 @@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
71961 {
71962 while ((*nl) != NULL) {
71963 if ((*nl) == n) {
71964 + pax_open_kernel();
71965 rcu_assign_pointer(*nl, n->next);
71966 + pax_close_kernel();
71967 return 0;
71968 }
71969 - nl = &((*nl)->next);
71970 + nl = (struct notifier_block **)&((*nl)->next);
71971 }
71972 return -ENOENT;
71973 }
71974 diff --git a/kernel/panic.c b/kernel/panic.c
71975 index e1b2822..5edc1d9 100644
71976 --- a/kernel/panic.c
71977 +++ b/kernel/panic.c
71978 @@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
71979 const char *board;
71980
71981 printk(KERN_WARNING "------------[ cut here ]------------\n");
71982 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
71983 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
71984 board = dmi_get_system_info(DMI_PRODUCT_NAME);
71985 if (board)
71986 printk(KERN_WARNING "Hardware name: %s\n", board);
71987 @@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
71988 */
71989 void __stack_chk_fail(void)
71990 {
71991 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
71992 + dump_stack();
71993 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
71994 __builtin_return_address(0));
71995 }
71996 EXPORT_SYMBOL(__stack_chk_fail);
71997 diff --git a/kernel/pid.c b/kernel/pid.c
71998 index f2c6a68..4922d97 100644
71999 --- a/kernel/pid.c
72000 +++ b/kernel/pid.c
72001 @@ -33,6 +33,7 @@
72002 #include <linux/rculist.h>
72003 #include <linux/bootmem.h>
72004 #include <linux/hash.h>
72005 +#include <linux/security.h>
72006 #include <linux/pid_namespace.h>
72007 #include <linux/init_task.h>
72008 #include <linux/syscalls.h>
72009 @@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
72010
72011 int pid_max = PID_MAX_DEFAULT;
72012
72013 -#define RESERVED_PIDS 300
72014 +#define RESERVED_PIDS 500
72015
72016 int pid_max_min = RESERVED_PIDS + 1;
72017 int pid_max_max = PID_MAX_LIMIT;
72018 @@ -441,10 +442,18 @@ EXPORT_SYMBOL(pid_task);
72019 */
72020 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
72021 {
72022 + struct task_struct *task;
72023 +
72024 rcu_lockdep_assert(rcu_read_lock_held(),
72025 "find_task_by_pid_ns() needs rcu_read_lock()"
72026 " protection");
72027 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
72028 +
72029 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
72030 +
72031 + if (gr_pid_is_chrooted(task))
72032 + return NULL;
72033 +
72034 + return task;
72035 }
72036
72037 struct task_struct *find_task_by_vpid(pid_t vnr)
72038 @@ -452,6 +461,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
72039 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
72040 }
72041
72042 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
72043 +{
72044 + rcu_lockdep_assert(rcu_read_lock_held(),
72045 + "find_task_by_pid_ns() needs rcu_read_lock()"
72046 + " protection");
72047 + return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
72048 +}
72049 +
72050 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
72051 {
72052 struct pid *pid;
72053 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
72054 index 942ca27..111e609 100644
72055 --- a/kernel/posix-cpu-timers.c
72056 +++ b/kernel/posix-cpu-timers.c
72057 @@ -1576,14 +1576,14 @@ struct k_clock clock_posix_cpu = {
72058
72059 static __init int init_posix_cpu_timers(void)
72060 {
72061 - struct k_clock process = {
72062 + static struct k_clock process = {
72063 .clock_getres = process_cpu_clock_getres,
72064 .clock_get = process_cpu_clock_get,
72065 .timer_create = process_cpu_timer_create,
72066 .nsleep = process_cpu_nsleep,
72067 .nsleep_restart = process_cpu_nsleep_restart,
72068 };
72069 - struct k_clock thread = {
72070 + static struct k_clock thread = {
72071 .clock_getres = thread_cpu_clock_getres,
72072 .clock_get = thread_cpu_clock_get,
72073 .timer_create = thread_cpu_timer_create,
72074 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
72075 index e885be1..380fe76 100644
72076 --- a/kernel/posix-timers.c
72077 +++ b/kernel/posix-timers.c
72078 @@ -43,6 +43,7 @@
72079 #include <linux/idr.h>
72080 #include <linux/posix-clock.h>
72081 #include <linux/posix-timers.h>
72082 +#include <linux/grsecurity.h>
72083 #include <linux/syscalls.h>
72084 #include <linux/wait.h>
72085 #include <linux/workqueue.h>
72086 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
72087 * which we beg off on and pass to do_sys_settimeofday().
72088 */
72089
72090 -static struct k_clock posix_clocks[MAX_CLOCKS];
72091 +static struct k_clock *posix_clocks[MAX_CLOCKS];
72092
72093 /*
72094 * These ones are defined below.
72095 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
72096 */
72097 static __init int init_posix_timers(void)
72098 {
72099 - struct k_clock clock_realtime = {
72100 + static struct k_clock clock_realtime = {
72101 .clock_getres = hrtimer_get_res,
72102 .clock_get = posix_clock_realtime_get,
72103 .clock_set = posix_clock_realtime_set,
72104 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
72105 .timer_get = common_timer_get,
72106 .timer_del = common_timer_del,
72107 };
72108 - struct k_clock clock_monotonic = {
72109 + static struct k_clock clock_monotonic = {
72110 .clock_getres = hrtimer_get_res,
72111 .clock_get = posix_ktime_get_ts,
72112 .nsleep = common_nsleep,
72113 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
72114 .timer_get = common_timer_get,
72115 .timer_del = common_timer_del,
72116 };
72117 - struct k_clock clock_monotonic_raw = {
72118 + static struct k_clock clock_monotonic_raw = {
72119 .clock_getres = hrtimer_get_res,
72120 .clock_get = posix_get_monotonic_raw,
72121 };
72122 - struct k_clock clock_realtime_coarse = {
72123 + static struct k_clock clock_realtime_coarse = {
72124 .clock_getres = posix_get_coarse_res,
72125 .clock_get = posix_get_realtime_coarse,
72126 };
72127 - struct k_clock clock_monotonic_coarse = {
72128 + static struct k_clock clock_monotonic_coarse = {
72129 .clock_getres = posix_get_coarse_res,
72130 .clock_get = posix_get_monotonic_coarse,
72131 };
72132 - struct k_clock clock_boottime = {
72133 + static struct k_clock clock_boottime = {
72134 .clock_getres = hrtimer_get_res,
72135 .clock_get = posix_get_boottime,
72136 .nsleep = common_nsleep,
72137 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
72138 return;
72139 }
72140
72141 - posix_clocks[clock_id] = *new_clock;
72142 + posix_clocks[clock_id] = new_clock;
72143 }
72144 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
72145
72146 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
72147 return (id & CLOCKFD_MASK) == CLOCKFD ?
72148 &clock_posix_dynamic : &clock_posix_cpu;
72149
72150 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
72151 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
72152 return NULL;
72153 - return &posix_clocks[id];
72154 + return posix_clocks[id];
72155 }
72156
72157 static int common_timer_create(struct k_itimer *new_timer)
72158 @@ -966,6 +967,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
72159 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
72160 return -EFAULT;
72161
72162 + /* only the CLOCK_REALTIME clock can be set, all other clocks
72163 + have their clock_set fptr set to a nosettime dummy function
72164 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
72165 + call common_clock_set, which calls do_sys_settimeofday, which
72166 + we hook
72167 + */
72168 +
72169 return kc->clock_set(which_clock, &new_tp);
72170 }
72171
72172 diff --git a/kernel/power/process.c b/kernel/power/process.c
72173 index d5a258b..4271191 100644
72174 --- a/kernel/power/process.c
72175 +++ b/kernel/power/process.c
72176 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
72177 u64 elapsed_csecs64;
72178 unsigned int elapsed_csecs;
72179 bool wakeup = false;
72180 + bool timedout = false;
72181
72182 do_gettimeofday(&start);
72183
72184 @@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
72185
72186 while (true) {
72187 todo = 0;
72188 + if (time_after(jiffies, end_time))
72189 + timedout = true;
72190 read_lock(&tasklist_lock);
72191 do_each_thread(g, p) {
72192 if (p == current || !freeze_task(p))
72193 continue;
72194
72195 - if (!freezer_should_skip(p))
72196 + if (!freezer_should_skip(p)) {
72197 todo++;
72198 + if (timedout) {
72199 + printk(KERN_ERR "Task refusing to freeze:\n");
72200 + sched_show_task(p);
72201 + }
72202 + }
72203 } while_each_thread(g, p);
72204 read_unlock(&tasklist_lock);
72205
72206 @@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
72207 todo += wq_busy;
72208 }
72209
72210 - if (!todo || time_after(jiffies, end_time))
72211 + if (!todo || timedout)
72212 break;
72213
72214 if (pm_wakeup_pending()) {
72215 diff --git a/kernel/printk.c b/kernel/printk.c
72216 index 267ce78..2487112 100644
72217 --- a/kernel/printk.c
72218 +++ b/kernel/printk.c
72219 @@ -609,11 +609,17 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
72220 return ret;
72221 }
72222
72223 +static int check_syslog_permissions(int type, bool from_file);
72224 +
72225 static int devkmsg_open(struct inode *inode, struct file *file)
72226 {
72227 struct devkmsg_user *user;
72228 int err;
72229
72230 + err = check_syslog_permissions(SYSLOG_ACTION_OPEN, SYSLOG_FROM_FILE);
72231 + if (err)
72232 + return err;
72233 +
72234 /* write-only does not need any file context */
72235 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
72236 return 0;
72237 @@ -822,7 +828,7 @@ static int syslog_action_restricted(int type)
72238 if (dmesg_restrict)
72239 return 1;
72240 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
72241 - return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
72242 + return type != SYSLOG_ACTION_OPEN && type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
72243 }
72244
72245 static int check_syslog_permissions(int type, bool from_file)
72246 @@ -834,6 +840,11 @@ static int check_syslog_permissions(int type, bool from_file)
72247 if (from_file && type != SYSLOG_ACTION_OPEN)
72248 return 0;
72249
72250 +#ifdef CONFIG_GRKERNSEC_DMESG
72251 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
72252 + return -EPERM;
72253 +#endif
72254 +
72255 if (syslog_action_restricted(type)) {
72256 if (capable(CAP_SYSLOG))
72257 return 0;
72258 diff --git a/kernel/profile.c b/kernel/profile.c
72259 index 1f39181..86093471 100644
72260 --- a/kernel/profile.c
72261 +++ b/kernel/profile.c
72262 @@ -40,7 +40,7 @@ struct profile_hit {
72263 /* Oprofile timer tick hook */
72264 static int (*timer_hook)(struct pt_regs *) __read_mostly;
72265
72266 -static atomic_t *prof_buffer;
72267 +static atomic_unchecked_t *prof_buffer;
72268 static unsigned long prof_len, prof_shift;
72269
72270 int prof_on __read_mostly;
72271 @@ -282,7 +282,7 @@ static void profile_flip_buffers(void)
72272 hits[i].pc = 0;
72273 continue;
72274 }
72275 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
72276 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
72277 hits[i].hits = hits[i].pc = 0;
72278 }
72279 }
72280 @@ -343,9 +343,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
72281 * Add the current hit(s) and flush the write-queue out
72282 * to the global buffer:
72283 */
72284 - atomic_add(nr_hits, &prof_buffer[pc]);
72285 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
72286 for (i = 0; i < NR_PROFILE_HIT; ++i) {
72287 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
72288 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
72289 hits[i].pc = hits[i].hits = 0;
72290 }
72291 out:
72292 @@ -420,7 +420,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
72293 {
72294 unsigned long pc;
72295 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
72296 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
72297 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
72298 }
72299 #endif /* !CONFIG_SMP */
72300
72301 @@ -518,7 +518,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
72302 return -EFAULT;
72303 buf++; p++; count--; read++;
72304 }
72305 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
72306 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
72307 if (copy_to_user(buf, (void *)pnt, count))
72308 return -EFAULT;
72309 read += count;
72310 @@ -549,7 +549,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
72311 }
72312 #endif
72313 profile_discard_flip_buffers();
72314 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
72315 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
72316 return count;
72317 }
72318
72319 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
72320 index 6cbeaae..363c48a 100644
72321 --- a/kernel/ptrace.c
72322 +++ b/kernel/ptrace.c
72323 @@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
72324 if (seize)
72325 flags |= PT_SEIZED;
72326 rcu_read_lock();
72327 - if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
72328 + if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
72329 flags |= PT_PTRACE_CAP;
72330 rcu_read_unlock();
72331 task->ptrace = flags;
72332 @@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
72333 break;
72334 return -EIO;
72335 }
72336 - if (copy_to_user(dst, buf, retval))
72337 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
72338 return -EFAULT;
72339 copied += retval;
72340 src += retval;
72341 @@ -720,7 +720,7 @@ int ptrace_request(struct task_struct *child, long request,
72342 bool seized = child->ptrace & PT_SEIZED;
72343 int ret = -EIO;
72344 siginfo_t siginfo, *si;
72345 - void __user *datavp = (void __user *) data;
72346 + void __user *datavp = (__force void __user *) data;
72347 unsigned long __user *datalp = datavp;
72348 unsigned long flags;
72349
72350 @@ -922,14 +922,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
72351 goto out;
72352 }
72353
72354 + if (gr_handle_ptrace(child, request)) {
72355 + ret = -EPERM;
72356 + goto out_put_task_struct;
72357 + }
72358 +
72359 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
72360 ret = ptrace_attach(child, request, addr, data);
72361 /*
72362 * Some architectures need to do book-keeping after
72363 * a ptrace attach.
72364 */
72365 - if (!ret)
72366 + if (!ret) {
72367 arch_ptrace_attach(child);
72368 + gr_audit_ptrace(child);
72369 + }
72370 goto out_put_task_struct;
72371 }
72372
72373 @@ -957,7 +964,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
72374 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
72375 if (copied != sizeof(tmp))
72376 return -EIO;
72377 - return put_user(tmp, (unsigned long __user *)data);
72378 + return put_user(tmp, (__force unsigned long __user *)data);
72379 }
72380
72381 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
72382 @@ -1067,14 +1074,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
72383 goto out;
72384 }
72385
72386 + if (gr_handle_ptrace(child, request)) {
72387 + ret = -EPERM;
72388 + goto out_put_task_struct;
72389 + }
72390 +
72391 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
72392 ret = ptrace_attach(child, request, addr, data);
72393 /*
72394 * Some architectures need to do book-keeping after
72395 * a ptrace attach.
72396 */
72397 - if (!ret)
72398 + if (!ret) {
72399 arch_ptrace_attach(child);
72400 + gr_audit_ptrace(child);
72401 + }
72402 goto out_put_task_struct;
72403 }
72404
72405 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
72406 index e7dce58..ad0d7b7 100644
72407 --- a/kernel/rcutiny.c
72408 +++ b/kernel/rcutiny.c
72409 @@ -46,7 +46,7 @@
72410 struct rcu_ctrlblk;
72411 static void invoke_rcu_callbacks(void);
72412 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
72413 -static void rcu_process_callbacks(struct softirq_action *unused);
72414 +static void rcu_process_callbacks(void);
72415 static void __call_rcu(struct rcu_head *head,
72416 void (*func)(struct rcu_head *rcu),
72417 struct rcu_ctrlblk *rcp);
72418 @@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
72419 rcu_is_callbacks_kthread()));
72420 }
72421
72422 -static void rcu_process_callbacks(struct softirq_action *unused)
72423 +static void rcu_process_callbacks(void)
72424 {
72425 __rcu_process_callbacks(&rcu_sched_ctrlblk);
72426 __rcu_process_callbacks(&rcu_bh_ctrlblk);
72427 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
72428 index f85016a..91cb03b 100644
72429 --- a/kernel/rcutiny_plugin.h
72430 +++ b/kernel/rcutiny_plugin.h
72431 @@ -896,7 +896,7 @@ static int rcu_kthread(void *arg)
72432 have_rcu_kthread_work = morework;
72433 local_irq_restore(flags);
72434 if (work)
72435 - rcu_process_callbacks(NULL);
72436 + rcu_process_callbacks();
72437 schedule_timeout_interruptible(1); /* Leave CPU for others. */
72438 }
72439
72440 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
72441 index 31dea01..ad91ffb 100644
72442 --- a/kernel/rcutorture.c
72443 +++ b/kernel/rcutorture.c
72444 @@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
72445 { 0 };
72446 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
72447 { 0 };
72448 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
72449 -static atomic_t n_rcu_torture_alloc;
72450 -static atomic_t n_rcu_torture_alloc_fail;
72451 -static atomic_t n_rcu_torture_free;
72452 -static atomic_t n_rcu_torture_mberror;
72453 -static atomic_t n_rcu_torture_error;
72454 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
72455 +static atomic_unchecked_t n_rcu_torture_alloc;
72456 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
72457 +static atomic_unchecked_t n_rcu_torture_free;
72458 +static atomic_unchecked_t n_rcu_torture_mberror;
72459 +static atomic_unchecked_t n_rcu_torture_error;
72460 static long n_rcu_torture_barrier_error;
72461 static long n_rcu_torture_boost_ktrerror;
72462 static long n_rcu_torture_boost_rterror;
72463 @@ -272,11 +272,11 @@ rcu_torture_alloc(void)
72464
72465 spin_lock_bh(&rcu_torture_lock);
72466 if (list_empty(&rcu_torture_freelist)) {
72467 - atomic_inc(&n_rcu_torture_alloc_fail);
72468 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
72469 spin_unlock_bh(&rcu_torture_lock);
72470 return NULL;
72471 }
72472 - atomic_inc(&n_rcu_torture_alloc);
72473 + atomic_inc_unchecked(&n_rcu_torture_alloc);
72474 p = rcu_torture_freelist.next;
72475 list_del_init(p);
72476 spin_unlock_bh(&rcu_torture_lock);
72477 @@ -289,7 +289,7 @@ rcu_torture_alloc(void)
72478 static void
72479 rcu_torture_free(struct rcu_torture *p)
72480 {
72481 - atomic_inc(&n_rcu_torture_free);
72482 + atomic_inc_unchecked(&n_rcu_torture_free);
72483 spin_lock_bh(&rcu_torture_lock);
72484 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
72485 spin_unlock_bh(&rcu_torture_lock);
72486 @@ -409,7 +409,7 @@ rcu_torture_cb(struct rcu_head *p)
72487 i = rp->rtort_pipe_count;
72488 if (i > RCU_TORTURE_PIPE_LEN)
72489 i = RCU_TORTURE_PIPE_LEN;
72490 - atomic_inc(&rcu_torture_wcount[i]);
72491 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
72492 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
72493 rp->rtort_mbtest = 0;
72494 rcu_torture_free(rp);
72495 @@ -457,7 +457,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
72496 i = rp->rtort_pipe_count;
72497 if (i > RCU_TORTURE_PIPE_LEN)
72498 i = RCU_TORTURE_PIPE_LEN;
72499 - atomic_inc(&rcu_torture_wcount[i]);
72500 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
72501 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
72502 rp->rtort_mbtest = 0;
72503 list_del(&rp->rtort_free);
72504 @@ -975,7 +975,7 @@ rcu_torture_writer(void *arg)
72505 i = old_rp->rtort_pipe_count;
72506 if (i > RCU_TORTURE_PIPE_LEN)
72507 i = RCU_TORTURE_PIPE_LEN;
72508 - atomic_inc(&rcu_torture_wcount[i]);
72509 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
72510 old_rp->rtort_pipe_count++;
72511 cur_ops->deferred_free(old_rp);
72512 }
72513 @@ -1060,7 +1060,7 @@ static void rcu_torture_timer(unsigned long unused)
72514 }
72515 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
72516 if (p->rtort_mbtest == 0)
72517 - atomic_inc(&n_rcu_torture_mberror);
72518 + atomic_inc_unchecked(&n_rcu_torture_mberror);
72519 spin_lock(&rand_lock);
72520 cur_ops->read_delay(&rand);
72521 n_rcu_torture_timers++;
72522 @@ -1124,7 +1124,7 @@ rcu_torture_reader(void *arg)
72523 }
72524 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
72525 if (p->rtort_mbtest == 0)
72526 - atomic_inc(&n_rcu_torture_mberror);
72527 + atomic_inc_unchecked(&n_rcu_torture_mberror);
72528 cur_ops->read_delay(&rand);
72529 preempt_disable();
72530 pipe_count = p->rtort_pipe_count;
72531 @@ -1183,11 +1183,11 @@ rcu_torture_printk(char *page)
72532 rcu_torture_current,
72533 rcu_torture_current_version,
72534 list_empty(&rcu_torture_freelist),
72535 - atomic_read(&n_rcu_torture_alloc),
72536 - atomic_read(&n_rcu_torture_alloc_fail),
72537 - atomic_read(&n_rcu_torture_free));
72538 + atomic_read_unchecked(&n_rcu_torture_alloc),
72539 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
72540 + atomic_read_unchecked(&n_rcu_torture_free));
72541 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
72542 - atomic_read(&n_rcu_torture_mberror),
72543 + atomic_read_unchecked(&n_rcu_torture_mberror),
72544 n_rcu_torture_boost_ktrerror,
72545 n_rcu_torture_boost_rterror);
72546 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
72547 @@ -1206,14 +1206,14 @@ rcu_torture_printk(char *page)
72548 n_barrier_attempts,
72549 n_rcu_torture_barrier_error);
72550 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
72551 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
72552 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
72553 n_rcu_torture_barrier_error != 0 ||
72554 n_rcu_torture_boost_ktrerror != 0 ||
72555 n_rcu_torture_boost_rterror != 0 ||
72556 n_rcu_torture_boost_failure != 0 ||
72557 i > 1) {
72558 cnt += sprintf(&page[cnt], "!!! ");
72559 - atomic_inc(&n_rcu_torture_error);
72560 + atomic_inc_unchecked(&n_rcu_torture_error);
72561 WARN_ON_ONCE(1);
72562 }
72563 cnt += sprintf(&page[cnt], "Reader Pipe: ");
72564 @@ -1227,7 +1227,7 @@ rcu_torture_printk(char *page)
72565 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
72566 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
72567 cnt += sprintf(&page[cnt], " %d",
72568 - atomic_read(&rcu_torture_wcount[i]));
72569 + atomic_read_unchecked(&rcu_torture_wcount[i]));
72570 }
72571 cnt += sprintf(&page[cnt], "\n");
72572 if (cur_ops->stats)
72573 @@ -1920,7 +1920,7 @@ rcu_torture_cleanup(void)
72574
72575 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
72576
72577 - if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
72578 + if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
72579 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
72580 else if (n_online_successes != n_online_attempts ||
72581 n_offline_successes != n_offline_attempts)
72582 @@ -1989,18 +1989,18 @@ rcu_torture_init(void)
72583
72584 rcu_torture_current = NULL;
72585 rcu_torture_current_version = 0;
72586 - atomic_set(&n_rcu_torture_alloc, 0);
72587 - atomic_set(&n_rcu_torture_alloc_fail, 0);
72588 - atomic_set(&n_rcu_torture_free, 0);
72589 - atomic_set(&n_rcu_torture_mberror, 0);
72590 - atomic_set(&n_rcu_torture_error, 0);
72591 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
72592 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
72593 + atomic_set_unchecked(&n_rcu_torture_free, 0);
72594 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
72595 + atomic_set_unchecked(&n_rcu_torture_error, 0);
72596 n_rcu_torture_barrier_error = 0;
72597 n_rcu_torture_boost_ktrerror = 0;
72598 n_rcu_torture_boost_rterror = 0;
72599 n_rcu_torture_boost_failure = 0;
72600 n_rcu_torture_boosts = 0;
72601 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
72602 - atomic_set(&rcu_torture_wcount[i], 0);
72603 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
72604 for_each_possible_cpu(cpu) {
72605 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
72606 per_cpu(rcu_torture_count, cpu)[i] = 0;
72607 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
72608 index e441b77..dd54f17 100644
72609 --- a/kernel/rcutree.c
72610 +++ b/kernel/rcutree.c
72611 @@ -349,9 +349,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
72612 rcu_prepare_for_idle(smp_processor_id());
72613 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
72614 smp_mb__before_atomic_inc(); /* See above. */
72615 - atomic_inc(&rdtp->dynticks);
72616 + atomic_inc_unchecked(&rdtp->dynticks);
72617 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
72618 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
72619 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
72620
72621 /*
72622 * It is illegal to enter an extended quiescent state while
72623 @@ -487,10 +487,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
72624 int user)
72625 {
72626 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
72627 - atomic_inc(&rdtp->dynticks);
72628 + atomic_inc_unchecked(&rdtp->dynticks);
72629 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
72630 smp_mb__after_atomic_inc(); /* See above. */
72631 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
72632 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
72633 rcu_cleanup_after_idle(smp_processor_id());
72634 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
72635 if (!user && !is_idle_task(current)) {
72636 @@ -629,14 +629,14 @@ void rcu_nmi_enter(void)
72637 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
72638
72639 if (rdtp->dynticks_nmi_nesting == 0 &&
72640 - (atomic_read(&rdtp->dynticks) & 0x1))
72641 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
72642 return;
72643 rdtp->dynticks_nmi_nesting++;
72644 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
72645 - atomic_inc(&rdtp->dynticks);
72646 + atomic_inc_unchecked(&rdtp->dynticks);
72647 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
72648 smp_mb__after_atomic_inc(); /* See above. */
72649 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
72650 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
72651 }
72652
72653 /**
72654 @@ -655,9 +655,9 @@ void rcu_nmi_exit(void)
72655 return;
72656 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
72657 smp_mb__before_atomic_inc(); /* See above. */
72658 - atomic_inc(&rdtp->dynticks);
72659 + atomic_inc_unchecked(&rdtp->dynticks);
72660 smp_mb__after_atomic_inc(); /* Force delay to next write. */
72661 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
72662 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
72663 }
72664
72665 /**
72666 @@ -671,7 +671,7 @@ int rcu_is_cpu_idle(void)
72667 int ret;
72668
72669 preempt_disable();
72670 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
72671 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
72672 preempt_enable();
72673 return ret;
72674 }
72675 @@ -739,7 +739,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
72676 */
72677 static int dyntick_save_progress_counter(struct rcu_data *rdp)
72678 {
72679 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
72680 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
72681 return (rdp->dynticks_snap & 0x1) == 0;
72682 }
72683
72684 @@ -754,7 +754,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
72685 unsigned int curr;
72686 unsigned int snap;
72687
72688 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
72689 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
72690 snap = (unsigned int)rdp->dynticks_snap;
72691
72692 /*
72693 @@ -802,10 +802,10 @@ static int jiffies_till_stall_check(void)
72694 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
72695 */
72696 if (till_stall_check < 3) {
72697 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
72698 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
72699 till_stall_check = 3;
72700 } else if (till_stall_check > 300) {
72701 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
72702 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
72703 till_stall_check = 300;
72704 }
72705 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
72706 @@ -1592,7 +1592,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
72707 rsp->qlen += rdp->qlen;
72708 rdp->n_cbs_orphaned += rdp->qlen;
72709 rdp->qlen_lazy = 0;
72710 - ACCESS_ONCE(rdp->qlen) = 0;
72711 + ACCESS_ONCE_RW(rdp->qlen) = 0;
72712 }
72713
72714 /*
72715 @@ -1838,7 +1838,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
72716 }
72717 smp_mb(); /* List handling before counting for rcu_barrier(). */
72718 rdp->qlen_lazy -= count_lazy;
72719 - ACCESS_ONCE(rdp->qlen) -= count;
72720 + ACCESS_ONCE_RW(rdp->qlen) -= count;
72721 rdp->n_cbs_invoked += count;
72722
72723 /* Reinstate batch limit if we have worked down the excess. */
72724 @@ -2031,7 +2031,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
72725 /*
72726 * Do RCU core processing for the current CPU.
72727 */
72728 -static void rcu_process_callbacks(struct softirq_action *unused)
72729 +static void rcu_process_callbacks(void)
72730 {
72731 struct rcu_state *rsp;
72732
72733 @@ -2154,7 +2154,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
72734 local_irq_restore(flags);
72735 return;
72736 }
72737 - ACCESS_ONCE(rdp->qlen)++;
72738 + ACCESS_ONCE_RW(rdp->qlen)++;
72739 if (lazy)
72740 rdp->qlen_lazy++;
72741 else
72742 @@ -2363,11 +2363,11 @@ void synchronize_sched_expedited(void)
72743 * counter wrap on a 32-bit system. Quite a few more CPUs would of
72744 * course be required on a 64-bit system.
72745 */
72746 - if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
72747 + if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
72748 (ulong)atomic_long_read(&rsp->expedited_done) +
72749 ULONG_MAX / 8)) {
72750 synchronize_sched();
72751 - atomic_long_inc(&rsp->expedited_wrap);
72752 + atomic_long_inc_unchecked(&rsp->expedited_wrap);
72753 return;
72754 }
72755
72756 @@ -2375,7 +2375,7 @@ void synchronize_sched_expedited(void)
72757 * Take a ticket. Note that atomic_inc_return() implies a
72758 * full memory barrier.
72759 */
72760 - snap = atomic_long_inc_return(&rsp->expedited_start);
72761 + snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
72762 firstsnap = snap;
72763 get_online_cpus();
72764 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
72765 @@ -2388,14 +2388,14 @@ void synchronize_sched_expedited(void)
72766 synchronize_sched_expedited_cpu_stop,
72767 NULL) == -EAGAIN) {
72768 put_online_cpus();
72769 - atomic_long_inc(&rsp->expedited_tryfail);
72770 + atomic_long_inc_unchecked(&rsp->expedited_tryfail);
72771
72772 /* Check to see if someone else did our work for us. */
72773 s = atomic_long_read(&rsp->expedited_done);
72774 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
72775 /* ensure test happens before caller kfree */
72776 smp_mb__before_atomic_inc(); /* ^^^ */
72777 - atomic_long_inc(&rsp->expedited_workdone1);
72778 + atomic_long_inc_unchecked(&rsp->expedited_workdone1);
72779 return;
72780 }
72781
72782 @@ -2404,7 +2404,7 @@ void synchronize_sched_expedited(void)
72783 udelay(trycount * num_online_cpus());
72784 } else {
72785 wait_rcu_gp(call_rcu_sched);
72786 - atomic_long_inc(&rsp->expedited_normal);
72787 + atomic_long_inc_unchecked(&rsp->expedited_normal);
72788 return;
72789 }
72790
72791 @@ -2413,7 +2413,7 @@ void synchronize_sched_expedited(void)
72792 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
72793 /* ensure test happens before caller kfree */
72794 smp_mb__before_atomic_inc(); /* ^^^ */
72795 - atomic_long_inc(&rsp->expedited_workdone2);
72796 + atomic_long_inc_unchecked(&rsp->expedited_workdone2);
72797 return;
72798 }
72799
72800 @@ -2425,10 +2425,10 @@ void synchronize_sched_expedited(void)
72801 * period works for us.
72802 */
72803 get_online_cpus();
72804 - snap = atomic_long_read(&rsp->expedited_start);
72805 + snap = atomic_long_read_unchecked(&rsp->expedited_start);
72806 smp_mb(); /* ensure read is before try_stop_cpus(). */
72807 }
72808 - atomic_long_inc(&rsp->expedited_stoppedcpus);
72809 + atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
72810
72811 /*
72812 * Everyone up to our most recent fetch is covered by our grace
72813 @@ -2437,16 +2437,16 @@ void synchronize_sched_expedited(void)
72814 * than we did already did their update.
72815 */
72816 do {
72817 - atomic_long_inc(&rsp->expedited_done_tries);
72818 + atomic_long_inc_unchecked(&rsp->expedited_done_tries);
72819 s = atomic_long_read(&rsp->expedited_done);
72820 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
72821 /* ensure test happens before caller kfree */
72822 smp_mb__before_atomic_inc(); /* ^^^ */
72823 - atomic_long_inc(&rsp->expedited_done_lost);
72824 + atomic_long_inc_unchecked(&rsp->expedited_done_lost);
72825 break;
72826 }
72827 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
72828 - atomic_long_inc(&rsp->expedited_done_exit);
72829 + atomic_long_inc_unchecked(&rsp->expedited_done_exit);
72830
72831 put_online_cpus();
72832 }
72833 @@ -2620,7 +2620,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
72834 * ACCESS_ONCE() to prevent the compiler from speculating
72835 * the increment to precede the early-exit check.
72836 */
72837 - ACCESS_ONCE(rsp->n_barrier_done)++;
72838 + ACCESS_ONCE_RW(rsp->n_barrier_done)++;
72839 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
72840 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
72841 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
72842 @@ -2670,7 +2670,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
72843
72844 /* Increment ->n_barrier_done to prevent duplicate work. */
72845 smp_mb(); /* Keep increment after above mechanism. */
72846 - ACCESS_ONCE(rsp->n_barrier_done)++;
72847 + ACCESS_ONCE_RW(rsp->n_barrier_done)++;
72848 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
72849 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
72850 smp_mb(); /* Keep increment before caller's subsequent code. */
72851 @@ -2715,10 +2715,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
72852 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
72853 init_callback_list(rdp);
72854 rdp->qlen_lazy = 0;
72855 - ACCESS_ONCE(rdp->qlen) = 0;
72856 + ACCESS_ONCE_RW(rdp->qlen) = 0;
72857 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
72858 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
72859 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
72860 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
72861 #ifdef CONFIG_RCU_USER_QS
72862 WARN_ON_ONCE(rdp->dynticks->in_user);
72863 #endif
72864 @@ -2754,8 +2754,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
72865 rdp->blimit = blimit;
72866 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
72867 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
72868 - atomic_set(&rdp->dynticks->dynticks,
72869 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
72870 + atomic_set_unchecked(&rdp->dynticks->dynticks,
72871 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
72872 rcu_prepare_for_idle_init(cpu);
72873 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
72874
72875 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
72876 index 4b69291..704c92e 100644
72877 --- a/kernel/rcutree.h
72878 +++ b/kernel/rcutree.h
72879 @@ -86,7 +86,7 @@ struct rcu_dynticks {
72880 long long dynticks_nesting; /* Track irq/process nesting level. */
72881 /* Process level is worth LLONG_MAX/2. */
72882 int dynticks_nmi_nesting; /* Track NMI nesting level. */
72883 - atomic_t dynticks; /* Even value for idle, else odd. */
72884 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
72885 #ifdef CONFIG_RCU_FAST_NO_HZ
72886 int dyntick_drain; /* Prepare-for-idle state variable. */
72887 unsigned long dyntick_holdoff;
72888 @@ -423,17 +423,17 @@ struct rcu_state {
72889 /* _rcu_barrier(). */
72890 /* End of fields guarded by barrier_mutex. */
72891
72892 - atomic_long_t expedited_start; /* Starting ticket. */
72893 - atomic_long_t expedited_done; /* Done ticket. */
72894 - atomic_long_t expedited_wrap; /* # near-wrap incidents. */
72895 - atomic_long_t expedited_tryfail; /* # acquisition failures. */
72896 - atomic_long_t expedited_workdone1; /* # done by others #1. */
72897 - atomic_long_t expedited_workdone2; /* # done by others #2. */
72898 - atomic_long_t expedited_normal; /* # fallbacks to normal. */
72899 - atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
72900 - atomic_long_t expedited_done_tries; /* # tries to update _done. */
72901 - atomic_long_t expedited_done_lost; /* # times beaten to _done. */
72902 - atomic_long_t expedited_done_exit; /* # times exited _done loop. */
72903 + atomic_long_unchecked_t expedited_start; /* Starting ticket. */
72904 + atomic_long_t expedited_done; /* Done ticket. */
72905 + atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
72906 + atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
72907 + atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
72908 + atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
72909 + atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
72910 + atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
72911 + atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
72912 + atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
72913 + atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
72914
72915 unsigned long jiffies_force_qs; /* Time at which to invoke */
72916 /* force_quiescent_state(). */
72917 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
72918 index c1cc7e1..5043e0e 100644
72919 --- a/kernel/rcutree_plugin.h
72920 +++ b/kernel/rcutree_plugin.h
72921 @@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
72922
72923 /* Clean up and exit. */
72924 smp_mb(); /* ensure expedited GP seen before counter increment. */
72925 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
72926 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
72927 unlock_mb_ret:
72928 mutex_unlock(&sync_rcu_preempt_exp_mutex);
72929 mb_ret:
72930 @@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
72931 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
72932 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
72933 cpu, ticks_value, ticks_title,
72934 - atomic_read(&rdtp->dynticks) & 0xfff,
72935 + atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
72936 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
72937 fast_no_hz);
72938 }
72939 @@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
72940
72941 /* Enqueue the callback on the nocb list and update counts. */
72942 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
72943 - ACCESS_ONCE(*old_rhpp) = rhp;
72944 + ACCESS_ONCE_RW(*old_rhpp) = rhp;
72945 atomic_long_add(rhcount, &rdp->nocb_q_count);
72946 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
72947
72948 @@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
72949 * Extract queued callbacks, update counts, and wait
72950 * for a grace period to elapse.
72951 */
72952 - ACCESS_ONCE(rdp->nocb_head) = NULL;
72953 + ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
72954 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
72955 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
72956 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
72957 - ACCESS_ONCE(rdp->nocb_p_count) += c;
72958 - ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
72959 + ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
72960 + ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
72961 wait_rcu_gp(rdp->rsp->call_remote);
72962
72963 /* Each pass through the following loop invokes a callback. */
72964 @@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
72965 list = next;
72966 }
72967 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
72968 - ACCESS_ONCE(rdp->nocb_p_count) -= c;
72969 - ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
72970 + ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
72971 + ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
72972 rdp->n_nocbs_invoked += c;
72973 }
72974 return 0;
72975 @@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
72976 rdp = per_cpu_ptr(rsp->rda, cpu);
72977 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
72978 BUG_ON(IS_ERR(t));
72979 - ACCESS_ONCE(rdp->nocb_kthread) = t;
72980 + ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
72981 }
72982 }
72983
72984 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
72985 index 0d095dc..1985b19 100644
72986 --- a/kernel/rcutree_trace.c
72987 +++ b/kernel/rcutree_trace.c
72988 @@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
72989 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
72990 rdp->passed_quiesce, rdp->qs_pending);
72991 seq_printf(m, " dt=%d/%llx/%d df=%lu",
72992 - atomic_read(&rdp->dynticks->dynticks),
72993 + atomic_read_unchecked(&rdp->dynticks->dynticks),
72994 rdp->dynticks->dynticks_nesting,
72995 rdp->dynticks->dynticks_nmi_nesting,
72996 rdp->dynticks_fqs);
72997 @@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
72998 struct rcu_state *rsp = (struct rcu_state *)m->private;
72999
73000 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
73001 - atomic_long_read(&rsp->expedited_start),
73002 + atomic_long_read_unchecked(&rsp->expedited_start),
73003 atomic_long_read(&rsp->expedited_done),
73004 - atomic_long_read(&rsp->expedited_wrap),
73005 - atomic_long_read(&rsp->expedited_tryfail),
73006 - atomic_long_read(&rsp->expedited_workdone1),
73007 - atomic_long_read(&rsp->expedited_workdone2),
73008 - atomic_long_read(&rsp->expedited_normal),
73009 - atomic_long_read(&rsp->expedited_stoppedcpus),
73010 - atomic_long_read(&rsp->expedited_done_tries),
73011 - atomic_long_read(&rsp->expedited_done_lost),
73012 - atomic_long_read(&rsp->expedited_done_exit));
73013 + atomic_long_read_unchecked(&rsp->expedited_wrap),
73014 + atomic_long_read_unchecked(&rsp->expedited_tryfail),
73015 + atomic_long_read_unchecked(&rsp->expedited_workdone1),
73016 + atomic_long_read_unchecked(&rsp->expedited_workdone2),
73017 + atomic_long_read_unchecked(&rsp->expedited_normal),
73018 + atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
73019 + atomic_long_read_unchecked(&rsp->expedited_done_tries),
73020 + atomic_long_read_unchecked(&rsp->expedited_done_lost),
73021 + atomic_long_read_unchecked(&rsp->expedited_done_exit));
73022 return 0;
73023 }
73024
73025 diff --git a/kernel/resource.c b/kernel/resource.c
73026 index 73f35d4..4684fc4 100644
73027 --- a/kernel/resource.c
73028 +++ b/kernel/resource.c
73029 @@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
73030
73031 static int __init ioresources_init(void)
73032 {
73033 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73034 +#ifdef CONFIG_GRKERNSEC_PROC_USER
73035 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
73036 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
73037 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73038 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
73039 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
73040 +#endif
73041 +#else
73042 proc_create("ioports", 0, NULL, &proc_ioports_operations);
73043 proc_create("iomem", 0, NULL, &proc_iomem_operations);
73044 +#endif
73045 return 0;
73046 }
73047 __initcall(ioresources_init);
73048 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
73049 index 98ec494..4241d6d 100644
73050 --- a/kernel/rtmutex-tester.c
73051 +++ b/kernel/rtmutex-tester.c
73052 @@ -20,7 +20,7 @@
73053 #define MAX_RT_TEST_MUTEXES 8
73054
73055 static spinlock_t rttest_lock;
73056 -static atomic_t rttest_event;
73057 +static atomic_unchecked_t rttest_event;
73058
73059 struct test_thread_data {
73060 int opcode;
73061 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73062
73063 case RTTEST_LOCKCONT:
73064 td->mutexes[td->opdata] = 1;
73065 - td->event = atomic_add_return(1, &rttest_event);
73066 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73067 return 0;
73068
73069 case RTTEST_RESET:
73070 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73071 return 0;
73072
73073 case RTTEST_RESETEVENT:
73074 - atomic_set(&rttest_event, 0);
73075 + atomic_set_unchecked(&rttest_event, 0);
73076 return 0;
73077
73078 default:
73079 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73080 return ret;
73081
73082 td->mutexes[id] = 1;
73083 - td->event = atomic_add_return(1, &rttest_event);
73084 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73085 rt_mutex_lock(&mutexes[id]);
73086 - td->event = atomic_add_return(1, &rttest_event);
73087 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73088 td->mutexes[id] = 4;
73089 return 0;
73090
73091 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73092 return ret;
73093
73094 td->mutexes[id] = 1;
73095 - td->event = atomic_add_return(1, &rttest_event);
73096 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73097 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
73098 - td->event = atomic_add_return(1, &rttest_event);
73099 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73100 td->mutexes[id] = ret ? 0 : 4;
73101 return ret ? -EINTR : 0;
73102
73103 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73104 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
73105 return ret;
73106
73107 - td->event = atomic_add_return(1, &rttest_event);
73108 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73109 rt_mutex_unlock(&mutexes[id]);
73110 - td->event = atomic_add_return(1, &rttest_event);
73111 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73112 td->mutexes[id] = 0;
73113 return 0;
73114
73115 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73116 break;
73117
73118 td->mutexes[dat] = 2;
73119 - td->event = atomic_add_return(1, &rttest_event);
73120 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73121 break;
73122
73123 default:
73124 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73125 return;
73126
73127 td->mutexes[dat] = 3;
73128 - td->event = atomic_add_return(1, &rttest_event);
73129 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73130 break;
73131
73132 case RTTEST_LOCKNOWAIT:
73133 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73134 return;
73135
73136 td->mutexes[dat] = 1;
73137 - td->event = atomic_add_return(1, &rttest_event);
73138 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73139 return;
73140
73141 default:
73142 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
73143 index 0984a21..939f183 100644
73144 --- a/kernel/sched/auto_group.c
73145 +++ b/kernel/sched/auto_group.c
73146 @@ -11,7 +11,7 @@
73147
73148 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
73149 static struct autogroup autogroup_default;
73150 -static atomic_t autogroup_seq_nr;
73151 +static atomic_unchecked_t autogroup_seq_nr;
73152
73153 void __init autogroup_init(struct task_struct *init_task)
73154 {
73155 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
73156
73157 kref_init(&ag->kref);
73158 init_rwsem(&ag->lock);
73159 - ag->id = atomic_inc_return(&autogroup_seq_nr);
73160 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
73161 ag->tg = tg;
73162 #ifdef CONFIG_RT_GROUP_SCHED
73163 /*
73164 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
73165 index 26058d0..06f15dd 100644
73166 --- a/kernel/sched/core.c
73167 +++ b/kernel/sched/core.c
73168 @@ -3631,6 +3631,8 @@ int can_nice(const struct task_struct *p, const int nice)
73169 /* convert nice value [19,-20] to rlimit style value [1,40] */
73170 int nice_rlim = 20 - nice;
73171
73172 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
73173 +
73174 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
73175 capable(CAP_SYS_NICE));
73176 }
73177 @@ -3664,7 +3666,8 @@ SYSCALL_DEFINE1(nice, int, increment)
73178 if (nice > 19)
73179 nice = 19;
73180
73181 - if (increment < 0 && !can_nice(current, nice))
73182 + if (increment < 0 && (!can_nice(current, nice) ||
73183 + gr_handle_chroot_nice()))
73184 return -EPERM;
73185
73186 retval = security_task_setnice(current, nice);
73187 @@ -3818,6 +3821,7 @@ recheck:
73188 unsigned long rlim_rtprio =
73189 task_rlimit(p, RLIMIT_RTPRIO);
73190
73191 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
73192 /* can't set/change the rt policy */
73193 if (policy != p->policy && !rlim_rtprio)
73194 return -EPERM;
73195 @@ -5162,7 +5166,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
73196 * happens before everything else. This has to be lower priority than
73197 * the notifier in the perf_event subsystem, though.
73198 */
73199 -static struct notifier_block __cpuinitdata migration_notifier = {
73200 +static struct notifier_block migration_notifier = {
73201 .notifier_call = migration_call,
73202 .priority = CPU_PRI_MIGRATION,
73203 };
73204 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
73205 index 81fa536..6ccf96a 100644
73206 --- a/kernel/sched/fair.c
73207 +++ b/kernel/sched/fair.c
73208 @@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
73209
73210 static void reset_ptenuma_scan(struct task_struct *p)
73211 {
73212 - ACCESS_ONCE(p->mm->numa_scan_seq)++;
73213 + ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
73214 p->mm->numa_scan_offset = 0;
73215 }
73216
73217 @@ -3254,25 +3254,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
73218 */
73219 static int select_idle_sibling(struct task_struct *p, int target)
73220 {
73221 - int cpu = smp_processor_id();
73222 - int prev_cpu = task_cpu(p);
73223 struct sched_domain *sd;
73224 struct sched_group *sg;
73225 - int i;
73226 + int i = task_cpu(p);
73227
73228 - /*
73229 - * If the task is going to be woken-up on this cpu and if it is
73230 - * already idle, then it is the right target.
73231 - */
73232 - if (target == cpu && idle_cpu(cpu))
73233 - return cpu;
73234 + if (idle_cpu(target))
73235 + return target;
73236
73237 /*
73238 - * If the task is going to be woken-up on the cpu where it previously
73239 - * ran and if it is currently idle, then it the right target.
73240 + * If the prevous cpu is cache affine and idle, don't be stupid.
73241 */
73242 - if (target == prev_cpu && idle_cpu(prev_cpu))
73243 - return prev_cpu;
73244 + if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
73245 + return i;
73246
73247 /*
73248 * Otherwise, iterate the domains and find an elegible idle cpu.
73249 @@ -3286,7 +3279,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
73250 goto next;
73251
73252 for_each_cpu(i, sched_group_cpus(sg)) {
73253 - if (!idle_cpu(i))
73254 + if (i == target || !idle_cpu(i))
73255 goto next;
73256 }
73257
73258 @@ -5663,7 +5656,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
73259 * run_rebalance_domains is triggered when needed from the scheduler tick.
73260 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
73261 */
73262 -static void run_rebalance_domains(struct softirq_action *h)
73263 +static void run_rebalance_domains(void)
73264 {
73265 int this_cpu = smp_processor_id();
73266 struct rq *this_rq = cpu_rq(this_cpu);
73267 diff --git a/kernel/signal.c b/kernel/signal.c
73268 index 3d09cf6..a67d2c6 100644
73269 --- a/kernel/signal.c
73270 +++ b/kernel/signal.c
73271 @@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
73272
73273 int print_fatal_signals __read_mostly;
73274
73275 -static void __user *sig_handler(struct task_struct *t, int sig)
73276 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
73277 {
73278 return t->sighand->action[sig - 1].sa.sa_handler;
73279 }
73280
73281 -static int sig_handler_ignored(void __user *handler, int sig)
73282 +static int sig_handler_ignored(__sighandler_t handler, int sig)
73283 {
73284 /* Is it explicitly or implicitly ignored? */
73285 return handler == SIG_IGN ||
73286 @@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
73287
73288 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
73289 {
73290 - void __user *handler;
73291 + __sighandler_t handler;
73292
73293 handler = sig_handler(t, sig);
73294
73295 @@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
73296 atomic_inc(&user->sigpending);
73297 rcu_read_unlock();
73298
73299 + if (!override_rlimit)
73300 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
73301 +
73302 if (override_rlimit ||
73303 atomic_read(&user->sigpending) <=
73304 task_rlimit(t, RLIMIT_SIGPENDING)) {
73305 @@ -492,7 +495,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
73306
73307 int unhandled_signal(struct task_struct *tsk, int sig)
73308 {
73309 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
73310 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
73311 if (is_global_init(tsk))
73312 return 1;
73313 if (handler != SIG_IGN && handler != SIG_DFL)
73314 @@ -812,6 +815,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
73315 }
73316 }
73317
73318 + /* allow glibc communication via tgkill to other threads in our
73319 + thread group */
73320 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
73321 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
73322 + && gr_handle_signal(t, sig))
73323 + return -EPERM;
73324 +
73325 return security_task_kill(t, info, sig, 0);
73326 }
73327
73328 @@ -1194,7 +1204,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
73329 return send_signal(sig, info, p, 1);
73330 }
73331
73332 -static int
73333 +int
73334 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73335 {
73336 return send_signal(sig, info, t, 0);
73337 @@ -1231,6 +1241,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73338 unsigned long int flags;
73339 int ret, blocked, ignored;
73340 struct k_sigaction *action;
73341 + int is_unhandled = 0;
73342
73343 spin_lock_irqsave(&t->sighand->siglock, flags);
73344 action = &t->sighand->action[sig-1];
73345 @@ -1245,9 +1256,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73346 }
73347 if (action->sa.sa_handler == SIG_DFL)
73348 t->signal->flags &= ~SIGNAL_UNKILLABLE;
73349 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
73350 + is_unhandled = 1;
73351 ret = specific_send_sig_info(sig, info, t);
73352 spin_unlock_irqrestore(&t->sighand->siglock, flags);
73353
73354 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
73355 + normal operation */
73356 + if (is_unhandled) {
73357 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
73358 + gr_handle_crash(t, sig);
73359 + }
73360 +
73361 return ret;
73362 }
73363
73364 @@ -1314,8 +1334,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
73365 ret = check_kill_permission(sig, info, p);
73366 rcu_read_unlock();
73367
73368 - if (!ret && sig)
73369 + if (!ret && sig) {
73370 ret = do_send_sig_info(sig, info, p, true);
73371 + if (!ret)
73372 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
73373 + }
73374
73375 return ret;
73376 }
73377 @@ -2852,7 +2875,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
73378 int error = -ESRCH;
73379
73380 rcu_read_lock();
73381 - p = find_task_by_vpid(pid);
73382 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73383 + /* allow glibc communication via tgkill to other threads in our
73384 + thread group */
73385 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
73386 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
73387 + p = find_task_by_vpid_unrestricted(pid);
73388 + else
73389 +#endif
73390 + p = find_task_by_vpid(pid);
73391 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
73392 error = check_kill_permission(sig, info, p);
73393 /*
73394 @@ -3135,8 +3166,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
73395 }
73396 seg = get_fs();
73397 set_fs(KERNEL_DS);
73398 - ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
73399 - (stack_t __force __user *) &uoss,
73400 + ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
73401 + (stack_t __force_user *) &uoss,
73402 compat_user_stack_pointer());
73403 set_fs(seg);
73404 if (ret >= 0 && uoss_ptr) {
73405 diff --git a/kernel/smp.c b/kernel/smp.c
73406 index 69f38bd..77bbf12 100644
73407 --- a/kernel/smp.c
73408 +++ b/kernel/smp.c
73409 @@ -77,7 +77,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
73410 return NOTIFY_OK;
73411 }
73412
73413 -static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
73414 +static struct notifier_block hotplug_cfd_notifier = {
73415 .notifier_call = hotplug_cfd,
73416 };
73417
73418 diff --git a/kernel/softirq.c b/kernel/softirq.c
73419 index ed567ba..dc61b61 100644
73420 --- a/kernel/softirq.c
73421 +++ b/kernel/softirq.c
73422 @@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
73423 EXPORT_SYMBOL(irq_stat);
73424 #endif
73425
73426 -static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
73427 +static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
73428
73429 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
73430
73431 -char *softirq_to_name[NR_SOFTIRQS] = {
73432 +const char * const softirq_to_name[NR_SOFTIRQS] = {
73433 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
73434 "TASKLET", "SCHED", "HRTIMER", "RCU"
73435 };
73436 @@ -244,7 +244,7 @@ restart:
73437 kstat_incr_softirqs_this_cpu(vec_nr);
73438
73439 trace_softirq_entry(vec_nr);
73440 - h->action(h);
73441 + h->action();
73442 trace_softirq_exit(vec_nr);
73443 if (unlikely(prev_count != preempt_count())) {
73444 printk(KERN_ERR "huh, entered softirq %u %s %p"
73445 @@ -391,7 +391,7 @@ void __raise_softirq_irqoff(unsigned int nr)
73446 or_softirq_pending(1UL << nr);
73447 }
73448
73449 -void open_softirq(int nr, void (*action)(struct softirq_action *))
73450 +void __init open_softirq(int nr, void (*action)(void))
73451 {
73452 softirq_vec[nr].action = action;
73453 }
73454 @@ -447,7 +447,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
73455
73456 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
73457
73458 -static void tasklet_action(struct softirq_action *a)
73459 +static void tasklet_action(void)
73460 {
73461 struct tasklet_struct *list;
73462
73463 @@ -482,7 +482,7 @@ static void tasklet_action(struct softirq_action *a)
73464 }
73465 }
73466
73467 -static void tasklet_hi_action(struct softirq_action *a)
73468 +static void tasklet_hi_action(void)
73469 {
73470 struct tasklet_struct *list;
73471
73472 @@ -718,7 +718,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
73473 return NOTIFY_OK;
73474 }
73475
73476 -static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
73477 +static struct notifier_block remote_softirq_cpu_notifier = {
73478 .notifier_call = remote_softirq_cpu_notify,
73479 };
73480
73481 @@ -835,7 +835,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
73482 return NOTIFY_OK;
73483 }
73484
73485 -static struct notifier_block __cpuinitdata cpu_nfb = {
73486 +static struct notifier_block cpu_nfb = {
73487 .notifier_call = cpu_callback
73488 };
73489
73490 diff --git a/kernel/srcu.c b/kernel/srcu.c
73491 index 2b85982..d52ab26 100644
73492 --- a/kernel/srcu.c
73493 +++ b/kernel/srcu.c
73494 @@ -305,9 +305,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
73495 preempt_disable();
73496 idx = rcu_dereference_index_check(sp->completed,
73497 rcu_read_lock_sched_held()) & 0x1;
73498 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
73499 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
73500 smp_mb(); /* B */ /* Avoid leaking the critical section. */
73501 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
73502 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
73503 preempt_enable();
73504 return idx;
73505 }
73506 @@ -323,7 +323,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
73507 {
73508 preempt_disable();
73509 smp_mb(); /* C */ /* Avoid leaking the critical section. */
73510 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
73511 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
73512 preempt_enable();
73513 }
73514 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
73515 diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
73516 index 2f194e9..2c05ea9 100644
73517 --- a/kernel/stop_machine.c
73518 +++ b/kernel/stop_machine.c
73519 @@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
73520 * cpu notifiers. It currently shares the same priority as sched
73521 * migration_notifier.
73522 */
73523 -static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
73524 +static struct notifier_block cpu_stop_cpu_notifier = {
73525 .notifier_call = cpu_stop_cpu_callback,
73526 .priority = 10,
73527 };
73528 diff --git a/kernel/sys.c b/kernel/sys.c
73529 index 265b376..4e42ef5 100644
73530 --- a/kernel/sys.c
73531 +++ b/kernel/sys.c
73532 @@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
73533 error = -EACCES;
73534 goto out;
73535 }
73536 +
73537 + if (gr_handle_chroot_setpriority(p, niceval)) {
73538 + error = -EACCES;
73539 + goto out;
73540 + }
73541 +
73542 no_nice = security_task_setnice(p, niceval);
73543 if (no_nice) {
73544 error = no_nice;
73545 @@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
73546 goto error;
73547 }
73548
73549 + if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
73550 + goto error;
73551 +
73552 if (rgid != (gid_t) -1 ||
73553 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
73554 new->sgid = new->egid;
73555 @@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
73556 old = current_cred();
73557
73558 retval = -EPERM;
73559 +
73560 + if (gr_check_group_change(kgid, kgid, kgid))
73561 + goto error;
73562 +
73563 if (nsown_capable(CAP_SETGID))
73564 new->gid = new->egid = new->sgid = new->fsgid = kgid;
73565 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
73566 @@ -647,7 +660,7 @@ error:
73567 /*
73568 * change the user struct in a credentials set to match the new UID
73569 */
73570 -static int set_user(struct cred *new)
73571 +int set_user(struct cred *new)
73572 {
73573 struct user_struct *new_user;
73574
73575 @@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
73576 goto error;
73577 }
73578
73579 + if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
73580 + goto error;
73581 +
73582 if (!uid_eq(new->uid, old->uid)) {
73583 retval = set_user(new);
73584 if (retval < 0)
73585 @@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
73586 old = current_cred();
73587
73588 retval = -EPERM;
73589 +
73590 + if (gr_check_crash_uid(kuid))
73591 + goto error;
73592 + if (gr_check_user_change(kuid, kuid, kuid))
73593 + goto error;
73594 +
73595 if (nsown_capable(CAP_SETUID)) {
73596 new->suid = new->uid = kuid;
73597 if (!uid_eq(kuid, old->uid)) {
73598 @@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
73599 goto error;
73600 }
73601
73602 + if (gr_check_user_change(kruid, keuid, INVALID_UID))
73603 + goto error;
73604 +
73605 if (ruid != (uid_t) -1) {
73606 new->uid = kruid;
73607 if (!uid_eq(kruid, old->uid)) {
73608 @@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
73609 goto error;
73610 }
73611
73612 + if (gr_check_group_change(krgid, kegid, INVALID_GID))
73613 + goto error;
73614 +
73615 if (rgid != (gid_t) -1)
73616 new->gid = krgid;
73617 if (egid != (gid_t) -1)
73618 @@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
73619 if (!uid_valid(kuid))
73620 return old_fsuid;
73621
73622 + if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
73623 + goto error;
73624 +
73625 new = prepare_creds();
73626 if (!new)
73627 return old_fsuid;
73628 @@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
73629 }
73630 }
73631
73632 +error:
73633 abort_creds(new);
73634 return old_fsuid;
73635
73636 @@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
73637 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
73638 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
73639 nsown_capable(CAP_SETGID)) {
73640 + if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
73641 + goto error;
73642 +
73643 if (!gid_eq(kgid, old->fsgid)) {
73644 new->fsgid = kgid;
73645 goto change_okay;
73646 }
73647 }
73648
73649 +error:
73650 abort_creds(new);
73651 return old_fsgid;
73652
73653 @@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
73654 return -EFAULT;
73655
73656 down_read(&uts_sem);
73657 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
73658 + error = __copy_to_user(name->sysname, &utsname()->sysname,
73659 __OLD_UTS_LEN);
73660 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
73661 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
73662 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
73663 __OLD_UTS_LEN);
73664 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
73665 - error |= __copy_to_user(&name->release, &utsname()->release,
73666 + error |= __copy_to_user(name->release, &utsname()->release,
73667 __OLD_UTS_LEN);
73668 error |= __put_user(0, name->release + __OLD_UTS_LEN);
73669 - error |= __copy_to_user(&name->version, &utsname()->version,
73670 + error |= __copy_to_user(name->version, &utsname()->version,
73671 __OLD_UTS_LEN);
73672 error |= __put_user(0, name->version + __OLD_UTS_LEN);
73673 - error |= __copy_to_user(&name->machine, &utsname()->machine,
73674 + error |= __copy_to_user(name->machine, &utsname()->machine,
73675 __OLD_UTS_LEN);
73676 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
73677 up_read(&uts_sem);
73678 @@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
73679 error = get_dumpable(me->mm);
73680 break;
73681 case PR_SET_DUMPABLE:
73682 - if (arg2 < 0 || arg2 > 1) {
73683 + if (arg2 > 1) {
73684 error = -EINVAL;
73685 break;
73686 }
73687 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
73688 index c88878d..99d321b 100644
73689 --- a/kernel/sysctl.c
73690 +++ b/kernel/sysctl.c
73691 @@ -92,7 +92,6 @@
73692
73693
73694 #if defined(CONFIG_SYSCTL)
73695 -
73696 /* External variables not in a header file. */
73697 extern int sysctl_overcommit_memory;
73698 extern int sysctl_overcommit_ratio;
73699 @@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
73700 void __user *buffer, size_t *lenp, loff_t *ppos);
73701 #endif
73702
73703 -#ifdef CONFIG_PRINTK
73704 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
73705 void __user *buffer, size_t *lenp, loff_t *ppos);
73706 -#endif
73707
73708 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
73709 void __user *buffer, size_t *lenp, loff_t *ppos);
73710 @@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
73711
73712 #endif
73713
73714 +extern struct ctl_table grsecurity_table[];
73715 +
73716 static struct ctl_table kern_table[];
73717 static struct ctl_table vm_table[];
73718 static struct ctl_table fs_table[];
73719 @@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
73720 int sysctl_legacy_va_layout;
73721 #endif
73722
73723 +#ifdef CONFIG_PAX_SOFTMODE
73724 +static ctl_table pax_table[] = {
73725 + {
73726 + .procname = "softmode",
73727 + .data = &pax_softmode,
73728 + .maxlen = sizeof(unsigned int),
73729 + .mode = 0600,
73730 + .proc_handler = &proc_dointvec,
73731 + },
73732 +
73733 + { }
73734 +};
73735 +#endif
73736 +
73737 /* The default sysctl tables: */
73738
73739 static struct ctl_table sysctl_base_table[] = {
73740 @@ -268,6 +281,22 @@ static int max_extfrag_threshold = 1000;
73741 #endif
73742
73743 static struct ctl_table kern_table[] = {
73744 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
73745 + {
73746 + .procname = "grsecurity",
73747 + .mode = 0500,
73748 + .child = grsecurity_table,
73749 + },
73750 +#endif
73751 +
73752 +#ifdef CONFIG_PAX_SOFTMODE
73753 + {
73754 + .procname = "pax",
73755 + .mode = 0500,
73756 + .child = pax_table,
73757 + },
73758 +#endif
73759 +
73760 {
73761 .procname = "sched_child_runs_first",
73762 .data = &sysctl_sched_child_runs_first,
73763 @@ -593,7 +622,7 @@ static struct ctl_table kern_table[] = {
73764 .data = &modprobe_path,
73765 .maxlen = KMOD_PATH_LEN,
73766 .mode = 0644,
73767 - .proc_handler = proc_dostring,
73768 + .proc_handler = proc_dostring_modpriv,
73769 },
73770 {
73771 .procname = "modules_disabled",
73772 @@ -760,16 +789,20 @@ static struct ctl_table kern_table[] = {
73773 .extra1 = &zero,
73774 .extra2 = &one,
73775 },
73776 +#endif
73777 {
73778 .procname = "kptr_restrict",
73779 .data = &kptr_restrict,
73780 .maxlen = sizeof(int),
73781 .mode = 0644,
73782 .proc_handler = proc_dointvec_minmax_sysadmin,
73783 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73784 + .extra1 = &two,
73785 +#else
73786 .extra1 = &zero,
73787 +#endif
73788 .extra2 = &two,
73789 },
73790 -#endif
73791 {
73792 .procname = "ngroups_max",
73793 .data = &ngroups_max,
73794 @@ -1266,6 +1299,13 @@ static struct ctl_table vm_table[] = {
73795 .proc_handler = proc_dointvec_minmax,
73796 .extra1 = &zero,
73797 },
73798 + {
73799 + .procname = "heap_stack_gap",
73800 + .data = &sysctl_heap_stack_gap,
73801 + .maxlen = sizeof(sysctl_heap_stack_gap),
73802 + .mode = 0644,
73803 + .proc_handler = proc_doulongvec_minmax,
73804 + },
73805 #else
73806 {
73807 .procname = "nr_trim_pages",
73808 @@ -1716,6 +1756,16 @@ int proc_dostring(struct ctl_table *table, int write,
73809 buffer, lenp, ppos);
73810 }
73811
73812 +int proc_dostring_modpriv(struct ctl_table *table, int write,
73813 + void __user *buffer, size_t *lenp, loff_t *ppos)
73814 +{
73815 + if (write && !capable(CAP_SYS_MODULE))
73816 + return -EPERM;
73817 +
73818 + return _proc_do_string(table->data, table->maxlen, write,
73819 + buffer, lenp, ppos);
73820 +}
73821 +
73822 static size_t proc_skip_spaces(char **buf)
73823 {
73824 size_t ret;
73825 @@ -1821,6 +1871,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
73826 len = strlen(tmp);
73827 if (len > *size)
73828 len = *size;
73829 + if (len > sizeof(tmp))
73830 + len = sizeof(tmp);
73831 if (copy_to_user(*buf, tmp, len))
73832 return -EFAULT;
73833 *size -= len;
73834 @@ -2013,7 +2065,6 @@ static int proc_taint(struct ctl_table *table, int write,
73835 return err;
73836 }
73837
73838 -#ifdef CONFIG_PRINTK
73839 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
73840 void __user *buffer, size_t *lenp, loff_t *ppos)
73841 {
73842 @@ -2022,7 +2073,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
73843
73844 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
73845 }
73846 -#endif
73847
73848 struct do_proc_dointvec_minmax_conv_param {
73849 int *min;
73850 @@ -2169,8 +2219,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
73851 *i = val;
73852 } else {
73853 val = convdiv * (*i) / convmul;
73854 - if (!first)
73855 + if (!first) {
73856 err = proc_put_char(&buffer, &left, '\t');
73857 + if (err)
73858 + break;
73859 + }
73860 err = proc_put_long(&buffer, &left, val, false);
73861 if (err)
73862 break;
73863 @@ -2562,6 +2615,12 @@ int proc_dostring(struct ctl_table *table, int write,
73864 return -ENOSYS;
73865 }
73866
73867 +int proc_dostring_modpriv(struct ctl_table *table, int write,
73868 + void __user *buffer, size_t *lenp, loff_t *ppos)
73869 +{
73870 + return -ENOSYS;
73871 +}
73872 +
73873 int proc_dointvec(struct ctl_table *table, int write,
73874 void __user *buffer, size_t *lenp, loff_t *ppos)
73875 {
73876 @@ -2618,5 +2677,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
73877 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
73878 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
73879 EXPORT_SYMBOL(proc_dostring);
73880 +EXPORT_SYMBOL(proc_dostring_modpriv);
73881 EXPORT_SYMBOL(proc_doulongvec_minmax);
73882 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
73883 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
73884 index 0ddf3a0..a199f50 100644
73885 --- a/kernel/sysctl_binary.c
73886 +++ b/kernel/sysctl_binary.c
73887 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
73888 int i;
73889
73890 set_fs(KERNEL_DS);
73891 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
73892 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
73893 set_fs(old_fs);
73894 if (result < 0)
73895 goto out_kfree;
73896 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
73897 }
73898
73899 set_fs(KERNEL_DS);
73900 - result = vfs_write(file, buffer, str - buffer, &pos);
73901 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
73902 set_fs(old_fs);
73903 if (result < 0)
73904 goto out_kfree;
73905 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
73906 int i;
73907
73908 set_fs(KERNEL_DS);
73909 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
73910 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
73911 set_fs(old_fs);
73912 if (result < 0)
73913 goto out_kfree;
73914 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
73915 }
73916
73917 set_fs(KERNEL_DS);
73918 - result = vfs_write(file, buffer, str - buffer, &pos);
73919 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
73920 set_fs(old_fs);
73921 if (result < 0)
73922 goto out_kfree;
73923 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
73924 int i;
73925
73926 set_fs(KERNEL_DS);
73927 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
73928 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
73929 set_fs(old_fs);
73930 if (result < 0)
73931 goto out;
73932 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
73933 __le16 dnaddr;
73934
73935 set_fs(KERNEL_DS);
73936 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
73937 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
73938 set_fs(old_fs);
73939 if (result < 0)
73940 goto out;
73941 @@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
73942 le16_to_cpu(dnaddr) & 0x3ff);
73943
73944 set_fs(KERNEL_DS);
73945 - result = vfs_write(file, buf, len, &pos);
73946 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
73947 set_fs(old_fs);
73948 if (result < 0)
73949 goto out;
73950 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
73951 index 145bb4d..b2aa969 100644
73952 --- a/kernel/taskstats.c
73953 +++ b/kernel/taskstats.c
73954 @@ -28,9 +28,12 @@
73955 #include <linux/fs.h>
73956 #include <linux/file.h>
73957 #include <linux/pid_namespace.h>
73958 +#include <linux/grsecurity.h>
73959 #include <net/genetlink.h>
73960 #include <linux/atomic.h>
73961
73962 +extern int gr_is_taskstats_denied(int pid);
73963 +
73964 /*
73965 * Maximum length of a cpumask that can be specified in
73966 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
73967 @@ -570,6 +573,9 @@ err:
73968
73969 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
73970 {
73971 + if (gr_is_taskstats_denied(current->pid))
73972 + return -EACCES;
73973 +
73974 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
73975 return cmd_attr_register_cpumask(info);
73976 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
73977 diff --git a/kernel/time.c b/kernel/time.c
73978 index d226c6a..c7c0960 100644
73979 --- a/kernel/time.c
73980 +++ b/kernel/time.c
73981 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
73982 return error;
73983
73984 if (tz) {
73985 + /* we log in do_settimeofday called below, so don't log twice
73986 + */
73987 + if (!tv)
73988 + gr_log_timechange();
73989 +
73990 sys_tz = *tz;
73991 update_vsyscall_tz();
73992 if (firsttime) {
73993 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
73994 index f11d83b..d016d91 100644
73995 --- a/kernel/time/alarmtimer.c
73996 +++ b/kernel/time/alarmtimer.c
73997 @@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
73998 struct platform_device *pdev;
73999 int error = 0;
74000 int i;
74001 - struct k_clock alarm_clock = {
74002 + static struct k_clock alarm_clock = {
74003 .clock_getres = alarm_clock_getres,
74004 .clock_get = alarm_clock_get,
74005 .timer_create = alarm_timer_create,
74006 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
74007 index f113755..ec24223 100644
74008 --- a/kernel/time/tick-broadcast.c
74009 +++ b/kernel/time/tick-broadcast.c
74010 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
74011 * then clear the broadcast bit.
74012 */
74013 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
74014 - int cpu = smp_processor_id();
74015 + cpu = smp_processor_id();
74016
74017 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
74018 tick_broadcast_clear_oneshot(cpu);
74019 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
74020 index cbc6acb..3a77191 100644
74021 --- a/kernel/time/timekeeping.c
74022 +++ b/kernel/time/timekeeping.c
74023 @@ -15,6 +15,7 @@
74024 #include <linux/init.h>
74025 #include <linux/mm.h>
74026 #include <linux/sched.h>
74027 +#include <linux/grsecurity.h>
74028 #include <linux/syscore_ops.h>
74029 #include <linux/clocksource.h>
74030 #include <linux/jiffies.h>
74031 @@ -412,6 +413,8 @@ int do_settimeofday(const struct timespec *tv)
74032 if (!timespec_valid_strict(tv))
74033 return -EINVAL;
74034
74035 + gr_log_timechange();
74036 +
74037 write_seqlock_irqsave(&tk->lock, flags);
74038
74039 timekeeping_forward_now(tk);
74040 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
74041 index af5a7e9..715611a 100644
74042 --- a/kernel/time/timer_list.c
74043 +++ b/kernel/time/timer_list.c
74044 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
74045
74046 static void print_name_offset(struct seq_file *m, void *sym)
74047 {
74048 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74049 + SEQ_printf(m, "<%p>", NULL);
74050 +#else
74051 char symname[KSYM_NAME_LEN];
74052
74053 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
74054 SEQ_printf(m, "<%pK>", sym);
74055 else
74056 SEQ_printf(m, "%s", symname);
74057 +#endif
74058 }
74059
74060 static void
74061 @@ -112,7 +116,11 @@ next_one:
74062 static void
74063 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
74064 {
74065 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74066 + SEQ_printf(m, " .base: %p\n", NULL);
74067 +#else
74068 SEQ_printf(m, " .base: %pK\n", base);
74069 +#endif
74070 SEQ_printf(m, " .index: %d\n",
74071 base->index);
74072 SEQ_printf(m, " .resolution: %Lu nsecs\n",
74073 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
74074 {
74075 struct proc_dir_entry *pe;
74076
74077 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74078 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
74079 +#else
74080 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
74081 +#endif
74082 if (!pe)
74083 return -ENOMEM;
74084 return 0;
74085 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
74086 index 0b537f2..40d6c20 100644
74087 --- a/kernel/time/timer_stats.c
74088 +++ b/kernel/time/timer_stats.c
74089 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
74090 static unsigned long nr_entries;
74091 static struct entry entries[MAX_ENTRIES];
74092
74093 -static atomic_t overflow_count;
74094 +static atomic_unchecked_t overflow_count;
74095
74096 /*
74097 * The entries are in a hash-table, for fast lookup:
74098 @@ -140,7 +140,7 @@ static void reset_entries(void)
74099 nr_entries = 0;
74100 memset(entries, 0, sizeof(entries));
74101 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
74102 - atomic_set(&overflow_count, 0);
74103 + atomic_set_unchecked(&overflow_count, 0);
74104 }
74105
74106 static struct entry *alloc_entry(void)
74107 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
74108 if (likely(entry))
74109 entry->count++;
74110 else
74111 - atomic_inc(&overflow_count);
74112 + atomic_inc_unchecked(&overflow_count);
74113
74114 out_unlock:
74115 raw_spin_unlock_irqrestore(lock, flags);
74116 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
74117
74118 static void print_name_offset(struct seq_file *m, unsigned long addr)
74119 {
74120 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74121 + seq_printf(m, "<%p>", NULL);
74122 +#else
74123 char symname[KSYM_NAME_LEN];
74124
74125 if (lookup_symbol_name(addr, symname) < 0)
74126 - seq_printf(m, "<%p>", (void *)addr);
74127 + seq_printf(m, "<%pK>", (void *)addr);
74128 else
74129 seq_printf(m, "%s", symname);
74130 +#endif
74131 }
74132
74133 static int tstats_show(struct seq_file *m, void *v)
74134 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
74135
74136 seq_puts(m, "Timer Stats Version: v0.2\n");
74137 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
74138 - if (atomic_read(&overflow_count))
74139 + if (atomic_read_unchecked(&overflow_count))
74140 seq_printf(m, "Overflow: %d entries\n",
74141 - atomic_read(&overflow_count));
74142 + atomic_read_unchecked(&overflow_count));
74143
74144 for (i = 0; i < nr_entries; i++) {
74145 entry = entries + i;
74146 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
74147 {
74148 struct proc_dir_entry *pe;
74149
74150 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74151 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
74152 +#else
74153 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
74154 +#endif
74155 if (!pe)
74156 return -ENOMEM;
74157 return 0;
74158 diff --git a/kernel/timer.c b/kernel/timer.c
74159 index 367d008..1ee9ed9 100644
74160 --- a/kernel/timer.c
74161 +++ b/kernel/timer.c
74162 @@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
74163 /*
74164 * This function runs timers and the timer-tq in bottom half context.
74165 */
74166 -static void run_timer_softirq(struct softirq_action *h)
74167 +static void run_timer_softirq(void)
74168 {
74169 struct tvec_base *base = __this_cpu_read(tvec_bases);
74170
74171 @@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
74172 return NOTIFY_OK;
74173 }
74174
74175 -static struct notifier_block __cpuinitdata timers_nb = {
74176 +static struct notifier_block timers_nb = {
74177 .notifier_call = timer_cpu_notify,
74178 };
74179
74180 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
74181 index c0bd030..62a1927 100644
74182 --- a/kernel/trace/blktrace.c
74183 +++ b/kernel/trace/blktrace.c
74184 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
74185 struct blk_trace *bt = filp->private_data;
74186 char buf[16];
74187
74188 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
74189 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
74190
74191 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
74192 }
74193 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
74194 return 1;
74195
74196 bt = buf->chan->private_data;
74197 - atomic_inc(&bt->dropped);
74198 + atomic_inc_unchecked(&bt->dropped);
74199 return 0;
74200 }
74201
74202 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
74203
74204 bt->dir = dir;
74205 bt->dev = dev;
74206 - atomic_set(&bt->dropped, 0);
74207 + atomic_set_unchecked(&bt->dropped, 0);
74208
74209 ret = -EIO;
74210 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
74211 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
74212 index 43defd1..76da436 100644
74213 --- a/kernel/trace/ftrace.c
74214 +++ b/kernel/trace/ftrace.c
74215 @@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
74216 if (unlikely(ftrace_disabled))
74217 return 0;
74218
74219 + ret = ftrace_arch_code_modify_prepare();
74220 + FTRACE_WARN_ON(ret);
74221 + if (ret)
74222 + return 0;
74223 +
74224 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
74225 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
74226 if (ret) {
74227 ftrace_bug(ret, ip);
74228 - return 0;
74229 }
74230 - return 1;
74231 + return ret ? 0 : 1;
74232 }
74233
74234 /*
74235 @@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
74236
74237 int
74238 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
74239 - void *data)
74240 + void *data)
74241 {
74242 struct ftrace_func_probe *entry;
74243 struct ftrace_page *pg;
74244 @@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
74245 if (!count)
74246 return 0;
74247
74248 + pax_open_kernel();
74249 sort(start, count, sizeof(*start),
74250 ftrace_cmp_ips, ftrace_swap_ips);
74251 + pax_close_kernel();
74252
74253 start_pg = ftrace_allocate_pages(count);
74254 if (!start_pg)
74255 @@ -4559,8 +4566,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
74256 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
74257
74258 static int ftrace_graph_active;
74259 -static struct notifier_block ftrace_suspend_notifier;
74260 -
74261 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
74262 {
74263 return 0;
74264 @@ -4704,6 +4709,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
74265 return NOTIFY_DONE;
74266 }
74267
74268 +static struct notifier_block ftrace_suspend_notifier = {
74269 + .notifier_call = ftrace_suspend_notifier_call
74270 +};
74271 +
74272 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
74273 trace_func_graph_ent_t entryfunc)
74274 {
74275 @@ -4717,7 +4726,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
74276 goto out;
74277 }
74278
74279 - ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
74280 register_pm_notifier(&ftrace_suspend_notifier);
74281
74282 ftrace_graph_active++;
74283 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
74284 index ce8514f..8233573 100644
74285 --- a/kernel/trace/ring_buffer.c
74286 +++ b/kernel/trace/ring_buffer.c
74287 @@ -346,9 +346,9 @@ struct buffer_data_page {
74288 */
74289 struct buffer_page {
74290 struct list_head list; /* list of buffer pages */
74291 - local_t write; /* index for next write */
74292 + local_unchecked_t write; /* index for next write */
74293 unsigned read; /* index for next read */
74294 - local_t entries; /* entries on this page */
74295 + local_unchecked_t entries; /* entries on this page */
74296 unsigned long real_end; /* real end of data */
74297 struct buffer_data_page *page; /* Actual data page */
74298 };
74299 @@ -461,8 +461,8 @@ struct ring_buffer_per_cpu {
74300 unsigned long last_overrun;
74301 local_t entries_bytes;
74302 local_t entries;
74303 - local_t overrun;
74304 - local_t commit_overrun;
74305 + local_unchecked_t overrun;
74306 + local_unchecked_t commit_overrun;
74307 local_t dropped_events;
74308 local_t committing;
74309 local_t commits;
74310 @@ -861,8 +861,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
74311 *
74312 * We add a counter to the write field to denote this.
74313 */
74314 - old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
74315 - old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
74316 + old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
74317 + old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
74318
74319 /*
74320 * Just make sure we have seen our old_write and synchronize
74321 @@ -890,8 +890,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
74322 * cmpxchg to only update if an interrupt did not already
74323 * do it for us. If the cmpxchg fails, we don't care.
74324 */
74325 - (void)local_cmpxchg(&next_page->write, old_write, val);
74326 - (void)local_cmpxchg(&next_page->entries, old_entries, eval);
74327 + (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
74328 + (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
74329
74330 /*
74331 * No need to worry about races with clearing out the commit.
74332 @@ -1250,12 +1250,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
74333
74334 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
74335 {
74336 - return local_read(&bpage->entries) & RB_WRITE_MASK;
74337 + return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
74338 }
74339
74340 static inline unsigned long rb_page_write(struct buffer_page *bpage)
74341 {
74342 - return local_read(&bpage->write) & RB_WRITE_MASK;
74343 + return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
74344 }
74345
74346 static int
74347 @@ -1350,7 +1350,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
74348 * bytes consumed in ring buffer from here.
74349 * Increment overrun to account for the lost events.
74350 */
74351 - local_add(page_entries, &cpu_buffer->overrun);
74352 + local_add_unchecked(page_entries, &cpu_buffer->overrun);
74353 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
74354 }
74355
74356 @@ -1906,7 +1906,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
74357 * it is our responsibility to update
74358 * the counters.
74359 */
74360 - local_add(entries, &cpu_buffer->overrun);
74361 + local_add_unchecked(entries, &cpu_buffer->overrun);
74362 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
74363
74364 /*
74365 @@ -2056,7 +2056,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
74366 if (tail == BUF_PAGE_SIZE)
74367 tail_page->real_end = 0;
74368
74369 - local_sub(length, &tail_page->write);
74370 + local_sub_unchecked(length, &tail_page->write);
74371 return;
74372 }
74373
74374 @@ -2091,7 +2091,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
74375 rb_event_set_padding(event);
74376
74377 /* Set the write back to the previous setting */
74378 - local_sub(length, &tail_page->write);
74379 + local_sub_unchecked(length, &tail_page->write);
74380 return;
74381 }
74382
74383 @@ -2103,7 +2103,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
74384
74385 /* Set write to end of buffer */
74386 length = (tail + length) - BUF_PAGE_SIZE;
74387 - local_sub(length, &tail_page->write);
74388 + local_sub_unchecked(length, &tail_page->write);
74389 }
74390
74391 /*
74392 @@ -2129,7 +2129,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
74393 * about it.
74394 */
74395 if (unlikely(next_page == commit_page)) {
74396 - local_inc(&cpu_buffer->commit_overrun);
74397 + local_inc_unchecked(&cpu_buffer->commit_overrun);
74398 goto out_reset;
74399 }
74400
74401 @@ -2185,7 +2185,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
74402 cpu_buffer->tail_page) &&
74403 (cpu_buffer->commit_page ==
74404 cpu_buffer->reader_page))) {
74405 - local_inc(&cpu_buffer->commit_overrun);
74406 + local_inc_unchecked(&cpu_buffer->commit_overrun);
74407 goto out_reset;
74408 }
74409 }
74410 @@ -2233,7 +2233,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
74411 length += RB_LEN_TIME_EXTEND;
74412
74413 tail_page = cpu_buffer->tail_page;
74414 - write = local_add_return(length, &tail_page->write);
74415 + write = local_add_return_unchecked(length, &tail_page->write);
74416
74417 /* set write to only the index of the write */
74418 write &= RB_WRITE_MASK;
74419 @@ -2250,7 +2250,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
74420 kmemcheck_annotate_bitfield(event, bitfield);
74421 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
74422
74423 - local_inc(&tail_page->entries);
74424 + local_inc_unchecked(&tail_page->entries);
74425
74426 /*
74427 * If this is the first commit on the page, then update
74428 @@ -2283,7 +2283,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
74429
74430 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
74431 unsigned long write_mask =
74432 - local_read(&bpage->write) & ~RB_WRITE_MASK;
74433 + local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
74434 unsigned long event_length = rb_event_length(event);
74435 /*
74436 * This is on the tail page. It is possible that
74437 @@ -2293,7 +2293,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
74438 */
74439 old_index += write_mask;
74440 new_index += write_mask;
74441 - index = local_cmpxchg(&bpage->write, old_index, new_index);
74442 + index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
74443 if (index == old_index) {
74444 /* update counters */
74445 local_sub(event_length, &cpu_buffer->entries_bytes);
74446 @@ -2632,7 +2632,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
74447
74448 /* Do the likely case first */
74449 if (likely(bpage->page == (void *)addr)) {
74450 - local_dec(&bpage->entries);
74451 + local_dec_unchecked(&bpage->entries);
74452 return;
74453 }
74454
74455 @@ -2644,7 +2644,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
74456 start = bpage;
74457 do {
74458 if (bpage->page == (void *)addr) {
74459 - local_dec(&bpage->entries);
74460 + local_dec_unchecked(&bpage->entries);
74461 return;
74462 }
74463 rb_inc_page(cpu_buffer, &bpage);
74464 @@ -2926,7 +2926,7 @@ static inline unsigned long
74465 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
74466 {
74467 return local_read(&cpu_buffer->entries) -
74468 - (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
74469 + (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
74470 }
74471
74472 /**
74473 @@ -3015,7 +3015,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
74474 return 0;
74475
74476 cpu_buffer = buffer->buffers[cpu];
74477 - ret = local_read(&cpu_buffer->overrun);
74478 + ret = local_read_unchecked(&cpu_buffer->overrun);
74479
74480 return ret;
74481 }
74482 @@ -3038,7 +3038,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
74483 return 0;
74484
74485 cpu_buffer = buffer->buffers[cpu];
74486 - ret = local_read(&cpu_buffer->commit_overrun);
74487 + ret = local_read_unchecked(&cpu_buffer->commit_overrun);
74488
74489 return ret;
74490 }
74491 @@ -3105,7 +3105,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
74492 /* if you care about this being correct, lock the buffer */
74493 for_each_buffer_cpu(buffer, cpu) {
74494 cpu_buffer = buffer->buffers[cpu];
74495 - overruns += local_read(&cpu_buffer->overrun);
74496 + overruns += local_read_unchecked(&cpu_buffer->overrun);
74497 }
74498
74499 return overruns;
74500 @@ -3281,8 +3281,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
74501 /*
74502 * Reset the reader page to size zero.
74503 */
74504 - local_set(&cpu_buffer->reader_page->write, 0);
74505 - local_set(&cpu_buffer->reader_page->entries, 0);
74506 + local_set_unchecked(&cpu_buffer->reader_page->write, 0);
74507 + local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
74508 local_set(&cpu_buffer->reader_page->page->commit, 0);
74509 cpu_buffer->reader_page->real_end = 0;
74510
74511 @@ -3316,7 +3316,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
74512 * want to compare with the last_overrun.
74513 */
74514 smp_mb();
74515 - overwrite = local_read(&(cpu_buffer->overrun));
74516 + overwrite = local_read_unchecked(&(cpu_buffer->overrun));
74517
74518 /*
74519 * Here's the tricky part.
74520 @@ -3886,8 +3886,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
74521
74522 cpu_buffer->head_page
74523 = list_entry(cpu_buffer->pages, struct buffer_page, list);
74524 - local_set(&cpu_buffer->head_page->write, 0);
74525 - local_set(&cpu_buffer->head_page->entries, 0);
74526 + local_set_unchecked(&cpu_buffer->head_page->write, 0);
74527 + local_set_unchecked(&cpu_buffer->head_page->entries, 0);
74528 local_set(&cpu_buffer->head_page->page->commit, 0);
74529
74530 cpu_buffer->head_page->read = 0;
74531 @@ -3897,14 +3897,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
74532
74533 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
74534 INIT_LIST_HEAD(&cpu_buffer->new_pages);
74535 - local_set(&cpu_buffer->reader_page->write, 0);
74536 - local_set(&cpu_buffer->reader_page->entries, 0);
74537 + local_set_unchecked(&cpu_buffer->reader_page->write, 0);
74538 + local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
74539 local_set(&cpu_buffer->reader_page->page->commit, 0);
74540 cpu_buffer->reader_page->read = 0;
74541
74542 local_set(&cpu_buffer->entries_bytes, 0);
74543 - local_set(&cpu_buffer->overrun, 0);
74544 - local_set(&cpu_buffer->commit_overrun, 0);
74545 + local_set_unchecked(&cpu_buffer->overrun, 0);
74546 + local_set_unchecked(&cpu_buffer->commit_overrun, 0);
74547 local_set(&cpu_buffer->dropped_events, 0);
74548 local_set(&cpu_buffer->entries, 0);
74549 local_set(&cpu_buffer->committing, 0);
74550 @@ -4308,8 +4308,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
74551 rb_init_page(bpage);
74552 bpage = reader->page;
74553 reader->page = *data_page;
74554 - local_set(&reader->write, 0);
74555 - local_set(&reader->entries, 0);
74556 + local_set_unchecked(&reader->write, 0);
74557 + local_set_unchecked(&reader->entries, 0);
74558 reader->read = 0;
74559 *data_page = bpage;
74560
74561 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
74562 index 3c13e46..883d039 100644
74563 --- a/kernel/trace/trace.c
74564 +++ b/kernel/trace/trace.c
74565 @@ -4465,10 +4465,9 @@ static const struct file_operations tracing_dyn_info_fops = {
74566 };
74567 #endif
74568
74569 -static struct dentry *d_tracer;
74570 -
74571 struct dentry *tracing_init_dentry(void)
74572 {
74573 + static struct dentry *d_tracer;
74574 static int once;
74575
74576 if (d_tracer)
74577 @@ -4488,10 +4487,9 @@ struct dentry *tracing_init_dentry(void)
74578 return d_tracer;
74579 }
74580
74581 -static struct dentry *d_percpu;
74582 -
74583 struct dentry *tracing_dentry_percpu(void)
74584 {
74585 + static struct dentry *d_percpu;
74586 static int once;
74587 struct dentry *d_tracer;
74588
74589 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
74590 index 880073d..42db7c3 100644
74591 --- a/kernel/trace/trace_events.c
74592 +++ b/kernel/trace/trace_events.c
74593 @@ -1330,10 +1330,6 @@ static LIST_HEAD(ftrace_module_file_list);
74594 struct ftrace_module_file_ops {
74595 struct list_head list;
74596 struct module *mod;
74597 - struct file_operations id;
74598 - struct file_operations enable;
74599 - struct file_operations format;
74600 - struct file_operations filter;
74601 };
74602
74603 static struct ftrace_module_file_ops *
74604 @@ -1354,17 +1350,12 @@ trace_create_file_ops(struct module *mod)
74605
74606 file_ops->mod = mod;
74607
74608 - file_ops->id = ftrace_event_id_fops;
74609 - file_ops->id.owner = mod;
74610 -
74611 - file_ops->enable = ftrace_enable_fops;
74612 - file_ops->enable.owner = mod;
74613 -
74614 - file_ops->filter = ftrace_event_filter_fops;
74615 - file_ops->filter.owner = mod;
74616 -
74617 - file_ops->format = ftrace_event_format_fops;
74618 - file_ops->format.owner = mod;
74619 + pax_open_kernel();
74620 + mod->trace_id.owner = mod;
74621 + mod->trace_enable.owner = mod;
74622 + mod->trace_filter.owner = mod;
74623 + mod->trace_format.owner = mod;
74624 + pax_close_kernel();
74625
74626 list_add(&file_ops->list, &ftrace_module_file_list);
74627
74628 @@ -1388,8 +1379,8 @@ static void trace_module_add_events(struct module *mod)
74629
74630 for_each_event(call, start, end) {
74631 __trace_add_event_call(*call, mod,
74632 - &file_ops->id, &file_ops->enable,
74633 - &file_ops->filter, &file_ops->format);
74634 + &mod->trace_id, &mod->trace_enable,
74635 + &mod->trace_filter, &mod->trace_format);
74636 }
74637 }
74638
74639 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
74640 index fd3c8aa..5f324a6 100644
74641 --- a/kernel/trace/trace_mmiotrace.c
74642 +++ b/kernel/trace/trace_mmiotrace.c
74643 @@ -24,7 +24,7 @@ struct header_iter {
74644 static struct trace_array *mmio_trace_array;
74645 static bool overrun_detected;
74646 static unsigned long prev_overruns;
74647 -static atomic_t dropped_count;
74648 +static atomic_unchecked_t dropped_count;
74649
74650 static void mmio_reset_data(struct trace_array *tr)
74651 {
74652 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
74653
74654 static unsigned long count_overruns(struct trace_iterator *iter)
74655 {
74656 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
74657 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
74658 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
74659
74660 if (over > prev_overruns)
74661 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
74662 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
74663 sizeof(*entry), 0, pc);
74664 if (!event) {
74665 - atomic_inc(&dropped_count);
74666 + atomic_inc_unchecked(&dropped_count);
74667 return;
74668 }
74669 entry = ring_buffer_event_data(event);
74670 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
74671 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
74672 sizeof(*entry), 0, pc);
74673 if (!event) {
74674 - atomic_inc(&dropped_count);
74675 + atomic_inc_unchecked(&dropped_count);
74676 return;
74677 }
74678 entry = ring_buffer_event_data(event);
74679 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
74680 index 194d796..76edb8f 100644
74681 --- a/kernel/trace/trace_output.c
74682 +++ b/kernel/trace/trace_output.c
74683 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
74684
74685 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
74686 if (!IS_ERR(p)) {
74687 - p = mangle_path(s->buffer + s->len, p, "\n");
74688 + p = mangle_path(s->buffer + s->len, p, "\n\\");
74689 if (p) {
74690 s->len = p - s->buffer;
74691 return 1;
74692 @@ -852,14 +852,16 @@ int register_ftrace_event(struct trace_event *event)
74693 goto out;
74694 }
74695
74696 + pax_open_kernel();
74697 if (event->funcs->trace == NULL)
74698 - event->funcs->trace = trace_nop_print;
74699 + *(void **)&event->funcs->trace = trace_nop_print;
74700 if (event->funcs->raw == NULL)
74701 - event->funcs->raw = trace_nop_print;
74702 + *(void **)&event->funcs->raw = trace_nop_print;
74703 if (event->funcs->hex == NULL)
74704 - event->funcs->hex = trace_nop_print;
74705 + *(void **)&event->funcs->hex = trace_nop_print;
74706 if (event->funcs->binary == NULL)
74707 - event->funcs->binary = trace_nop_print;
74708 + *(void **)&event->funcs->binary = trace_nop_print;
74709 + pax_close_kernel();
74710
74711 key = event->type & (EVENT_HASHSIZE - 1);
74712
74713 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
74714 index 42ca822..cdcacc6 100644
74715 --- a/kernel/trace/trace_stack.c
74716 +++ b/kernel/trace/trace_stack.c
74717 @@ -52,7 +52,7 @@ static inline void check_stack(void)
74718 return;
74719
74720 /* we do not handle interrupt stacks yet */
74721 - if (!object_is_on_stack(&this_size))
74722 + if (!object_starts_on_stack(&this_size))
74723 return;
74724
74725 local_irq_save(flags);
74726 diff --git a/kernel/user.c b/kernel/user.c
74727 index 33acb5e..57ebfd4 100644
74728 --- a/kernel/user.c
74729 +++ b/kernel/user.c
74730 @@ -47,9 +47,7 @@ struct user_namespace init_user_ns = {
74731 .count = 4294967295U,
74732 },
74733 },
74734 - .kref = {
74735 - .refcount = ATOMIC_INIT(3),
74736 - },
74737 + .count = ATOMIC_INIT(3),
74738 .owner = GLOBAL_ROOT_UID,
74739 .group = GLOBAL_ROOT_GID,
74740 .proc_inum = PROC_USER_INIT_INO,
74741 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
74742 index 2b042c4..24f8ec3 100644
74743 --- a/kernel/user_namespace.c
74744 +++ b/kernel/user_namespace.c
74745 @@ -78,7 +78,7 @@ int create_user_ns(struct cred *new)
74746 return ret;
74747 }
74748
74749 - kref_init(&ns->kref);
74750 + atomic_set(&ns->count, 1);
74751 /* Leave the new->user_ns reference with the new user namespace. */
74752 ns->parent = parent_ns;
74753 ns->owner = owner;
74754 @@ -104,15 +104,16 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
74755 return create_user_ns(cred);
74756 }
74757
74758 -void free_user_ns(struct kref *kref)
74759 +void free_user_ns(struct user_namespace *ns)
74760 {
74761 - struct user_namespace *parent, *ns =
74762 - container_of(kref, struct user_namespace, kref);
74763 + struct user_namespace *parent;
74764
74765 - parent = ns->parent;
74766 - proc_free_inum(ns->proc_inum);
74767 - kmem_cache_free(user_ns_cachep, ns);
74768 - put_user_ns(parent);
74769 + do {
74770 + parent = ns->parent;
74771 + proc_free_inum(ns->proc_inum);
74772 + kmem_cache_free(user_ns_cachep, ns);
74773 + ns = parent;
74774 + } while (atomic_dec_and_test(&parent->count));
74775 }
74776 EXPORT_SYMBOL(free_user_ns);
74777
74778 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
74779 index 67604e5..3ebb003 100644
74780 --- a/lib/Kconfig.debug
74781 +++ b/lib/Kconfig.debug
74782 @@ -1278,6 +1278,7 @@ config LATENCYTOP
74783 depends on DEBUG_KERNEL
74784 depends on STACKTRACE_SUPPORT
74785 depends on PROC_FS
74786 + depends on !GRKERNSEC_HIDESYM
74787 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
74788 select KALLSYMS
74789 select KALLSYMS_ALL
74790 @@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
74791
74792 config PROVIDE_OHCI1394_DMA_INIT
74793 bool "Remote debugging over FireWire early on boot"
74794 - depends on PCI && X86
74795 + depends on PCI && X86 && !GRKERNSEC
74796 help
74797 If you want to debug problems which hang or crash the kernel early
74798 on boot and the crashing machine has a FireWire port, you can use
74799 @@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
74800
74801 config FIREWIRE_OHCI_REMOTE_DMA
74802 bool "Remote debugging over FireWire with firewire-ohci"
74803 - depends on FIREWIRE_OHCI
74804 + depends on FIREWIRE_OHCI && !GRKERNSEC
74805 help
74806 This option lets you use the FireWire bus for remote debugging
74807 with help of the firewire-ohci driver. It enables unfiltered
74808 diff --git a/lib/Makefile b/lib/Makefile
74809 index 02ed6c0..bd243da 100644
74810 --- a/lib/Makefile
74811 +++ b/lib/Makefile
74812 @@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
74813
74814 obj-$(CONFIG_BTREE) += btree.o
74815 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
74816 -obj-$(CONFIG_DEBUG_LIST) += list_debug.o
74817 +obj-y += list_debug.o
74818 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
74819
74820 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
74821 diff --git a/lib/bitmap.c b/lib/bitmap.c
74822 index 06f7e4f..f3cf2b0 100644
74823 --- a/lib/bitmap.c
74824 +++ b/lib/bitmap.c
74825 @@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
74826 {
74827 int c, old_c, totaldigits, ndigits, nchunks, nbits;
74828 u32 chunk;
74829 - const char __user __force *ubuf = (const char __user __force *)buf;
74830 + const char __user *ubuf = (const char __force_user *)buf;
74831
74832 bitmap_zero(maskp, nmaskbits);
74833
74834 @@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
74835 {
74836 if (!access_ok(VERIFY_READ, ubuf, ulen))
74837 return -EFAULT;
74838 - return __bitmap_parse((const char __force *)ubuf,
74839 + return __bitmap_parse((const char __force_kernel *)ubuf,
74840 ulen, 1, maskp, nmaskbits);
74841
74842 }
74843 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
74844 {
74845 unsigned a, b;
74846 int c, old_c, totaldigits;
74847 - const char __user __force *ubuf = (const char __user __force *)buf;
74848 + const char __user *ubuf = (const char __force_user *)buf;
74849 int exp_digit, in_range;
74850
74851 totaldigits = c = 0;
74852 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
74853 {
74854 if (!access_ok(VERIFY_READ, ubuf, ulen))
74855 return -EFAULT;
74856 - return __bitmap_parselist((const char __force *)ubuf,
74857 + return __bitmap_parselist((const char __force_kernel *)ubuf,
74858 ulen, 1, maskp, nmaskbits);
74859 }
74860 EXPORT_SYMBOL(bitmap_parselist_user);
74861 diff --git a/lib/bug.c b/lib/bug.c
74862 index d0cdf14..4d07bd2 100644
74863 --- a/lib/bug.c
74864 +++ b/lib/bug.c
74865 @@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
74866 return BUG_TRAP_TYPE_NONE;
74867
74868 bug = find_bug(bugaddr);
74869 + if (!bug)
74870 + return BUG_TRAP_TYPE_NONE;
74871
74872 file = NULL;
74873 line = 0;
74874 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
74875 index d11808c..dc2d6f8 100644
74876 --- a/lib/debugobjects.c
74877 +++ b/lib/debugobjects.c
74878 @@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
74879 if (limit > 4)
74880 return;
74881
74882 - is_on_stack = object_is_on_stack(addr);
74883 + is_on_stack = object_starts_on_stack(addr);
74884 if (is_on_stack == onstack)
74885 return;
74886
74887 diff --git a/lib/devres.c b/lib/devres.c
74888 index 80b9c76..9e32279 100644
74889 --- a/lib/devres.c
74890 +++ b/lib/devres.c
74891 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
74892 void devm_iounmap(struct device *dev, void __iomem *addr)
74893 {
74894 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
74895 - (void *)addr));
74896 + (void __force *)addr));
74897 iounmap(addr);
74898 }
74899 EXPORT_SYMBOL(devm_iounmap);
74900 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
74901 {
74902 ioport_unmap(addr);
74903 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
74904 - devm_ioport_map_match, (void *)addr));
74905 + devm_ioport_map_match, (void __force *)addr));
74906 }
74907 EXPORT_SYMBOL(devm_ioport_unmap);
74908
74909 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
74910 index 5e396ac..58d5de1 100644
74911 --- a/lib/dma-debug.c
74912 +++ b/lib/dma-debug.c
74913 @@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
74914
74915 void dma_debug_add_bus(struct bus_type *bus)
74916 {
74917 - struct notifier_block *nb;
74918 + notifier_block_no_const *nb;
74919
74920 if (global_disable)
74921 return;
74922 @@ -942,7 +942,7 @@ out:
74923
74924 static void check_for_stack(struct device *dev, void *addr)
74925 {
74926 - if (object_is_on_stack(addr))
74927 + if (object_starts_on_stack(addr))
74928 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
74929 "stack [addr=%p]\n", addr);
74930 }
74931 diff --git a/lib/inflate.c b/lib/inflate.c
74932 index 013a761..c28f3fc 100644
74933 --- a/lib/inflate.c
74934 +++ b/lib/inflate.c
74935 @@ -269,7 +269,7 @@ static void free(void *where)
74936 malloc_ptr = free_mem_ptr;
74937 }
74938 #else
74939 -#define malloc(a) kmalloc(a, GFP_KERNEL)
74940 +#define malloc(a) kmalloc((a), GFP_KERNEL)
74941 #define free(a) kfree(a)
74942 #endif
74943
74944 diff --git a/lib/ioremap.c b/lib/ioremap.c
74945 index 0c9216c..863bd89 100644
74946 --- a/lib/ioremap.c
74947 +++ b/lib/ioremap.c
74948 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
74949 unsigned long next;
74950
74951 phys_addr -= addr;
74952 - pmd = pmd_alloc(&init_mm, pud, addr);
74953 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
74954 if (!pmd)
74955 return -ENOMEM;
74956 do {
74957 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
74958 unsigned long next;
74959
74960 phys_addr -= addr;
74961 - pud = pud_alloc(&init_mm, pgd, addr);
74962 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
74963 if (!pud)
74964 return -ENOMEM;
74965 do {
74966 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
74967 index bd2bea9..6b3c95e 100644
74968 --- a/lib/is_single_threaded.c
74969 +++ b/lib/is_single_threaded.c
74970 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
74971 struct task_struct *p, *t;
74972 bool ret;
74973
74974 + if (!mm)
74975 + return true;
74976 +
74977 if (atomic_read(&task->signal->live) != 1)
74978 return false;
74979
74980 diff --git a/lib/list_debug.c b/lib/list_debug.c
74981 index c24c2f7..3fc5da0 100644
74982 --- a/lib/list_debug.c
74983 +++ b/lib/list_debug.c
74984 @@ -11,7 +11,9 @@
74985 #include <linux/bug.h>
74986 #include <linux/kernel.h>
74987 #include <linux/rculist.h>
74988 +#include <linux/mm.h>
74989
74990 +#ifdef CONFIG_DEBUG_LIST
74991 /*
74992 * Insert a new entry between two known consecutive entries.
74993 *
74994 @@ -19,21 +21,32 @@
74995 * the prev/next entries already!
74996 */
74997
74998 -void __list_add(struct list_head *new,
74999 - struct list_head *prev,
75000 - struct list_head *next)
75001 +static bool __list_add_debug(struct list_head *new,
75002 + struct list_head *prev,
75003 + struct list_head *next)
75004 {
75005 - WARN(next->prev != prev,
75006 + if (WARN(next->prev != prev,
75007 "list_add corruption. next->prev should be "
75008 "prev (%p), but was %p. (next=%p).\n",
75009 - prev, next->prev, next);
75010 - WARN(prev->next != next,
75011 + prev, next->prev, next) ||
75012 + WARN(prev->next != next,
75013 "list_add corruption. prev->next should be "
75014 "next (%p), but was %p. (prev=%p).\n",
75015 - next, prev->next, prev);
75016 - WARN(new == prev || new == next,
75017 + next, prev->next, prev) ||
75018 + WARN(new == prev || new == next,
75019 "list_add double add: new=%p, prev=%p, next=%p.\n",
75020 - new, prev, next);
75021 + new, prev, next))
75022 + return false;
75023 + return true;
75024 +}
75025 +
75026 +void __list_add(struct list_head *new,
75027 + struct list_head *prev,
75028 + struct list_head *next)
75029 +{
75030 + if (!__list_add_debug(new, prev, next))
75031 + return;
75032 +
75033 next->prev = new;
75034 new->next = next;
75035 new->prev = prev;
75036 @@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
75037 }
75038 EXPORT_SYMBOL(__list_add);
75039
75040 -void __list_del_entry(struct list_head *entry)
75041 +static bool __list_del_entry_debug(struct list_head *entry)
75042 {
75043 struct list_head *prev, *next;
75044
75045 @@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
75046 WARN(next->prev != entry,
75047 "list_del corruption. next->prev should be %p, "
75048 "but was %p\n", entry, next->prev))
75049 + return false;
75050 + return true;
75051 +}
75052 +
75053 +void __list_del_entry(struct list_head *entry)
75054 +{
75055 + if (!__list_del_entry_debug(entry))
75056 return;
75057
75058 - __list_del(prev, next);
75059 + __list_del(entry->prev, entry->next);
75060 }
75061 EXPORT_SYMBOL(__list_del_entry);
75062
75063 @@ -86,15 +106,54 @@ EXPORT_SYMBOL(list_del);
75064 void __list_add_rcu(struct list_head *new,
75065 struct list_head *prev, struct list_head *next)
75066 {
75067 - WARN(next->prev != prev,
75068 + if (WARN(next->prev != prev,
75069 "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
75070 - prev, next->prev, next);
75071 - WARN(prev->next != next,
75072 + prev, next->prev, next) ||
75073 + WARN(prev->next != next,
75074 "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
75075 - next, prev->next, prev);
75076 + next, prev->next, prev))
75077 + return;
75078 +
75079 new->next = next;
75080 new->prev = prev;
75081 rcu_assign_pointer(list_next_rcu(prev), new);
75082 next->prev = new;
75083 }
75084 EXPORT_SYMBOL(__list_add_rcu);
75085 +#endif
75086 +
75087 +void pax_list_add_tail(struct list_head *new, struct list_head *head)
75088 +{
75089 + struct list_head *prev, *next;
75090 +
75091 + prev = head->prev;
75092 + next = head;
75093 +
75094 +#ifdef CONFIG_DEBUG_LIST
75095 + if (!__list_add_debug(new, prev, next))
75096 + return;
75097 +#endif
75098 +
75099 + pax_open_kernel();
75100 + next->prev = new;
75101 + new->next = next;
75102 + new->prev = prev;
75103 + prev->next = new;
75104 + pax_close_kernel();
75105 +}
75106 +EXPORT_SYMBOL(pax_list_add_tail);
75107 +
75108 +void pax_list_del(struct list_head *entry)
75109 +{
75110 +#ifdef CONFIG_DEBUG_LIST
75111 + if (!__list_del_entry_debug(entry))
75112 + return;
75113 +#endif
75114 +
75115 + pax_open_kernel();
75116 + __list_del(entry->prev, entry->next);
75117 + entry->next = LIST_POISON1;
75118 + entry->prev = LIST_POISON2;
75119 + pax_close_kernel();
75120 +}
75121 +EXPORT_SYMBOL(pax_list_del);
75122 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
75123 index e796429..6e38f9f 100644
75124 --- a/lib/radix-tree.c
75125 +++ b/lib/radix-tree.c
75126 @@ -92,7 +92,7 @@ struct radix_tree_preload {
75127 int nr;
75128 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
75129 };
75130 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
75131 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
75132
75133 static inline void *ptr_to_indirect(void *ptr)
75134 {
75135 diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
75136 index bb2b201..46abaf9 100644
75137 --- a/lib/strncpy_from_user.c
75138 +++ b/lib/strncpy_from_user.c
75139 @@ -21,7 +21,7 @@
75140 */
75141 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
75142 {
75143 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
75144 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
75145 long res = 0;
75146
75147 /*
75148 diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
75149 index a28df52..3d55877 100644
75150 --- a/lib/strnlen_user.c
75151 +++ b/lib/strnlen_user.c
75152 @@ -26,7 +26,7 @@
75153 */
75154 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
75155 {
75156 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
75157 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
75158 long align, res = 0;
75159 unsigned long c;
75160
75161 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
75162 index fab33a9..3b5fe68 100644
75163 --- a/lib/vsprintf.c
75164 +++ b/lib/vsprintf.c
75165 @@ -16,6 +16,9 @@
75166 * - scnprintf and vscnprintf
75167 */
75168
75169 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75170 +#define __INCLUDED_BY_HIDESYM 1
75171 +#endif
75172 #include <stdarg.h>
75173 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
75174 #include <linux/types.h>
75175 @@ -541,7 +544,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
75176 char sym[KSYM_SYMBOL_LEN];
75177 if (ext == 'B')
75178 sprint_backtrace(sym, value);
75179 - else if (ext != 'f' && ext != 's')
75180 + else if (ext != 'f' && ext != 's' && ext != 'a')
75181 sprint_symbol(sym, value);
75182 else
75183 sprint_symbol_no_offset(sym, value);
75184 @@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
75185 return number(buf, end, *(const netdev_features_t *)addr, spec);
75186 }
75187
75188 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75189 +int kptr_restrict __read_mostly = 2;
75190 +#else
75191 int kptr_restrict __read_mostly;
75192 +#endif
75193
75194 /*
75195 * Show a '%p' thing. A kernel extension is that the '%p' is followed
75196 @@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
75197 * - 'S' For symbolic direct pointers with offset
75198 * - 's' For symbolic direct pointers without offset
75199 * - 'B' For backtraced symbolic direct pointers with offset
75200 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
75201 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
75202 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
75203 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
75204 * - 'M' For a 6-byte MAC address, it prints the address in the
75205 @@ -1043,12 +1052,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75206
75207 if (!ptr && *fmt != 'K') {
75208 /*
75209 - * Print (null) with the same width as a pointer so it makes
75210 + * Print (nil) with the same width as a pointer so it makes
75211 * tabular output look nice.
75212 */
75213 if (spec.field_width == -1)
75214 spec.field_width = default_width;
75215 - return string(buf, end, "(null)", spec);
75216 + return string(buf, end, "(nil)", spec);
75217 }
75218
75219 switch (*fmt) {
75220 @@ -1058,6 +1067,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75221 /* Fallthrough */
75222 case 'S':
75223 case 's':
75224 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75225 + break;
75226 +#else
75227 + return symbol_string(buf, end, ptr, spec, *fmt);
75228 +#endif
75229 + case 'A':
75230 + case 'a':
75231 case 'B':
75232 return symbol_string(buf, end, ptr, spec, *fmt);
75233 case 'R':
75234 @@ -1098,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75235 va_end(va);
75236 return buf;
75237 }
75238 + case 'P':
75239 + break;
75240 case 'K':
75241 /*
75242 * %pK cannot be used in IRQ context because its test
75243 @@ -1121,6 +1139,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75244 }
75245 break;
75246 }
75247 +
75248 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75249 + /* 'P' = approved pointers to copy to userland,
75250 + as in the /proc/kallsyms case, as we make it display nothing
75251 + for non-root users, and the real contents for root users
75252 + Also ignore 'K' pointers, since we force their NULLing for non-root users
75253 + above
75254 + */
75255 + if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
75256 + printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
75257 + dump_stack();
75258 + ptr = NULL;
75259 + }
75260 +#endif
75261 +
75262 spec.flags |= SMALL;
75263 if (spec.field_width == -1) {
75264 spec.field_width = default_width;
75265 @@ -1842,11 +1875,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75266 typeof(type) value; \
75267 if (sizeof(type) == 8) { \
75268 args = PTR_ALIGN(args, sizeof(u32)); \
75269 - *(u32 *)&value = *(u32 *)args; \
75270 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
75271 + *(u32 *)&value = *(const u32 *)args; \
75272 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
75273 } else { \
75274 args = PTR_ALIGN(args, sizeof(type)); \
75275 - value = *(typeof(type) *)args; \
75276 + value = *(const typeof(type) *)args; \
75277 } \
75278 args += sizeof(type); \
75279 value; \
75280 @@ -1909,7 +1942,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75281 case FORMAT_TYPE_STR: {
75282 const char *str_arg = args;
75283 args += strlen(str_arg) + 1;
75284 - str = string(str, end, (char *)str_arg, spec);
75285 + str = string(str, end, str_arg, spec);
75286 break;
75287 }
75288
75289 diff --git a/localversion-grsec b/localversion-grsec
75290 new file mode 100644
75291 index 0000000..7cd6065
75292 --- /dev/null
75293 +++ b/localversion-grsec
75294 @@ -0,0 +1 @@
75295 +-grsec
75296 diff --git a/mm/Kconfig b/mm/Kconfig
75297 index 278e3ab..87c384d 100644
75298 --- a/mm/Kconfig
75299 +++ b/mm/Kconfig
75300 @@ -286,10 +286,10 @@ config KSM
75301 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
75302
75303 config DEFAULT_MMAP_MIN_ADDR
75304 - int "Low address space to protect from user allocation"
75305 + int "Low address space to protect from user allocation"
75306 depends on MMU
75307 - default 4096
75308 - help
75309 + default 65536
75310 + help
75311 This is the portion of low virtual memory which should be protected
75312 from userspace allocation. Keeping a user from writing to low pages
75313 can help reduce the impact of kernel NULL pointer bugs.
75314 @@ -320,7 +320,7 @@ config MEMORY_FAILURE
75315
75316 config HWPOISON_INJECT
75317 tristate "HWPoison pages injector"
75318 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
75319 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
75320 select PROC_PAGE_MONITOR
75321
75322 config NOMMU_INITIAL_TRIM_EXCESS
75323 diff --git a/mm/filemap.c b/mm/filemap.c
75324 index 83efee7..3f99381 100644
75325 --- a/mm/filemap.c
75326 +++ b/mm/filemap.c
75327 @@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
75328 struct address_space *mapping = file->f_mapping;
75329
75330 if (!mapping->a_ops->readpage)
75331 - return -ENOEXEC;
75332 + return -ENODEV;
75333 file_accessed(file);
75334 vma->vm_ops = &generic_file_vm_ops;
75335 return 0;
75336 @@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
75337 *pos = i_size_read(inode);
75338
75339 if (limit != RLIM_INFINITY) {
75340 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
75341 if (*pos >= limit) {
75342 send_sig(SIGXFSZ, current, 0);
75343 return -EFBIG;
75344 diff --git a/mm/fremap.c b/mm/fremap.c
75345 index a0aaf0e..20325c3 100644
75346 --- a/mm/fremap.c
75347 +++ b/mm/fremap.c
75348 @@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
75349 retry:
75350 vma = find_vma(mm, start);
75351
75352 +#ifdef CONFIG_PAX_SEGMEXEC
75353 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
75354 + goto out;
75355 +#endif
75356 +
75357 /*
75358 * Make sure the vma is shared, that it supports prefaulting,
75359 * and that the remapped range is valid and fully within
75360 diff --git a/mm/highmem.c b/mm/highmem.c
75361 index b32b70c..e512eb0 100644
75362 --- a/mm/highmem.c
75363 +++ b/mm/highmem.c
75364 @@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
75365 * So no dangers, even with speculative execution.
75366 */
75367 page = pte_page(pkmap_page_table[i]);
75368 + pax_open_kernel();
75369 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
75370 -
75371 + pax_close_kernel();
75372 set_page_address(page, NULL);
75373 need_flush = 1;
75374 }
75375 @@ -198,9 +199,11 @@ start:
75376 }
75377 }
75378 vaddr = PKMAP_ADDR(last_pkmap_nr);
75379 +
75380 + pax_open_kernel();
75381 set_pte_at(&init_mm, vaddr,
75382 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
75383 -
75384 + pax_close_kernel();
75385 pkmap_count[last_pkmap_nr] = 1;
75386 set_page_address(page, (void *)vaddr);
75387
75388 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
75389 index 546db81..01d5c53 100644
75390 --- a/mm/hugetlb.c
75391 +++ b/mm/hugetlb.c
75392 @@ -2511,6 +2511,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
75393 return 1;
75394 }
75395
75396 +#ifdef CONFIG_PAX_SEGMEXEC
75397 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
75398 +{
75399 + struct mm_struct *mm = vma->vm_mm;
75400 + struct vm_area_struct *vma_m;
75401 + unsigned long address_m;
75402 + pte_t *ptep_m;
75403 +
75404 + vma_m = pax_find_mirror_vma(vma);
75405 + if (!vma_m)
75406 + return;
75407 +
75408 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75409 + address_m = address + SEGMEXEC_TASK_SIZE;
75410 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
75411 + get_page(page_m);
75412 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
75413 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
75414 +}
75415 +#endif
75416 +
75417 /*
75418 * Hugetlb_cow() should be called with page lock of the original hugepage held.
75419 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
75420 @@ -2629,6 +2650,11 @@ retry_avoidcopy:
75421 make_huge_pte(vma, new_page, 1));
75422 page_remove_rmap(old_page);
75423 hugepage_add_new_anon_rmap(new_page, vma, address);
75424 +
75425 +#ifdef CONFIG_PAX_SEGMEXEC
75426 + pax_mirror_huge_pte(vma, address, new_page);
75427 +#endif
75428 +
75429 /* Make the old page be freed below */
75430 new_page = old_page;
75431 }
75432 @@ -2788,6 +2814,10 @@ retry:
75433 && (vma->vm_flags & VM_SHARED)));
75434 set_huge_pte_at(mm, address, ptep, new_pte);
75435
75436 +#ifdef CONFIG_PAX_SEGMEXEC
75437 + pax_mirror_huge_pte(vma, address, page);
75438 +#endif
75439 +
75440 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
75441 /* Optimization, do the COW without a second fault */
75442 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
75443 @@ -2817,6 +2847,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75444 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
75445 struct hstate *h = hstate_vma(vma);
75446
75447 +#ifdef CONFIG_PAX_SEGMEXEC
75448 + struct vm_area_struct *vma_m;
75449 +#endif
75450 +
75451 address &= huge_page_mask(h);
75452
75453 ptep = huge_pte_offset(mm, address);
75454 @@ -2830,6 +2864,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75455 VM_FAULT_SET_HINDEX(hstate_index(h));
75456 }
75457
75458 +#ifdef CONFIG_PAX_SEGMEXEC
75459 + vma_m = pax_find_mirror_vma(vma);
75460 + if (vma_m) {
75461 + unsigned long address_m;
75462 +
75463 + if (vma->vm_start > vma_m->vm_start) {
75464 + address_m = address;
75465 + address -= SEGMEXEC_TASK_SIZE;
75466 + vma = vma_m;
75467 + h = hstate_vma(vma);
75468 + } else
75469 + address_m = address + SEGMEXEC_TASK_SIZE;
75470 +
75471 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
75472 + return VM_FAULT_OOM;
75473 + address_m &= HPAGE_MASK;
75474 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
75475 + }
75476 +#endif
75477 +
75478 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
75479 if (!ptep)
75480 return VM_FAULT_OOM;
75481 diff --git a/mm/internal.h b/mm/internal.h
75482 index 9ba2110..eaf0674 100644
75483 --- a/mm/internal.h
75484 +++ b/mm/internal.h
75485 @@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
75486 * in mm/page_alloc.c
75487 */
75488 extern void __free_pages_bootmem(struct page *page, unsigned int order);
75489 +extern void free_compound_page(struct page *page);
75490 extern void prep_compound_page(struct page *page, unsigned long order);
75491 #ifdef CONFIG_MEMORY_FAILURE
75492 extern bool is_free_buddy_page(struct page *page);
75493 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
75494 index 752a705..6c3102e 100644
75495 --- a/mm/kmemleak.c
75496 +++ b/mm/kmemleak.c
75497 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
75498
75499 for (i = 0; i < object->trace_len; i++) {
75500 void *ptr = (void *)object->trace[i];
75501 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
75502 + seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
75503 }
75504 }
75505
75506 @@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
75507 return -ENOMEM;
75508 }
75509
75510 - dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
75511 + dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
75512 &kmemleak_fops);
75513 if (!dentry)
75514 pr_warning("Failed to create the debugfs kmemleak file\n");
75515 diff --git a/mm/maccess.c b/mm/maccess.c
75516 index d53adf9..03a24bf 100644
75517 --- a/mm/maccess.c
75518 +++ b/mm/maccess.c
75519 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
75520 set_fs(KERNEL_DS);
75521 pagefault_disable();
75522 ret = __copy_from_user_inatomic(dst,
75523 - (__force const void __user *)src, size);
75524 + (const void __force_user *)src, size);
75525 pagefault_enable();
75526 set_fs(old_fs);
75527
75528 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
75529
75530 set_fs(KERNEL_DS);
75531 pagefault_disable();
75532 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
75533 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
75534 pagefault_enable();
75535 set_fs(old_fs);
75536
75537 diff --git a/mm/madvise.c b/mm/madvise.c
75538 index 03dfa5c..b032917 100644
75539 --- a/mm/madvise.c
75540 +++ b/mm/madvise.c
75541 @@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
75542 pgoff_t pgoff;
75543 unsigned long new_flags = vma->vm_flags;
75544
75545 +#ifdef CONFIG_PAX_SEGMEXEC
75546 + struct vm_area_struct *vma_m;
75547 +#endif
75548 +
75549 switch (behavior) {
75550 case MADV_NORMAL:
75551 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
75552 @@ -123,6 +127,13 @@ success:
75553 /*
75554 * vm_flags is protected by the mmap_sem held in write mode.
75555 */
75556 +
75557 +#ifdef CONFIG_PAX_SEGMEXEC
75558 + vma_m = pax_find_mirror_vma(vma);
75559 + if (vma_m)
75560 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
75561 +#endif
75562 +
75563 vma->vm_flags = new_flags;
75564
75565 out:
75566 @@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
75567 struct vm_area_struct ** prev,
75568 unsigned long start, unsigned long end)
75569 {
75570 +
75571 +#ifdef CONFIG_PAX_SEGMEXEC
75572 + struct vm_area_struct *vma_m;
75573 +#endif
75574 +
75575 *prev = vma;
75576 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
75577 return -EINVAL;
75578 @@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
75579 zap_page_range(vma, start, end - start, &details);
75580 } else
75581 zap_page_range(vma, start, end - start, NULL);
75582 +
75583 +#ifdef CONFIG_PAX_SEGMEXEC
75584 + vma_m = pax_find_mirror_vma(vma);
75585 + if (vma_m) {
75586 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
75587 + struct zap_details details = {
75588 + .nonlinear_vma = vma_m,
75589 + .last_index = ULONG_MAX,
75590 + };
75591 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
75592 + } else
75593 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
75594 + }
75595 +#endif
75596 +
75597 return 0;
75598 }
75599
75600 @@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
75601 if (end < start)
75602 goto out;
75603
75604 +#ifdef CONFIG_PAX_SEGMEXEC
75605 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
75606 + if (end > SEGMEXEC_TASK_SIZE)
75607 + goto out;
75608 + } else
75609 +#endif
75610 +
75611 + if (end > TASK_SIZE)
75612 + goto out;
75613 +
75614 error = 0;
75615 if (end == start)
75616 goto out;
75617 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
75618 index c6e4dd3..fdb2ca6 100644
75619 --- a/mm/memory-failure.c
75620 +++ b/mm/memory-failure.c
75621 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
75622
75623 int sysctl_memory_failure_recovery __read_mostly = 1;
75624
75625 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
75626 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
75627
75628 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
75629
75630 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
75631 pfn, t->comm, t->pid);
75632 si.si_signo = SIGBUS;
75633 si.si_errno = 0;
75634 - si.si_addr = (void *)addr;
75635 + si.si_addr = (void __user *)addr;
75636 #ifdef __ARCH_SI_TRAPNO
75637 si.si_trapno = trapno;
75638 #endif
75639 @@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
75640 }
75641
75642 nr_pages = 1 << compound_trans_order(hpage);
75643 - atomic_long_add(nr_pages, &mce_bad_pages);
75644 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
75645
75646 /*
75647 * We need/can do nothing about count=0 pages.
75648 @@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
75649 if (!PageHWPoison(hpage)
75650 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
75651 || (p != hpage && TestSetPageHWPoison(hpage))) {
75652 - atomic_long_sub(nr_pages, &mce_bad_pages);
75653 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
75654 return 0;
75655 }
75656 set_page_hwpoison_huge_page(hpage);
75657 @@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
75658 }
75659 if (hwpoison_filter(p)) {
75660 if (TestClearPageHWPoison(p))
75661 - atomic_long_sub(nr_pages, &mce_bad_pages);
75662 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
75663 unlock_page(hpage);
75664 put_page(hpage);
75665 return 0;
75666 @@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
75667 return 0;
75668 }
75669 if (TestClearPageHWPoison(p))
75670 - atomic_long_sub(nr_pages, &mce_bad_pages);
75671 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
75672 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
75673 return 0;
75674 }
75675 @@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
75676 */
75677 if (TestClearPageHWPoison(page)) {
75678 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
75679 - atomic_long_sub(nr_pages, &mce_bad_pages);
75680 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
75681 freeit = 1;
75682 if (PageHuge(page))
75683 clear_page_hwpoison_huge_page(page);
75684 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
75685 }
75686 done:
75687 if (!PageHWPoison(hpage))
75688 - atomic_long_add(1 << compound_trans_order(hpage),
75689 + atomic_long_add_unchecked(1 << compound_trans_order(hpage),
75690 &mce_bad_pages);
75691 set_page_hwpoison_huge_page(hpage);
75692 dequeue_hwpoisoned_huge_page(hpage);
75693 @@ -1583,7 +1583,7 @@ int soft_offline_page(struct page *page, int flags)
75694 return ret;
75695
75696 done:
75697 - atomic_long_add(1, &mce_bad_pages);
75698 + atomic_long_add_unchecked(1, &mce_bad_pages);
75699 SetPageHWPoison(page);
75700 /* keep elevated page count for bad page */
75701 return ret;
75702 diff --git a/mm/memory.c b/mm/memory.c
75703 index bb1369f..efb96b5 100644
75704 --- a/mm/memory.c
75705 +++ b/mm/memory.c
75706 @@ -433,6 +433,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
75707 free_pte_range(tlb, pmd, addr);
75708 } while (pmd++, addr = next, addr != end);
75709
75710 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
75711 start &= PUD_MASK;
75712 if (start < floor)
75713 return;
75714 @@ -447,6 +448,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
75715 pmd = pmd_offset(pud, start);
75716 pud_clear(pud);
75717 pmd_free_tlb(tlb, pmd, start);
75718 +#endif
75719 +
75720 }
75721
75722 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
75723 @@ -466,6 +469,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
75724 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
75725 } while (pud++, addr = next, addr != end);
75726
75727 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
75728 start &= PGDIR_MASK;
75729 if (start < floor)
75730 return;
75731 @@ -480,6 +484,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
75732 pud = pud_offset(pgd, start);
75733 pgd_clear(pgd);
75734 pud_free_tlb(tlb, pud, start);
75735 +#endif
75736 +
75737 }
75738
75739 /*
75740 @@ -1618,12 +1624,6 @@ no_page_table:
75741 return page;
75742 }
75743
75744 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
75745 -{
75746 - return stack_guard_page_start(vma, addr) ||
75747 - stack_guard_page_end(vma, addr+PAGE_SIZE);
75748 -}
75749 -
75750 /**
75751 * __get_user_pages() - pin user pages in memory
75752 * @tsk: task_struct of target task
75753 @@ -1709,10 +1709,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75754
75755 i = 0;
75756
75757 - do {
75758 + while (nr_pages) {
75759 struct vm_area_struct *vma;
75760
75761 - vma = find_extend_vma(mm, start);
75762 + vma = find_vma(mm, start);
75763 if (!vma && in_gate_area(mm, start)) {
75764 unsigned long pg = start & PAGE_MASK;
75765 pgd_t *pgd;
75766 @@ -1760,7 +1760,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75767 goto next_page;
75768 }
75769
75770 - if (!vma ||
75771 + if (!vma || start < vma->vm_start ||
75772 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
75773 !(vm_flags & vma->vm_flags))
75774 return i ? : -EFAULT;
75775 @@ -1787,11 +1787,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75776 int ret;
75777 unsigned int fault_flags = 0;
75778
75779 - /* For mlock, just skip the stack guard page. */
75780 - if (foll_flags & FOLL_MLOCK) {
75781 - if (stack_guard_page(vma, start))
75782 - goto next_page;
75783 - }
75784 if (foll_flags & FOLL_WRITE)
75785 fault_flags |= FAULT_FLAG_WRITE;
75786 if (nonblocking)
75787 @@ -1865,7 +1860,7 @@ next_page:
75788 start += PAGE_SIZE;
75789 nr_pages--;
75790 } while (nr_pages && start < vma->vm_end);
75791 - } while (nr_pages);
75792 + }
75793 return i;
75794 }
75795 EXPORT_SYMBOL(__get_user_pages);
75796 @@ -2072,6 +2067,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
75797 page_add_file_rmap(page);
75798 set_pte_at(mm, addr, pte, mk_pte(page, prot));
75799
75800 +#ifdef CONFIG_PAX_SEGMEXEC
75801 + pax_mirror_file_pte(vma, addr, page, ptl);
75802 +#endif
75803 +
75804 retval = 0;
75805 pte_unmap_unlock(pte, ptl);
75806 return retval;
75807 @@ -2116,9 +2115,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
75808 if (!page_count(page))
75809 return -EINVAL;
75810 if (!(vma->vm_flags & VM_MIXEDMAP)) {
75811 +
75812 +#ifdef CONFIG_PAX_SEGMEXEC
75813 + struct vm_area_struct *vma_m;
75814 +#endif
75815 +
75816 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
75817 BUG_ON(vma->vm_flags & VM_PFNMAP);
75818 vma->vm_flags |= VM_MIXEDMAP;
75819 +
75820 +#ifdef CONFIG_PAX_SEGMEXEC
75821 + vma_m = pax_find_mirror_vma(vma);
75822 + if (vma_m)
75823 + vma_m->vm_flags |= VM_MIXEDMAP;
75824 +#endif
75825 +
75826 }
75827 return insert_page(vma, addr, page, vma->vm_page_prot);
75828 }
75829 @@ -2201,6 +2212,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
75830 unsigned long pfn)
75831 {
75832 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
75833 + BUG_ON(vma->vm_mirror);
75834
75835 if (addr < vma->vm_start || addr >= vma->vm_end)
75836 return -EFAULT;
75837 @@ -2401,7 +2413,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
75838
75839 BUG_ON(pud_huge(*pud));
75840
75841 - pmd = pmd_alloc(mm, pud, addr);
75842 + pmd = (mm == &init_mm) ?
75843 + pmd_alloc_kernel(mm, pud, addr) :
75844 + pmd_alloc(mm, pud, addr);
75845 if (!pmd)
75846 return -ENOMEM;
75847 do {
75848 @@ -2421,7 +2435,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
75849 unsigned long next;
75850 int err;
75851
75852 - pud = pud_alloc(mm, pgd, addr);
75853 + pud = (mm == &init_mm) ?
75854 + pud_alloc_kernel(mm, pgd, addr) :
75855 + pud_alloc(mm, pgd, addr);
75856 if (!pud)
75857 return -ENOMEM;
75858 do {
75859 @@ -2509,6 +2525,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
75860 copy_user_highpage(dst, src, va, vma);
75861 }
75862
75863 +#ifdef CONFIG_PAX_SEGMEXEC
75864 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
75865 +{
75866 + struct mm_struct *mm = vma->vm_mm;
75867 + spinlock_t *ptl;
75868 + pte_t *pte, entry;
75869 +
75870 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
75871 + entry = *pte;
75872 + if (!pte_present(entry)) {
75873 + if (!pte_none(entry)) {
75874 + BUG_ON(pte_file(entry));
75875 + free_swap_and_cache(pte_to_swp_entry(entry));
75876 + pte_clear_not_present_full(mm, address, pte, 0);
75877 + }
75878 + } else {
75879 + struct page *page;
75880 +
75881 + flush_cache_page(vma, address, pte_pfn(entry));
75882 + entry = ptep_clear_flush(vma, address, pte);
75883 + BUG_ON(pte_dirty(entry));
75884 + page = vm_normal_page(vma, address, entry);
75885 + if (page) {
75886 + update_hiwater_rss(mm);
75887 + if (PageAnon(page))
75888 + dec_mm_counter_fast(mm, MM_ANONPAGES);
75889 + else
75890 + dec_mm_counter_fast(mm, MM_FILEPAGES);
75891 + page_remove_rmap(page);
75892 + page_cache_release(page);
75893 + }
75894 + }
75895 + pte_unmap_unlock(pte, ptl);
75896 +}
75897 +
75898 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
75899 + *
75900 + * the ptl of the lower mapped page is held on entry and is not released on exit
75901 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
75902 + */
75903 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
75904 +{
75905 + struct mm_struct *mm = vma->vm_mm;
75906 + unsigned long address_m;
75907 + spinlock_t *ptl_m;
75908 + struct vm_area_struct *vma_m;
75909 + pmd_t *pmd_m;
75910 + pte_t *pte_m, entry_m;
75911 +
75912 + BUG_ON(!page_m || !PageAnon(page_m));
75913 +
75914 + vma_m = pax_find_mirror_vma(vma);
75915 + if (!vma_m)
75916 + return;
75917 +
75918 + BUG_ON(!PageLocked(page_m));
75919 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75920 + address_m = address + SEGMEXEC_TASK_SIZE;
75921 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75922 + pte_m = pte_offset_map(pmd_m, address_m);
75923 + ptl_m = pte_lockptr(mm, pmd_m);
75924 + if (ptl != ptl_m) {
75925 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75926 + if (!pte_none(*pte_m))
75927 + goto out;
75928 + }
75929 +
75930 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
75931 + page_cache_get(page_m);
75932 + page_add_anon_rmap(page_m, vma_m, address_m);
75933 + inc_mm_counter_fast(mm, MM_ANONPAGES);
75934 + set_pte_at(mm, address_m, pte_m, entry_m);
75935 + update_mmu_cache(vma_m, address_m, entry_m);
75936 +out:
75937 + if (ptl != ptl_m)
75938 + spin_unlock(ptl_m);
75939 + pte_unmap(pte_m);
75940 + unlock_page(page_m);
75941 +}
75942 +
75943 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
75944 +{
75945 + struct mm_struct *mm = vma->vm_mm;
75946 + unsigned long address_m;
75947 + spinlock_t *ptl_m;
75948 + struct vm_area_struct *vma_m;
75949 + pmd_t *pmd_m;
75950 + pte_t *pte_m, entry_m;
75951 +
75952 + BUG_ON(!page_m || PageAnon(page_m));
75953 +
75954 + vma_m = pax_find_mirror_vma(vma);
75955 + if (!vma_m)
75956 + return;
75957 +
75958 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75959 + address_m = address + SEGMEXEC_TASK_SIZE;
75960 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75961 + pte_m = pte_offset_map(pmd_m, address_m);
75962 + ptl_m = pte_lockptr(mm, pmd_m);
75963 + if (ptl != ptl_m) {
75964 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75965 + if (!pte_none(*pte_m))
75966 + goto out;
75967 + }
75968 +
75969 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
75970 + page_cache_get(page_m);
75971 + page_add_file_rmap(page_m);
75972 + inc_mm_counter_fast(mm, MM_FILEPAGES);
75973 + set_pte_at(mm, address_m, pte_m, entry_m);
75974 + update_mmu_cache(vma_m, address_m, entry_m);
75975 +out:
75976 + if (ptl != ptl_m)
75977 + spin_unlock(ptl_m);
75978 + pte_unmap(pte_m);
75979 +}
75980 +
75981 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
75982 +{
75983 + struct mm_struct *mm = vma->vm_mm;
75984 + unsigned long address_m;
75985 + spinlock_t *ptl_m;
75986 + struct vm_area_struct *vma_m;
75987 + pmd_t *pmd_m;
75988 + pte_t *pte_m, entry_m;
75989 +
75990 + vma_m = pax_find_mirror_vma(vma);
75991 + if (!vma_m)
75992 + return;
75993 +
75994 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75995 + address_m = address + SEGMEXEC_TASK_SIZE;
75996 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75997 + pte_m = pte_offset_map(pmd_m, address_m);
75998 + ptl_m = pte_lockptr(mm, pmd_m);
75999 + if (ptl != ptl_m) {
76000 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
76001 + if (!pte_none(*pte_m))
76002 + goto out;
76003 + }
76004 +
76005 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
76006 + set_pte_at(mm, address_m, pte_m, entry_m);
76007 +out:
76008 + if (ptl != ptl_m)
76009 + spin_unlock(ptl_m);
76010 + pte_unmap(pte_m);
76011 +}
76012 +
76013 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
76014 +{
76015 + struct page *page_m;
76016 + pte_t entry;
76017 +
76018 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
76019 + goto out;
76020 +
76021 + entry = *pte;
76022 + page_m = vm_normal_page(vma, address, entry);
76023 + if (!page_m)
76024 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
76025 + else if (PageAnon(page_m)) {
76026 + if (pax_find_mirror_vma(vma)) {
76027 + pte_unmap_unlock(pte, ptl);
76028 + lock_page(page_m);
76029 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
76030 + if (pte_same(entry, *pte))
76031 + pax_mirror_anon_pte(vma, address, page_m, ptl);
76032 + else
76033 + unlock_page(page_m);
76034 + }
76035 + } else
76036 + pax_mirror_file_pte(vma, address, page_m, ptl);
76037 +
76038 +out:
76039 + pte_unmap_unlock(pte, ptl);
76040 +}
76041 +#endif
76042 +
76043 /*
76044 * This routine handles present pages, when users try to write
76045 * to a shared page. It is done by copying the page to a new address
76046 @@ -2725,6 +2921,12 @@ gotten:
76047 */
76048 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
76049 if (likely(pte_same(*page_table, orig_pte))) {
76050 +
76051 +#ifdef CONFIG_PAX_SEGMEXEC
76052 + if (pax_find_mirror_vma(vma))
76053 + BUG_ON(!trylock_page(new_page));
76054 +#endif
76055 +
76056 if (old_page) {
76057 if (!PageAnon(old_page)) {
76058 dec_mm_counter_fast(mm, MM_FILEPAGES);
76059 @@ -2776,6 +2978,10 @@ gotten:
76060 page_remove_rmap(old_page);
76061 }
76062
76063 +#ifdef CONFIG_PAX_SEGMEXEC
76064 + pax_mirror_anon_pte(vma, address, new_page, ptl);
76065 +#endif
76066 +
76067 /* Free the old page.. */
76068 new_page = old_page;
76069 ret |= VM_FAULT_WRITE;
76070 @@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
76071 swap_free(entry);
76072 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
76073 try_to_free_swap(page);
76074 +
76075 +#ifdef CONFIG_PAX_SEGMEXEC
76076 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
76077 +#endif
76078 +
76079 unlock_page(page);
76080 if (swapcache) {
76081 /*
76082 @@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
76083
76084 /* No need to invalidate - it was non-present before */
76085 update_mmu_cache(vma, address, page_table);
76086 +
76087 +#ifdef CONFIG_PAX_SEGMEXEC
76088 + pax_mirror_anon_pte(vma, address, page, ptl);
76089 +#endif
76090 +
76091 unlock:
76092 pte_unmap_unlock(page_table, ptl);
76093 out:
76094 @@ -3093,40 +3309,6 @@ out_release:
76095 }
76096
76097 /*
76098 - * This is like a special single-page "expand_{down|up}wards()",
76099 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
76100 - * doesn't hit another vma.
76101 - */
76102 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
76103 -{
76104 - address &= PAGE_MASK;
76105 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
76106 - struct vm_area_struct *prev = vma->vm_prev;
76107 -
76108 - /*
76109 - * Is there a mapping abutting this one below?
76110 - *
76111 - * That's only ok if it's the same stack mapping
76112 - * that has gotten split..
76113 - */
76114 - if (prev && prev->vm_end == address)
76115 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
76116 -
76117 - expand_downwards(vma, address - PAGE_SIZE);
76118 - }
76119 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
76120 - struct vm_area_struct *next = vma->vm_next;
76121 -
76122 - /* As VM_GROWSDOWN but s/below/above/ */
76123 - if (next && next->vm_start == address + PAGE_SIZE)
76124 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
76125 -
76126 - expand_upwards(vma, address + PAGE_SIZE);
76127 - }
76128 - return 0;
76129 -}
76130 -
76131 -/*
76132 * We enter with non-exclusive mmap_sem (to exclude vma changes,
76133 * but allow concurrent faults), and pte mapped but not yet locked.
76134 * We return with mmap_sem still held, but pte unmapped and unlocked.
76135 @@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
76136 unsigned long address, pte_t *page_table, pmd_t *pmd,
76137 unsigned int flags)
76138 {
76139 - struct page *page;
76140 + struct page *page = NULL;
76141 spinlock_t *ptl;
76142 pte_t entry;
76143
76144 - pte_unmap(page_table);
76145 -
76146 - /* Check if we need to add a guard page to the stack */
76147 - if (check_stack_guard_page(vma, address) < 0)
76148 - return VM_FAULT_SIGBUS;
76149 -
76150 - /* Use the zero-page for reads */
76151 if (!(flags & FAULT_FLAG_WRITE)) {
76152 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
76153 vma->vm_page_prot));
76154 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
76155 + ptl = pte_lockptr(mm, pmd);
76156 + spin_lock(ptl);
76157 if (!pte_none(*page_table))
76158 goto unlock;
76159 goto setpte;
76160 }
76161
76162 /* Allocate our own private page. */
76163 + pte_unmap(page_table);
76164 +
76165 if (unlikely(anon_vma_prepare(vma)))
76166 goto oom;
76167 page = alloc_zeroed_user_highpage_movable(vma, address);
76168 @@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
76169 if (!pte_none(*page_table))
76170 goto release;
76171
76172 +#ifdef CONFIG_PAX_SEGMEXEC
76173 + if (pax_find_mirror_vma(vma))
76174 + BUG_ON(!trylock_page(page));
76175 +#endif
76176 +
76177 inc_mm_counter_fast(mm, MM_ANONPAGES);
76178 page_add_new_anon_rmap(page, vma, address);
76179 setpte:
76180 @@ -3181,6 +3364,12 @@ setpte:
76181
76182 /* No need to invalidate - it was non-present before */
76183 update_mmu_cache(vma, address, page_table);
76184 +
76185 +#ifdef CONFIG_PAX_SEGMEXEC
76186 + if (page)
76187 + pax_mirror_anon_pte(vma, address, page, ptl);
76188 +#endif
76189 +
76190 unlock:
76191 pte_unmap_unlock(page_table, ptl);
76192 return 0;
76193 @@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76194 */
76195 /* Only go through if we didn't race with anybody else... */
76196 if (likely(pte_same(*page_table, orig_pte))) {
76197 +
76198 +#ifdef CONFIG_PAX_SEGMEXEC
76199 + if (anon && pax_find_mirror_vma(vma))
76200 + BUG_ON(!trylock_page(page));
76201 +#endif
76202 +
76203 flush_icache_page(vma, page);
76204 entry = mk_pte(page, vma->vm_page_prot);
76205 if (flags & FAULT_FLAG_WRITE)
76206 @@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76207
76208 /* no need to invalidate: a not-present page won't be cached */
76209 update_mmu_cache(vma, address, page_table);
76210 +
76211 +#ifdef CONFIG_PAX_SEGMEXEC
76212 + if (anon)
76213 + pax_mirror_anon_pte(vma, address, page, ptl);
76214 + else
76215 + pax_mirror_file_pte(vma, address, page, ptl);
76216 +#endif
76217 +
76218 } else {
76219 if (cow_page)
76220 mem_cgroup_uncharge_page(cow_page);
76221 @@ -3664,6 +3867,12 @@ int handle_pte_fault(struct mm_struct *mm,
76222 if (flags & FAULT_FLAG_WRITE)
76223 flush_tlb_fix_spurious_fault(vma, address);
76224 }
76225 +
76226 +#ifdef CONFIG_PAX_SEGMEXEC
76227 + pax_mirror_pte(vma, address, pte, pmd, ptl);
76228 + return 0;
76229 +#endif
76230 +
76231 unlock:
76232 pte_unmap_unlock(pte, ptl);
76233 return 0;
76234 @@ -3680,6 +3889,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76235 pmd_t *pmd;
76236 pte_t *pte;
76237
76238 +#ifdef CONFIG_PAX_SEGMEXEC
76239 + struct vm_area_struct *vma_m;
76240 +#endif
76241 +
76242 __set_current_state(TASK_RUNNING);
76243
76244 count_vm_event(PGFAULT);
76245 @@ -3691,6 +3904,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76246 if (unlikely(is_vm_hugetlb_page(vma)))
76247 return hugetlb_fault(mm, vma, address, flags);
76248
76249 +#ifdef CONFIG_PAX_SEGMEXEC
76250 + vma_m = pax_find_mirror_vma(vma);
76251 + if (vma_m) {
76252 + unsigned long address_m;
76253 + pgd_t *pgd_m;
76254 + pud_t *pud_m;
76255 + pmd_t *pmd_m;
76256 +
76257 + if (vma->vm_start > vma_m->vm_start) {
76258 + address_m = address;
76259 + address -= SEGMEXEC_TASK_SIZE;
76260 + vma = vma_m;
76261 + } else
76262 + address_m = address + SEGMEXEC_TASK_SIZE;
76263 +
76264 + pgd_m = pgd_offset(mm, address_m);
76265 + pud_m = pud_alloc(mm, pgd_m, address_m);
76266 + if (!pud_m)
76267 + return VM_FAULT_OOM;
76268 + pmd_m = pmd_alloc(mm, pud_m, address_m);
76269 + if (!pmd_m)
76270 + return VM_FAULT_OOM;
76271 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
76272 + return VM_FAULT_OOM;
76273 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
76274 + }
76275 +#endif
76276 +
76277 retry:
76278 pgd = pgd_offset(mm, address);
76279 pud = pud_alloc(mm, pgd, address);
76280 @@ -3789,6 +4030,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
76281 spin_unlock(&mm->page_table_lock);
76282 return 0;
76283 }
76284 +
76285 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
76286 +{
76287 + pud_t *new = pud_alloc_one(mm, address);
76288 + if (!new)
76289 + return -ENOMEM;
76290 +
76291 + smp_wmb(); /* See comment in __pte_alloc */
76292 +
76293 + spin_lock(&mm->page_table_lock);
76294 + if (pgd_present(*pgd)) /* Another has populated it */
76295 + pud_free(mm, new);
76296 + else
76297 + pgd_populate_kernel(mm, pgd, new);
76298 + spin_unlock(&mm->page_table_lock);
76299 + return 0;
76300 +}
76301 #endif /* __PAGETABLE_PUD_FOLDED */
76302
76303 #ifndef __PAGETABLE_PMD_FOLDED
76304 @@ -3819,6 +4077,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
76305 spin_unlock(&mm->page_table_lock);
76306 return 0;
76307 }
76308 +
76309 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
76310 +{
76311 + pmd_t *new = pmd_alloc_one(mm, address);
76312 + if (!new)
76313 + return -ENOMEM;
76314 +
76315 + smp_wmb(); /* See comment in __pte_alloc */
76316 +
76317 + spin_lock(&mm->page_table_lock);
76318 +#ifndef __ARCH_HAS_4LEVEL_HACK
76319 + if (pud_present(*pud)) /* Another has populated it */
76320 + pmd_free(mm, new);
76321 + else
76322 + pud_populate_kernel(mm, pud, new);
76323 +#else
76324 + if (pgd_present(*pud)) /* Another has populated it */
76325 + pmd_free(mm, new);
76326 + else
76327 + pgd_populate_kernel(mm, pud, new);
76328 +#endif /* __ARCH_HAS_4LEVEL_HACK */
76329 + spin_unlock(&mm->page_table_lock);
76330 + return 0;
76331 +}
76332 #endif /* __PAGETABLE_PMD_FOLDED */
76333
76334 int make_pages_present(unsigned long addr, unsigned long end)
76335 @@ -3856,7 +4138,7 @@ static int __init gate_vma_init(void)
76336 gate_vma.vm_start = FIXADDR_USER_START;
76337 gate_vma.vm_end = FIXADDR_USER_END;
76338 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
76339 - gate_vma.vm_page_prot = __P101;
76340 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
76341
76342 return 0;
76343 }
76344 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
76345 index e2df1c1..1e31d57 100644
76346 --- a/mm/mempolicy.c
76347 +++ b/mm/mempolicy.c
76348 @@ -721,6 +721,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
76349 unsigned long vmstart;
76350 unsigned long vmend;
76351
76352 +#ifdef CONFIG_PAX_SEGMEXEC
76353 + struct vm_area_struct *vma_m;
76354 +#endif
76355 +
76356 vma = find_vma(mm, start);
76357 if (!vma || vma->vm_start > start)
76358 return -EFAULT;
76359 @@ -757,9 +761,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
76360 if (err)
76361 goto out;
76362 }
76363 +
76364 err = vma_replace_policy(vma, new_pol);
76365 if (err)
76366 goto out;
76367 +
76368 +#ifdef CONFIG_PAX_SEGMEXEC
76369 + vma_m = pax_find_mirror_vma(vma);
76370 + if (vma_m) {
76371 + err = vma_replace_policy(vma_m, new_pol);
76372 + if (err)
76373 + goto out;
76374 + }
76375 +#endif
76376 +
76377 }
76378
76379 out:
76380 @@ -1216,6 +1231,17 @@ static long do_mbind(unsigned long start, unsigned long len,
76381
76382 if (end < start)
76383 return -EINVAL;
76384 +
76385 +#ifdef CONFIG_PAX_SEGMEXEC
76386 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
76387 + if (end > SEGMEXEC_TASK_SIZE)
76388 + return -EINVAL;
76389 + } else
76390 +#endif
76391 +
76392 + if (end > TASK_SIZE)
76393 + return -EINVAL;
76394 +
76395 if (end == start)
76396 return 0;
76397
76398 @@ -1445,8 +1471,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76399 */
76400 tcred = __task_cred(task);
76401 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
76402 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
76403 - !capable(CAP_SYS_NICE)) {
76404 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
76405 rcu_read_unlock();
76406 err = -EPERM;
76407 goto out_put;
76408 @@ -1477,6 +1502,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76409 goto out;
76410 }
76411
76412 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76413 + if (mm != current->mm &&
76414 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
76415 + mmput(mm);
76416 + err = -EPERM;
76417 + goto out;
76418 + }
76419 +#endif
76420 +
76421 err = do_migrate_pages(mm, old, new,
76422 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
76423
76424 diff --git a/mm/migrate.c b/mm/migrate.c
76425 index 2fd8b4a..d70358f 100644
76426 --- a/mm/migrate.c
76427 +++ b/mm/migrate.c
76428 @@ -1401,8 +1401,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
76429 */
76430 tcred = __task_cred(task);
76431 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
76432 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
76433 - !capable(CAP_SYS_NICE)) {
76434 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
76435 rcu_read_unlock();
76436 err = -EPERM;
76437 goto out;
76438 diff --git a/mm/mlock.c b/mm/mlock.c
76439 index c9bd528..da8d069 100644
76440 --- a/mm/mlock.c
76441 +++ b/mm/mlock.c
76442 @@ -13,6 +13,7 @@
76443 #include <linux/pagemap.h>
76444 #include <linux/mempolicy.h>
76445 #include <linux/syscalls.h>
76446 +#include <linux/security.h>
76447 #include <linux/sched.h>
76448 #include <linux/export.h>
76449 #include <linux/rmap.h>
76450 @@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
76451 {
76452 unsigned long nstart, end, tmp;
76453 struct vm_area_struct * vma, * prev;
76454 - int error;
76455 + int error = 0;
76456
76457 VM_BUG_ON(start & ~PAGE_MASK);
76458 VM_BUG_ON(len != PAGE_ALIGN(len));
76459 @@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
76460 return -EINVAL;
76461 if (end == start)
76462 return 0;
76463 + if (end > TASK_SIZE)
76464 + return -EINVAL;
76465 +
76466 vma = find_vma(current->mm, start);
76467 if (!vma || vma->vm_start > start)
76468 return -ENOMEM;
76469 @@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
76470 for (nstart = start ; ; ) {
76471 vm_flags_t newflags;
76472
76473 +#ifdef CONFIG_PAX_SEGMEXEC
76474 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
76475 + break;
76476 +#endif
76477 +
76478 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
76479
76480 newflags = vma->vm_flags | VM_LOCKED;
76481 @@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
76482 lock_limit >>= PAGE_SHIFT;
76483
76484 /* check against resource limits */
76485 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
76486 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
76487 error = do_mlock(start, len, 1);
76488 up_write(&current->mm->mmap_sem);
76489 @@ -528,6 +538,12 @@ static int do_mlockall(int flags)
76490 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
76491 vm_flags_t newflags;
76492
76493 +#ifdef CONFIG_PAX_SEGMEXEC
76494 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
76495 + break;
76496 +#endif
76497 +
76498 + BUG_ON(vma->vm_end > TASK_SIZE);
76499 newflags = vma->vm_flags | VM_LOCKED;
76500 if (!(flags & MCL_CURRENT))
76501 newflags &= ~VM_LOCKED;
76502 @@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
76503 lock_limit >>= PAGE_SHIFT;
76504
76505 ret = -ENOMEM;
76506 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
76507 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
76508 capable(CAP_IPC_LOCK))
76509 ret = do_mlockall(flags);
76510 diff --git a/mm/mmap.c b/mm/mmap.c
76511 index 8832b87..7d36e4f 100644
76512 --- a/mm/mmap.c
76513 +++ b/mm/mmap.c
76514 @@ -32,6 +32,7 @@
76515 #include <linux/khugepaged.h>
76516 #include <linux/uprobes.h>
76517 #include <linux/rbtree_augmented.h>
76518 +#include <linux/random.h>
76519
76520 #include <asm/uaccess.h>
76521 #include <asm/cacheflush.h>
76522 @@ -48,6 +49,16 @@
76523 #define arch_rebalance_pgtables(addr, len) (addr)
76524 #endif
76525
76526 +static inline void verify_mm_writelocked(struct mm_struct *mm)
76527 +{
76528 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
76529 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
76530 + up_read(&mm->mmap_sem);
76531 + BUG();
76532 + }
76533 +#endif
76534 +}
76535 +
76536 static void unmap_region(struct mm_struct *mm,
76537 struct vm_area_struct *vma, struct vm_area_struct *prev,
76538 unsigned long start, unsigned long end);
76539 @@ -67,22 +78,32 @@ static void unmap_region(struct mm_struct *mm,
76540 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
76541 *
76542 */
76543 -pgprot_t protection_map[16] = {
76544 +pgprot_t protection_map[16] __read_only = {
76545 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
76546 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
76547 };
76548
76549 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
76550 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
76551 {
76552 - return __pgprot(pgprot_val(protection_map[vm_flags &
76553 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
76554 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
76555 pgprot_val(arch_vm_get_page_prot(vm_flags)));
76556 +
76557 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76558 + if (!(__supported_pte_mask & _PAGE_NX) &&
76559 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
76560 + (vm_flags & (VM_READ | VM_WRITE)))
76561 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
76562 +#endif
76563 +
76564 + return prot;
76565 }
76566 EXPORT_SYMBOL(vm_get_page_prot);
76567
76568 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
76569 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
76570 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
76571 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
76572 /*
76573 * Make sure vm_committed_as in one cacheline and not cacheline shared with
76574 * other variables. It can be updated by several CPUs frequently.
76575 @@ -238,6 +259,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
76576 struct vm_area_struct *next = vma->vm_next;
76577
76578 might_sleep();
76579 + BUG_ON(vma->vm_mirror);
76580 if (vma->vm_ops && vma->vm_ops->close)
76581 vma->vm_ops->close(vma);
76582 if (vma->vm_file)
76583 @@ -281,6 +303,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
76584 * not page aligned -Ram Gupta
76585 */
76586 rlim = rlimit(RLIMIT_DATA);
76587 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
76588 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
76589 (mm->end_data - mm->start_data) > rlim)
76590 goto out;
76591 @@ -888,6 +911,12 @@ static int
76592 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
76593 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
76594 {
76595 +
76596 +#ifdef CONFIG_PAX_SEGMEXEC
76597 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
76598 + return 0;
76599 +#endif
76600 +
76601 if (is_mergeable_vma(vma, file, vm_flags) &&
76602 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
76603 if (vma->vm_pgoff == vm_pgoff)
76604 @@ -907,6 +936,12 @@ static int
76605 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
76606 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
76607 {
76608 +
76609 +#ifdef CONFIG_PAX_SEGMEXEC
76610 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
76611 + return 0;
76612 +#endif
76613 +
76614 if (is_mergeable_vma(vma, file, vm_flags) &&
76615 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
76616 pgoff_t vm_pglen;
76617 @@ -949,13 +984,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
76618 struct vm_area_struct *vma_merge(struct mm_struct *mm,
76619 struct vm_area_struct *prev, unsigned long addr,
76620 unsigned long end, unsigned long vm_flags,
76621 - struct anon_vma *anon_vma, struct file *file,
76622 + struct anon_vma *anon_vma, struct file *file,
76623 pgoff_t pgoff, struct mempolicy *policy)
76624 {
76625 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
76626 struct vm_area_struct *area, *next;
76627 int err;
76628
76629 +#ifdef CONFIG_PAX_SEGMEXEC
76630 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
76631 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
76632 +
76633 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
76634 +#endif
76635 +
76636 /*
76637 * We later require that vma->vm_flags == vm_flags,
76638 * so this tests vma->vm_flags & VM_SPECIAL, too.
76639 @@ -971,6 +1013,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76640 if (next && next->vm_end == end) /* cases 6, 7, 8 */
76641 next = next->vm_next;
76642
76643 +#ifdef CONFIG_PAX_SEGMEXEC
76644 + if (prev)
76645 + prev_m = pax_find_mirror_vma(prev);
76646 + if (area)
76647 + area_m = pax_find_mirror_vma(area);
76648 + if (next)
76649 + next_m = pax_find_mirror_vma(next);
76650 +#endif
76651 +
76652 /*
76653 * Can it merge with the predecessor?
76654 */
76655 @@ -990,9 +1041,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76656 /* cases 1, 6 */
76657 err = vma_adjust(prev, prev->vm_start,
76658 next->vm_end, prev->vm_pgoff, NULL);
76659 - } else /* cases 2, 5, 7 */
76660 +
76661 +#ifdef CONFIG_PAX_SEGMEXEC
76662 + if (!err && prev_m)
76663 + err = vma_adjust(prev_m, prev_m->vm_start,
76664 + next_m->vm_end, prev_m->vm_pgoff, NULL);
76665 +#endif
76666 +
76667 + } else { /* cases 2, 5, 7 */
76668 err = vma_adjust(prev, prev->vm_start,
76669 end, prev->vm_pgoff, NULL);
76670 +
76671 +#ifdef CONFIG_PAX_SEGMEXEC
76672 + if (!err && prev_m)
76673 + err = vma_adjust(prev_m, prev_m->vm_start,
76674 + end_m, prev_m->vm_pgoff, NULL);
76675 +#endif
76676 +
76677 + }
76678 if (err)
76679 return NULL;
76680 khugepaged_enter_vma_merge(prev);
76681 @@ -1006,12 +1072,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76682 mpol_equal(policy, vma_policy(next)) &&
76683 can_vma_merge_before(next, vm_flags,
76684 anon_vma, file, pgoff+pglen)) {
76685 - if (prev && addr < prev->vm_end) /* case 4 */
76686 + if (prev && addr < prev->vm_end) { /* case 4 */
76687 err = vma_adjust(prev, prev->vm_start,
76688 addr, prev->vm_pgoff, NULL);
76689 - else /* cases 3, 8 */
76690 +
76691 +#ifdef CONFIG_PAX_SEGMEXEC
76692 + if (!err && prev_m)
76693 + err = vma_adjust(prev_m, prev_m->vm_start,
76694 + addr_m, prev_m->vm_pgoff, NULL);
76695 +#endif
76696 +
76697 + } else { /* cases 3, 8 */
76698 err = vma_adjust(area, addr, next->vm_end,
76699 next->vm_pgoff - pglen, NULL);
76700 +
76701 +#ifdef CONFIG_PAX_SEGMEXEC
76702 + if (!err && area_m)
76703 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
76704 + next_m->vm_pgoff - pglen, NULL);
76705 +#endif
76706 +
76707 + }
76708 if (err)
76709 return NULL;
76710 khugepaged_enter_vma_merge(area);
76711 @@ -1120,16 +1201,13 @@ none:
76712 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
76713 struct file *file, long pages)
76714 {
76715 - const unsigned long stack_flags
76716 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
76717 -
76718 mm->total_vm += pages;
76719
76720 if (file) {
76721 mm->shared_vm += pages;
76722 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
76723 mm->exec_vm += pages;
76724 - } else if (flags & stack_flags)
76725 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
76726 mm->stack_vm += pages;
76727 }
76728 #endif /* CONFIG_PROC_FS */
76729 @@ -1165,7 +1243,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76730 * (the exception is when the underlying filesystem is noexec
76731 * mounted, in which case we dont add PROT_EXEC.)
76732 */
76733 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
76734 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
76735 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
76736 prot |= PROT_EXEC;
76737
76738 @@ -1191,7 +1269,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76739 /* Obtain the address to map to. we verify (or select) it and ensure
76740 * that it represents a valid section of the address space.
76741 */
76742 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
76743 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
76744 if (addr & ~PAGE_MASK)
76745 return addr;
76746
76747 @@ -1202,6 +1280,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76748 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
76749 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
76750
76751 +#ifdef CONFIG_PAX_MPROTECT
76752 + if (mm->pax_flags & MF_PAX_MPROTECT) {
76753 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
76754 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
76755 + gr_log_rwxmmap(file);
76756 +
76757 +#ifdef CONFIG_PAX_EMUPLT
76758 + vm_flags &= ~VM_EXEC;
76759 +#else
76760 + return -EPERM;
76761 +#endif
76762 +
76763 + }
76764 +
76765 + if (!(vm_flags & VM_EXEC))
76766 + vm_flags &= ~VM_MAYEXEC;
76767 +#else
76768 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
76769 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
76770 +#endif
76771 + else
76772 + vm_flags &= ~VM_MAYWRITE;
76773 + }
76774 +#endif
76775 +
76776 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76777 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
76778 + vm_flags &= ~VM_PAGEEXEC;
76779 +#endif
76780 +
76781 if (flags & MAP_LOCKED)
76782 if (!can_do_mlock())
76783 return -EPERM;
76784 @@ -1213,6 +1321,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76785 locked += mm->locked_vm;
76786 lock_limit = rlimit(RLIMIT_MEMLOCK);
76787 lock_limit >>= PAGE_SHIFT;
76788 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
76789 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
76790 return -EAGAIN;
76791 }
76792 @@ -1279,6 +1388,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76793 }
76794 }
76795
76796 + if (!gr_acl_handle_mmap(file, prot))
76797 + return -EACCES;
76798 +
76799 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
76800 }
76801
76802 @@ -1356,7 +1468,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
76803 vm_flags_t vm_flags = vma->vm_flags;
76804
76805 /* If it was private or non-writable, the write bit is already clear */
76806 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
76807 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
76808 return 0;
76809
76810 /* The backer wishes to know when pages are first written to? */
76811 @@ -1405,13 +1517,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
76812 unsigned long charged = 0;
76813 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
76814
76815 +#ifdef CONFIG_PAX_SEGMEXEC
76816 + struct vm_area_struct *vma_m = NULL;
76817 +#endif
76818 +
76819 + /*
76820 + * mm->mmap_sem is required to protect against another thread
76821 + * changing the mappings in case we sleep.
76822 + */
76823 + verify_mm_writelocked(mm);
76824 +
76825 /* Clear old maps */
76826 error = -ENOMEM;
76827 -munmap_back:
76828 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
76829 if (do_munmap(mm, addr, len))
76830 return -ENOMEM;
76831 - goto munmap_back;
76832 + BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
76833 }
76834
76835 /* Check against address space limit. */
76836 @@ -1460,6 +1581,16 @@ munmap_back:
76837 goto unacct_error;
76838 }
76839
76840 +#ifdef CONFIG_PAX_SEGMEXEC
76841 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
76842 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76843 + if (!vma_m) {
76844 + error = -ENOMEM;
76845 + goto free_vma;
76846 + }
76847 + }
76848 +#endif
76849 +
76850 vma->vm_mm = mm;
76851 vma->vm_start = addr;
76852 vma->vm_end = addr + len;
76853 @@ -1484,6 +1615,13 @@ munmap_back:
76854 if (error)
76855 goto unmap_and_free_vma;
76856
76857 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76858 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
76859 + vma->vm_flags |= VM_PAGEEXEC;
76860 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
76861 + }
76862 +#endif
76863 +
76864 /* Can addr have changed??
76865 *
76866 * Answer: Yes, several device drivers can do it in their
76867 @@ -1522,6 +1660,11 @@ munmap_back:
76868 vma_link(mm, vma, prev, rb_link, rb_parent);
76869 file = vma->vm_file;
76870
76871 +#ifdef CONFIG_PAX_SEGMEXEC
76872 + if (vma_m)
76873 + BUG_ON(pax_mirror_vma(vma_m, vma));
76874 +#endif
76875 +
76876 /* Once vma denies write, undo our temporary denial count */
76877 if (correct_wcount)
76878 atomic_inc(&inode->i_writecount);
76879 @@ -1529,6 +1672,7 @@ out:
76880 perf_event_mmap(vma);
76881
76882 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
76883 + track_exec_limit(mm, addr, addr + len, vm_flags);
76884 if (vm_flags & VM_LOCKED) {
76885 if (!mlock_vma_pages_range(vma, addr, addr + len))
76886 mm->locked_vm += (len >> PAGE_SHIFT);
76887 @@ -1550,6 +1694,12 @@ unmap_and_free_vma:
76888 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
76889 charged = 0;
76890 free_vma:
76891 +
76892 +#ifdef CONFIG_PAX_SEGMEXEC
76893 + if (vma_m)
76894 + kmem_cache_free(vm_area_cachep, vma_m);
76895 +#endif
76896 +
76897 kmem_cache_free(vm_area_cachep, vma);
76898 unacct_error:
76899 if (charged)
76900 @@ -1557,6 +1707,62 @@ unacct_error:
76901 return error;
76902 }
76903
76904 +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
76905 +unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
76906 +{
76907 + if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
76908 + return (random32() & 0xFF) << PAGE_SHIFT;
76909 +
76910 + return 0;
76911 +}
76912 +#endif
76913 +
76914 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
76915 +{
76916 + if (!vma) {
76917 +#ifdef CONFIG_STACK_GROWSUP
76918 + if (addr > sysctl_heap_stack_gap)
76919 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
76920 + else
76921 + vma = find_vma(current->mm, 0);
76922 + if (vma && (vma->vm_flags & VM_GROWSUP))
76923 + return false;
76924 +#endif
76925 + return true;
76926 + }
76927 +
76928 + if (addr + len > vma->vm_start)
76929 + return false;
76930 +
76931 + if (vma->vm_flags & VM_GROWSDOWN)
76932 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
76933 +#ifdef CONFIG_STACK_GROWSUP
76934 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
76935 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
76936 +#endif
76937 + else if (offset)
76938 + return offset <= vma->vm_start - addr - len;
76939 +
76940 + return true;
76941 +}
76942 +
76943 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
76944 +{
76945 + if (vma->vm_start < len)
76946 + return -ENOMEM;
76947 +
76948 + if (!(vma->vm_flags & VM_GROWSDOWN)) {
76949 + if (offset <= vma->vm_start - len)
76950 + return vma->vm_start - len - offset;
76951 + else
76952 + return -ENOMEM;
76953 + }
76954 +
76955 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
76956 + return vma->vm_start - len - sysctl_heap_stack_gap;
76957 + return -ENOMEM;
76958 +}
76959 +
76960 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
76961 {
76962 /*
76963 @@ -1776,6 +1982,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
76964 struct mm_struct *mm = current->mm;
76965 struct vm_area_struct *vma;
76966 struct vm_unmapped_area_info info;
76967 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
76968
76969 if (len > TASK_SIZE)
76970 return -ENOMEM;
76971 @@ -1783,17 +1990,26 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
76972 if (flags & MAP_FIXED)
76973 return addr;
76974
76975 +#ifdef CONFIG_PAX_RANDMMAP
76976 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
76977 +#endif
76978 +
76979 if (addr) {
76980 addr = PAGE_ALIGN(addr);
76981 vma = find_vma(mm, addr);
76982 - if (TASK_SIZE - len >= addr &&
76983 - (!vma || addr + len <= vma->vm_start))
76984 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
76985 return addr;
76986 }
76987
76988 info.flags = 0;
76989 info.length = len;
76990 info.low_limit = TASK_UNMAPPED_BASE;
76991 +
76992 +#ifdef CONFIG_PAX_RANDMMAP
76993 + if (mm->pax_flags & MF_PAX_RANDMMAP)
76994 + info.low_limit += mm->delta_mmap;
76995 +#endif
76996 +
76997 info.high_limit = TASK_SIZE;
76998 info.align_mask = 0;
76999 return vm_unmapped_area(&info);
77000 @@ -1802,10 +2018,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
77001
77002 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
77003 {
77004 +
77005 +#ifdef CONFIG_PAX_SEGMEXEC
77006 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
77007 + return;
77008 +#endif
77009 +
77010 /*
77011 * Is this a new hole at the lowest possible address?
77012 */
77013 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
77014 + if (addr >= mm->mmap_base && addr < mm->free_area_cache)
77015 mm->free_area_cache = addr;
77016 }
77017
77018 @@ -1823,6 +2045,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77019 struct mm_struct *mm = current->mm;
77020 unsigned long addr = addr0;
77021 struct vm_unmapped_area_info info;
77022 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
77023
77024 /* requested length too big for entire address space */
77025 if (len > TASK_SIZE)
77026 @@ -1831,12 +2054,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77027 if (flags & MAP_FIXED)
77028 return addr;
77029
77030 +#ifdef CONFIG_PAX_RANDMMAP
77031 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
77032 +#endif
77033 +
77034 /* requesting a specific address */
77035 if (addr) {
77036 addr = PAGE_ALIGN(addr);
77037 vma = find_vma(mm, addr);
77038 - if (TASK_SIZE - len >= addr &&
77039 - (!vma || addr + len <= vma->vm_start))
77040 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
77041 return addr;
77042 }
77043
77044 @@ -1857,6 +2083,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77045 VM_BUG_ON(addr != -ENOMEM);
77046 info.flags = 0;
77047 info.low_limit = TASK_UNMAPPED_BASE;
77048 +
77049 +#ifdef CONFIG_PAX_RANDMMAP
77050 + if (mm->pax_flags & MF_PAX_RANDMMAP)
77051 + info.low_limit += mm->delta_mmap;
77052 +#endif
77053 +
77054 info.high_limit = TASK_SIZE;
77055 addr = vm_unmapped_area(&info);
77056 }
77057 @@ -1867,6 +2099,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77058
77059 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
77060 {
77061 +
77062 +#ifdef CONFIG_PAX_SEGMEXEC
77063 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
77064 + return;
77065 +#endif
77066 +
77067 /*
77068 * Is this a new hole at the highest possible address?
77069 */
77070 @@ -1874,8 +2112,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
77071 mm->free_area_cache = addr;
77072
77073 /* dont allow allocations above current base */
77074 - if (mm->free_area_cache > mm->mmap_base)
77075 + if (mm->free_area_cache > mm->mmap_base) {
77076 mm->free_area_cache = mm->mmap_base;
77077 + mm->cached_hole_size = ~0UL;
77078 + }
77079 }
77080
77081 unsigned long
77082 @@ -1974,6 +2214,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
77083 return vma;
77084 }
77085
77086 +#ifdef CONFIG_PAX_SEGMEXEC
77087 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
77088 +{
77089 + struct vm_area_struct *vma_m;
77090 +
77091 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
77092 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
77093 + BUG_ON(vma->vm_mirror);
77094 + return NULL;
77095 + }
77096 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
77097 + vma_m = vma->vm_mirror;
77098 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
77099 + BUG_ON(vma->vm_file != vma_m->vm_file);
77100 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
77101 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
77102 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
77103 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
77104 + return vma_m;
77105 +}
77106 +#endif
77107 +
77108 /*
77109 * Verify that the stack growth is acceptable and
77110 * update accounting. This is shared with both the
77111 @@ -1990,6 +2252,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77112 return -ENOMEM;
77113
77114 /* Stack limit test */
77115 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
77116 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
77117 return -ENOMEM;
77118
77119 @@ -2000,6 +2263,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77120 locked = mm->locked_vm + grow;
77121 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
77122 limit >>= PAGE_SHIFT;
77123 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
77124 if (locked > limit && !capable(CAP_IPC_LOCK))
77125 return -ENOMEM;
77126 }
77127 @@ -2029,37 +2293,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77128 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
77129 * vma is the last one with address > vma->vm_end. Have to extend vma.
77130 */
77131 +#ifndef CONFIG_IA64
77132 +static
77133 +#endif
77134 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
77135 {
77136 int error;
77137 + bool locknext;
77138
77139 if (!(vma->vm_flags & VM_GROWSUP))
77140 return -EFAULT;
77141
77142 + /* Also guard against wrapping around to address 0. */
77143 + if (address < PAGE_ALIGN(address+1))
77144 + address = PAGE_ALIGN(address+1);
77145 + else
77146 + return -ENOMEM;
77147 +
77148 /*
77149 * We must make sure the anon_vma is allocated
77150 * so that the anon_vma locking is not a noop.
77151 */
77152 if (unlikely(anon_vma_prepare(vma)))
77153 return -ENOMEM;
77154 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
77155 + if (locknext && anon_vma_prepare(vma->vm_next))
77156 + return -ENOMEM;
77157 vma_lock_anon_vma(vma);
77158 + if (locknext)
77159 + vma_lock_anon_vma(vma->vm_next);
77160
77161 /*
77162 * vma->vm_start/vm_end cannot change under us because the caller
77163 * is required to hold the mmap_sem in read mode. We need the
77164 - * anon_vma lock to serialize against concurrent expand_stacks.
77165 - * Also guard against wrapping around to address 0.
77166 + * anon_vma locks to serialize against concurrent expand_stacks
77167 + * and expand_upwards.
77168 */
77169 - if (address < PAGE_ALIGN(address+4))
77170 - address = PAGE_ALIGN(address+4);
77171 - else {
77172 - vma_unlock_anon_vma(vma);
77173 - return -ENOMEM;
77174 - }
77175 error = 0;
77176
77177 /* Somebody else might have raced and expanded it already */
77178 - if (address > vma->vm_end) {
77179 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
77180 + error = -ENOMEM;
77181 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
77182 unsigned long size, grow;
77183
77184 size = address - vma->vm_start;
77185 @@ -2094,6 +2369,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
77186 }
77187 }
77188 }
77189 + if (locknext)
77190 + vma_unlock_anon_vma(vma->vm_next);
77191 vma_unlock_anon_vma(vma);
77192 khugepaged_enter_vma_merge(vma);
77193 validate_mm(vma->vm_mm);
77194 @@ -2108,6 +2385,8 @@ int expand_downwards(struct vm_area_struct *vma,
77195 unsigned long address)
77196 {
77197 int error;
77198 + bool lockprev = false;
77199 + struct vm_area_struct *prev;
77200
77201 /*
77202 * We must make sure the anon_vma is allocated
77203 @@ -2121,6 +2400,15 @@ int expand_downwards(struct vm_area_struct *vma,
77204 if (error)
77205 return error;
77206
77207 + prev = vma->vm_prev;
77208 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
77209 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
77210 +#endif
77211 + if (lockprev && anon_vma_prepare(prev))
77212 + return -ENOMEM;
77213 + if (lockprev)
77214 + vma_lock_anon_vma(prev);
77215 +
77216 vma_lock_anon_vma(vma);
77217
77218 /*
77219 @@ -2130,9 +2418,17 @@ int expand_downwards(struct vm_area_struct *vma,
77220 */
77221
77222 /* Somebody else might have raced and expanded it already */
77223 - if (address < vma->vm_start) {
77224 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
77225 + error = -ENOMEM;
77226 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
77227 unsigned long size, grow;
77228
77229 +#ifdef CONFIG_PAX_SEGMEXEC
77230 + struct vm_area_struct *vma_m;
77231 +
77232 + vma_m = pax_find_mirror_vma(vma);
77233 +#endif
77234 +
77235 size = vma->vm_end - address;
77236 grow = (vma->vm_start - address) >> PAGE_SHIFT;
77237
77238 @@ -2157,6 +2453,18 @@ int expand_downwards(struct vm_area_struct *vma,
77239 vma->vm_pgoff -= grow;
77240 anon_vma_interval_tree_post_update_vma(vma);
77241 vma_gap_update(vma);
77242 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
77243 +
77244 +#ifdef CONFIG_PAX_SEGMEXEC
77245 + if (vma_m) {
77246 + anon_vma_interval_tree_pre_update_vma(vma_m);
77247 + vma_m->vm_start -= grow << PAGE_SHIFT;
77248 + vma_m->vm_pgoff -= grow;
77249 + anon_vma_interval_tree_post_update_vma(vma_m);
77250 + vma_gap_update(vma_m);
77251 + }
77252 +#endif
77253 +
77254 spin_unlock(&vma->vm_mm->page_table_lock);
77255
77256 perf_event_mmap(vma);
77257 @@ -2263,6 +2571,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
77258 do {
77259 long nrpages = vma_pages(vma);
77260
77261 +#ifdef CONFIG_PAX_SEGMEXEC
77262 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
77263 + vma = remove_vma(vma);
77264 + continue;
77265 + }
77266 +#endif
77267 +
77268 if (vma->vm_flags & VM_ACCOUNT)
77269 nr_accounted += nrpages;
77270 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
77271 @@ -2308,6 +2623,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
77272 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
77273 vma->vm_prev = NULL;
77274 do {
77275 +
77276 +#ifdef CONFIG_PAX_SEGMEXEC
77277 + if (vma->vm_mirror) {
77278 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
77279 + vma->vm_mirror->vm_mirror = NULL;
77280 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
77281 + vma->vm_mirror = NULL;
77282 + }
77283 +#endif
77284 +
77285 vma_rb_erase(vma, &mm->mm_rb);
77286 mm->map_count--;
77287 tail_vma = vma;
77288 @@ -2339,14 +2664,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77289 struct vm_area_struct *new;
77290 int err = -ENOMEM;
77291
77292 +#ifdef CONFIG_PAX_SEGMEXEC
77293 + struct vm_area_struct *vma_m, *new_m = NULL;
77294 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
77295 +#endif
77296 +
77297 if (is_vm_hugetlb_page(vma) && (addr &
77298 ~(huge_page_mask(hstate_vma(vma)))))
77299 return -EINVAL;
77300
77301 +#ifdef CONFIG_PAX_SEGMEXEC
77302 + vma_m = pax_find_mirror_vma(vma);
77303 +#endif
77304 +
77305 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
77306 if (!new)
77307 goto out_err;
77308
77309 +#ifdef CONFIG_PAX_SEGMEXEC
77310 + if (vma_m) {
77311 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
77312 + if (!new_m) {
77313 + kmem_cache_free(vm_area_cachep, new);
77314 + goto out_err;
77315 + }
77316 + }
77317 +#endif
77318 +
77319 /* most fields are the same, copy all, and then fixup */
77320 *new = *vma;
77321
77322 @@ -2359,6 +2703,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77323 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
77324 }
77325
77326 +#ifdef CONFIG_PAX_SEGMEXEC
77327 + if (vma_m) {
77328 + *new_m = *vma_m;
77329 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
77330 + new_m->vm_mirror = new;
77331 + new->vm_mirror = new_m;
77332 +
77333 + if (new_below)
77334 + new_m->vm_end = addr_m;
77335 + else {
77336 + new_m->vm_start = addr_m;
77337 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
77338 + }
77339 + }
77340 +#endif
77341 +
77342 pol = mpol_dup(vma_policy(vma));
77343 if (IS_ERR(pol)) {
77344 err = PTR_ERR(pol);
77345 @@ -2381,6 +2741,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77346 else
77347 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
77348
77349 +#ifdef CONFIG_PAX_SEGMEXEC
77350 + if (!err && vma_m) {
77351 + if (anon_vma_clone(new_m, vma_m))
77352 + goto out_free_mpol;
77353 +
77354 + mpol_get(pol);
77355 + vma_set_policy(new_m, pol);
77356 +
77357 + if (new_m->vm_file)
77358 + get_file(new_m->vm_file);
77359 +
77360 + if (new_m->vm_ops && new_m->vm_ops->open)
77361 + new_m->vm_ops->open(new_m);
77362 +
77363 + if (new_below)
77364 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
77365 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
77366 + else
77367 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
77368 +
77369 + if (err) {
77370 + if (new_m->vm_ops && new_m->vm_ops->close)
77371 + new_m->vm_ops->close(new_m);
77372 + if (new_m->vm_file)
77373 + fput(new_m->vm_file);
77374 + mpol_put(pol);
77375 + }
77376 + }
77377 +#endif
77378 +
77379 /* Success. */
77380 if (!err)
77381 return 0;
77382 @@ -2390,10 +2780,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77383 new->vm_ops->close(new);
77384 if (new->vm_file)
77385 fput(new->vm_file);
77386 - unlink_anon_vmas(new);
77387 out_free_mpol:
77388 mpol_put(pol);
77389 out_free_vma:
77390 +
77391 +#ifdef CONFIG_PAX_SEGMEXEC
77392 + if (new_m) {
77393 + unlink_anon_vmas(new_m);
77394 + kmem_cache_free(vm_area_cachep, new_m);
77395 + }
77396 +#endif
77397 +
77398 + unlink_anon_vmas(new);
77399 kmem_cache_free(vm_area_cachep, new);
77400 out_err:
77401 return err;
77402 @@ -2406,6 +2804,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77403 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
77404 unsigned long addr, int new_below)
77405 {
77406 +
77407 +#ifdef CONFIG_PAX_SEGMEXEC
77408 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77409 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
77410 + if (mm->map_count >= sysctl_max_map_count-1)
77411 + return -ENOMEM;
77412 + } else
77413 +#endif
77414 +
77415 if (mm->map_count >= sysctl_max_map_count)
77416 return -ENOMEM;
77417
77418 @@ -2417,11 +2824,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
77419 * work. This now handles partial unmappings.
77420 * Jeremy Fitzhardinge <jeremy@goop.org>
77421 */
77422 +#ifdef CONFIG_PAX_SEGMEXEC
77423 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77424 {
77425 + int ret = __do_munmap(mm, start, len);
77426 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
77427 + return ret;
77428 +
77429 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
77430 +}
77431 +
77432 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77433 +#else
77434 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77435 +#endif
77436 +{
77437 unsigned long end;
77438 struct vm_area_struct *vma, *prev, *last;
77439
77440 + /*
77441 + * mm->mmap_sem is required to protect against another thread
77442 + * changing the mappings in case we sleep.
77443 + */
77444 + verify_mm_writelocked(mm);
77445 +
77446 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
77447 return -EINVAL;
77448
77449 @@ -2496,6 +2922,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77450 /* Fix up all other VM information */
77451 remove_vma_list(mm, vma);
77452
77453 + track_exec_limit(mm, start, end, 0UL);
77454 +
77455 return 0;
77456 }
77457
77458 @@ -2504,6 +2932,13 @@ int vm_munmap(unsigned long start, size_t len)
77459 int ret;
77460 struct mm_struct *mm = current->mm;
77461
77462 +
77463 +#ifdef CONFIG_PAX_SEGMEXEC
77464 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
77465 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
77466 + return -EINVAL;
77467 +#endif
77468 +
77469 down_write(&mm->mmap_sem);
77470 ret = do_munmap(mm, start, len);
77471 up_write(&mm->mmap_sem);
77472 @@ -2517,16 +2952,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
77473 return vm_munmap(addr, len);
77474 }
77475
77476 -static inline void verify_mm_writelocked(struct mm_struct *mm)
77477 -{
77478 -#ifdef CONFIG_DEBUG_VM
77479 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
77480 - WARN_ON(1);
77481 - up_read(&mm->mmap_sem);
77482 - }
77483 -#endif
77484 -}
77485 -
77486 /*
77487 * this is really a simplified "do_mmap". it only handles
77488 * anonymous maps. eventually we may be able to do some
77489 @@ -2540,6 +2965,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
77490 struct rb_node ** rb_link, * rb_parent;
77491 pgoff_t pgoff = addr >> PAGE_SHIFT;
77492 int error;
77493 + unsigned long charged;
77494
77495 len = PAGE_ALIGN(len);
77496 if (!len)
77497 @@ -2547,16 +2973,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
77498
77499 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
77500
77501 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
77502 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
77503 + flags &= ~VM_EXEC;
77504 +
77505 +#ifdef CONFIG_PAX_MPROTECT
77506 + if (mm->pax_flags & MF_PAX_MPROTECT)
77507 + flags &= ~VM_MAYEXEC;
77508 +#endif
77509 +
77510 + }
77511 +#endif
77512 +
77513 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
77514 if (error & ~PAGE_MASK)
77515 return error;
77516
77517 + charged = len >> PAGE_SHIFT;
77518 +
77519 /*
77520 * mlock MCL_FUTURE?
77521 */
77522 if (mm->def_flags & VM_LOCKED) {
77523 unsigned long locked, lock_limit;
77524 - locked = len >> PAGE_SHIFT;
77525 + locked = charged;
77526 locked += mm->locked_vm;
77527 lock_limit = rlimit(RLIMIT_MEMLOCK);
77528 lock_limit >>= PAGE_SHIFT;
77529 @@ -2573,21 +3013,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
77530 /*
77531 * Clear old maps. this also does some error checking for us
77532 */
77533 - munmap_back:
77534 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
77535 if (do_munmap(mm, addr, len))
77536 return -ENOMEM;
77537 - goto munmap_back;
77538 + BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
77539 }
77540
77541 /* Check against address space limits *after* clearing old maps... */
77542 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
77543 + if (!may_expand_vm(mm, charged))
77544 return -ENOMEM;
77545
77546 if (mm->map_count > sysctl_max_map_count)
77547 return -ENOMEM;
77548
77549 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
77550 + if (security_vm_enough_memory_mm(mm, charged))
77551 return -ENOMEM;
77552
77553 /* Can we just expand an old private anonymous mapping? */
77554 @@ -2601,7 +3040,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
77555 */
77556 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77557 if (!vma) {
77558 - vm_unacct_memory(len >> PAGE_SHIFT);
77559 + vm_unacct_memory(charged);
77560 return -ENOMEM;
77561 }
77562
77563 @@ -2615,11 +3054,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
77564 vma_link(mm, vma, prev, rb_link, rb_parent);
77565 out:
77566 perf_event_mmap(vma);
77567 - mm->total_vm += len >> PAGE_SHIFT;
77568 + mm->total_vm += charged;
77569 if (flags & VM_LOCKED) {
77570 if (!mlock_vma_pages_range(vma, addr, addr + len))
77571 - mm->locked_vm += (len >> PAGE_SHIFT);
77572 + mm->locked_vm += charged;
77573 }
77574 + track_exec_limit(mm, addr, addr + len, flags);
77575 return addr;
77576 }
77577
77578 @@ -2677,6 +3117,7 @@ void exit_mmap(struct mm_struct *mm)
77579 while (vma) {
77580 if (vma->vm_flags & VM_ACCOUNT)
77581 nr_accounted += vma_pages(vma);
77582 + vma->vm_mirror = NULL;
77583 vma = remove_vma(vma);
77584 }
77585 vm_unacct_memory(nr_accounted);
77586 @@ -2693,6 +3134,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
77587 struct vm_area_struct *prev;
77588 struct rb_node **rb_link, *rb_parent;
77589
77590 +#ifdef CONFIG_PAX_SEGMEXEC
77591 + struct vm_area_struct *vma_m = NULL;
77592 +#endif
77593 +
77594 + if (security_mmap_addr(vma->vm_start))
77595 + return -EPERM;
77596 +
77597 /*
77598 * The vm_pgoff of a purely anonymous vma should be irrelevant
77599 * until its first write fault, when page's anon_vma and index
77600 @@ -2716,7 +3164,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
77601 security_vm_enough_memory_mm(mm, vma_pages(vma)))
77602 return -ENOMEM;
77603
77604 +#ifdef CONFIG_PAX_SEGMEXEC
77605 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
77606 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77607 + if (!vma_m)
77608 + return -ENOMEM;
77609 + }
77610 +#endif
77611 +
77612 vma_link(mm, vma, prev, rb_link, rb_parent);
77613 +
77614 +#ifdef CONFIG_PAX_SEGMEXEC
77615 + if (vma_m)
77616 + BUG_ON(pax_mirror_vma(vma_m, vma));
77617 +#endif
77618 +
77619 return 0;
77620 }
77621
77622 @@ -2736,6 +3198,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
77623 struct mempolicy *pol;
77624 bool faulted_in_anon_vma = true;
77625
77626 + BUG_ON(vma->vm_mirror);
77627 +
77628 /*
77629 * If anonymous vma has not yet been faulted, update new pgoff
77630 * to match new location, to increase its chance of merging.
77631 @@ -2802,6 +3266,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
77632 return NULL;
77633 }
77634
77635 +#ifdef CONFIG_PAX_SEGMEXEC
77636 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
77637 +{
77638 + struct vm_area_struct *prev_m;
77639 + struct rb_node **rb_link_m, *rb_parent_m;
77640 + struct mempolicy *pol_m;
77641 +
77642 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
77643 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
77644 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
77645 + *vma_m = *vma;
77646 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
77647 + if (anon_vma_clone(vma_m, vma))
77648 + return -ENOMEM;
77649 + pol_m = vma_policy(vma_m);
77650 + mpol_get(pol_m);
77651 + vma_set_policy(vma_m, pol_m);
77652 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
77653 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
77654 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
77655 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
77656 + if (vma_m->vm_file)
77657 + get_file(vma_m->vm_file);
77658 + if (vma_m->vm_ops && vma_m->vm_ops->open)
77659 + vma_m->vm_ops->open(vma_m);
77660 + BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
77661 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
77662 + vma_m->vm_mirror = vma;
77663 + vma->vm_mirror = vma_m;
77664 + return 0;
77665 +}
77666 +#endif
77667 +
77668 /*
77669 * Return true if the calling process may expand its vm space by the passed
77670 * number of pages
77671 @@ -2813,6 +3310,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
77672
77673 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
77674
77675 +#ifdef CONFIG_PAX_RANDMMAP
77676 + if (mm->pax_flags & MF_PAX_RANDMMAP)
77677 + cur -= mm->brk_gap;
77678 +#endif
77679 +
77680 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
77681 if (cur + npages > lim)
77682 return 0;
77683 return 1;
77684 @@ -2883,6 +3386,22 @@ int install_special_mapping(struct mm_struct *mm,
77685 vma->vm_start = addr;
77686 vma->vm_end = addr + len;
77687
77688 +#ifdef CONFIG_PAX_MPROTECT
77689 + if (mm->pax_flags & MF_PAX_MPROTECT) {
77690 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
77691 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
77692 + return -EPERM;
77693 + if (!(vm_flags & VM_EXEC))
77694 + vm_flags &= ~VM_MAYEXEC;
77695 +#else
77696 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
77697 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
77698 +#endif
77699 + else
77700 + vm_flags &= ~VM_MAYWRITE;
77701 + }
77702 +#endif
77703 +
77704 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
77705 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
77706
77707 diff --git a/mm/mprotect.c b/mm/mprotect.c
77708 index 94722a4..9837984 100644
77709 --- a/mm/mprotect.c
77710 +++ b/mm/mprotect.c
77711 @@ -23,10 +23,17 @@
77712 #include <linux/mmu_notifier.h>
77713 #include <linux/migrate.h>
77714 #include <linux/perf_event.h>
77715 +
77716 +#ifdef CONFIG_PAX_MPROTECT
77717 +#include <linux/elf.h>
77718 +#include <linux/binfmts.h>
77719 +#endif
77720 +
77721 #include <asm/uaccess.h>
77722 #include <asm/pgtable.h>
77723 #include <asm/cacheflush.h>
77724 #include <asm/tlbflush.h>
77725 +#include <asm/mmu_context.h>
77726
77727 #ifndef pgprot_modify
77728 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
77729 @@ -233,6 +240,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
77730 return pages;
77731 }
77732
77733 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
77734 +/* called while holding the mmap semaphor for writing except stack expansion */
77735 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
77736 +{
77737 + unsigned long oldlimit, newlimit = 0UL;
77738 +
77739 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
77740 + return;
77741 +
77742 + spin_lock(&mm->page_table_lock);
77743 + oldlimit = mm->context.user_cs_limit;
77744 + if ((prot & VM_EXEC) && oldlimit < end)
77745 + /* USER_CS limit moved up */
77746 + newlimit = end;
77747 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
77748 + /* USER_CS limit moved down */
77749 + newlimit = start;
77750 +
77751 + if (newlimit) {
77752 + mm->context.user_cs_limit = newlimit;
77753 +
77754 +#ifdef CONFIG_SMP
77755 + wmb();
77756 + cpus_clear(mm->context.cpu_user_cs_mask);
77757 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
77758 +#endif
77759 +
77760 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
77761 + }
77762 + spin_unlock(&mm->page_table_lock);
77763 + if (newlimit == end) {
77764 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
77765 +
77766 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
77767 + if (is_vm_hugetlb_page(vma))
77768 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
77769 + else
77770 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
77771 + }
77772 +}
77773 +#endif
77774 +
77775 int
77776 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77777 unsigned long start, unsigned long end, unsigned long newflags)
77778 @@ -245,11 +294,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77779 int error;
77780 int dirty_accountable = 0;
77781
77782 +#ifdef CONFIG_PAX_SEGMEXEC
77783 + struct vm_area_struct *vma_m = NULL;
77784 + unsigned long start_m, end_m;
77785 +
77786 + start_m = start + SEGMEXEC_TASK_SIZE;
77787 + end_m = end + SEGMEXEC_TASK_SIZE;
77788 +#endif
77789 +
77790 if (newflags == oldflags) {
77791 *pprev = vma;
77792 return 0;
77793 }
77794
77795 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
77796 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
77797 +
77798 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
77799 + return -ENOMEM;
77800 +
77801 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
77802 + return -ENOMEM;
77803 + }
77804 +
77805 /*
77806 * If we make a private mapping writable we increase our commit;
77807 * but (without finer accounting) cannot reduce our commit if we
77808 @@ -266,6 +333,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77809 }
77810 }
77811
77812 +#ifdef CONFIG_PAX_SEGMEXEC
77813 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
77814 + if (start != vma->vm_start) {
77815 + error = split_vma(mm, vma, start, 1);
77816 + if (error)
77817 + goto fail;
77818 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
77819 + *pprev = (*pprev)->vm_next;
77820 + }
77821 +
77822 + if (end != vma->vm_end) {
77823 + error = split_vma(mm, vma, end, 0);
77824 + if (error)
77825 + goto fail;
77826 + }
77827 +
77828 + if (pax_find_mirror_vma(vma)) {
77829 + error = __do_munmap(mm, start_m, end_m - start_m);
77830 + if (error)
77831 + goto fail;
77832 + } else {
77833 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77834 + if (!vma_m) {
77835 + error = -ENOMEM;
77836 + goto fail;
77837 + }
77838 + vma->vm_flags = newflags;
77839 + error = pax_mirror_vma(vma_m, vma);
77840 + if (error) {
77841 + vma->vm_flags = oldflags;
77842 + goto fail;
77843 + }
77844 + }
77845 + }
77846 +#endif
77847 +
77848 /*
77849 * First try to merge with previous and/or next vma.
77850 */
77851 @@ -296,9 +399,21 @@ success:
77852 * vm_flags and vm_page_prot are protected by the mmap_sem
77853 * held in write mode.
77854 */
77855 +
77856 +#ifdef CONFIG_PAX_SEGMEXEC
77857 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
77858 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
77859 +#endif
77860 +
77861 vma->vm_flags = newflags;
77862 +
77863 +#ifdef CONFIG_PAX_MPROTECT
77864 + if (mm->binfmt && mm->binfmt->handle_mprotect)
77865 + mm->binfmt->handle_mprotect(vma, newflags);
77866 +#endif
77867 +
77868 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
77869 - vm_get_page_prot(newflags));
77870 + vm_get_page_prot(vma->vm_flags));
77871
77872 if (vma_wants_writenotify(vma)) {
77873 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
77874 @@ -337,6 +452,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77875 end = start + len;
77876 if (end <= start)
77877 return -ENOMEM;
77878 +
77879 +#ifdef CONFIG_PAX_SEGMEXEC
77880 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
77881 + if (end > SEGMEXEC_TASK_SIZE)
77882 + return -EINVAL;
77883 + } else
77884 +#endif
77885 +
77886 + if (end > TASK_SIZE)
77887 + return -EINVAL;
77888 +
77889 if (!arch_validate_prot(prot))
77890 return -EINVAL;
77891
77892 @@ -344,7 +470,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77893 /*
77894 * Does the application expect PROT_READ to imply PROT_EXEC:
77895 */
77896 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
77897 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
77898 prot |= PROT_EXEC;
77899
77900 vm_flags = calc_vm_prot_bits(prot);
77901 @@ -376,6 +502,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77902 if (start > vma->vm_start)
77903 prev = vma;
77904
77905 +#ifdef CONFIG_PAX_MPROTECT
77906 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
77907 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
77908 +#endif
77909 +
77910 for (nstart = start ; ; ) {
77911 unsigned long newflags;
77912
77913 @@ -386,6 +517,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77914
77915 /* newflags >> 4 shift VM_MAY% in place of VM_% */
77916 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
77917 + if (prot & (PROT_WRITE | PROT_EXEC))
77918 + gr_log_rwxmprotect(vma->vm_file);
77919 +
77920 + error = -EACCES;
77921 + goto out;
77922 + }
77923 +
77924 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
77925 error = -EACCES;
77926 goto out;
77927 }
77928 @@ -400,6 +539,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77929 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
77930 if (error)
77931 goto out;
77932 +
77933 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
77934 +
77935 nstart = tmp;
77936
77937 if (nstart < prev->vm_end)
77938 diff --git a/mm/mremap.c b/mm/mremap.c
77939 index e1031e1..1f2a0a1 100644
77940 --- a/mm/mremap.c
77941 +++ b/mm/mremap.c
77942 @@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
77943 continue;
77944 pte = ptep_get_and_clear(mm, old_addr, old_pte);
77945 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
77946 +
77947 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
77948 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
77949 + pte = pte_exprotect(pte);
77950 +#endif
77951 +
77952 set_pte_at(mm, new_addr, new_pte, pte);
77953 }
77954
77955 @@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
77956 if (is_vm_hugetlb_page(vma))
77957 goto Einval;
77958
77959 +#ifdef CONFIG_PAX_SEGMEXEC
77960 + if (pax_find_mirror_vma(vma))
77961 + goto Einval;
77962 +#endif
77963 +
77964 /* We can't remap across vm area boundaries */
77965 if (old_len > vma->vm_end - addr)
77966 goto Efault;
77967 @@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
77968 unsigned long ret = -EINVAL;
77969 unsigned long charged = 0;
77970 unsigned long map_flags;
77971 + unsigned long pax_task_size = TASK_SIZE;
77972
77973 if (new_addr & ~PAGE_MASK)
77974 goto out;
77975
77976 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
77977 +#ifdef CONFIG_PAX_SEGMEXEC
77978 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
77979 + pax_task_size = SEGMEXEC_TASK_SIZE;
77980 +#endif
77981 +
77982 + pax_task_size -= PAGE_SIZE;
77983 +
77984 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
77985 goto out;
77986
77987 /* Check if the location we're moving into overlaps the
77988 * old location at all, and fail if it does.
77989 */
77990 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
77991 - goto out;
77992 -
77993 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
77994 + if (addr + old_len > new_addr && new_addr + new_len > addr)
77995 goto out;
77996
77997 ret = do_munmap(mm, new_addr, new_len);
77998 @@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
77999 struct vm_area_struct *vma;
78000 unsigned long ret = -EINVAL;
78001 unsigned long charged = 0;
78002 + unsigned long pax_task_size = TASK_SIZE;
78003
78004 down_write(&current->mm->mmap_sem);
78005
78006 @@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
78007 if (!new_len)
78008 goto out;
78009
78010 +#ifdef CONFIG_PAX_SEGMEXEC
78011 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
78012 + pax_task_size = SEGMEXEC_TASK_SIZE;
78013 +#endif
78014 +
78015 + pax_task_size -= PAGE_SIZE;
78016 +
78017 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
78018 + old_len > pax_task_size || addr > pax_task_size-old_len)
78019 + goto out;
78020 +
78021 if (flags & MREMAP_FIXED) {
78022 if (flags & MREMAP_MAYMOVE)
78023 ret = mremap_to(addr, old_len, new_addr, new_len);
78024 @@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
78025 addr + new_len);
78026 }
78027 ret = addr;
78028 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
78029 goto out;
78030 }
78031 }
78032 @@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
78033 goto out;
78034 }
78035
78036 + map_flags = vma->vm_flags;
78037 ret = move_vma(vma, addr, old_len, new_len, new_addr);
78038 + if (!(ret & ~PAGE_MASK)) {
78039 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
78040 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
78041 + }
78042 }
78043 out:
78044 if (ret & ~PAGE_MASK)
78045 diff --git a/mm/nommu.c b/mm/nommu.c
78046 index 79c3cac..4d357e0 100644
78047 --- a/mm/nommu.c
78048 +++ b/mm/nommu.c
78049 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
78050 int sysctl_overcommit_ratio = 50; /* default is 50% */
78051 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
78052 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
78053 -int heap_stack_gap = 0;
78054
78055 atomic_long_t mmap_pages_allocated;
78056
78057 @@ -839,15 +838,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
78058 EXPORT_SYMBOL(find_vma);
78059
78060 /*
78061 - * find a VMA
78062 - * - we don't extend stack VMAs under NOMMU conditions
78063 - */
78064 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
78065 -{
78066 - return find_vma(mm, addr);
78067 -}
78068 -
78069 -/*
78070 * expand a stack to a given address
78071 * - not supported under NOMMU conditions
78072 */
78073 @@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
78074
78075 /* most fields are the same, copy all, and then fixup */
78076 *new = *vma;
78077 + INIT_LIST_HEAD(&new->anon_vma_chain);
78078 *region = *vma->vm_region;
78079 new->vm_region = region;
78080
78081 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
78082 index 0713bfb..e3774e0 100644
78083 --- a/mm/page-writeback.c
78084 +++ b/mm/page-writeback.c
78085 @@ -1630,7 +1630,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
78086 }
78087 }
78088
78089 -static struct notifier_block __cpuinitdata ratelimit_nb = {
78090 +static struct notifier_block ratelimit_nb = {
78091 .notifier_call = ratelimit_handler,
78092 .next = NULL,
78093 };
78094 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
78095 index 6a83cd3..bc2dcb6 100644
78096 --- a/mm/page_alloc.c
78097 +++ b/mm/page_alloc.c
78098 @@ -338,7 +338,7 @@ out:
78099 * This usage means that zero-order pages may not be compound.
78100 */
78101
78102 -static void free_compound_page(struct page *page)
78103 +void free_compound_page(struct page *page)
78104 {
78105 __free_pages_ok(page, compound_order(page));
78106 }
78107 @@ -693,6 +693,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
78108 int i;
78109 int bad = 0;
78110
78111 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
78112 + unsigned long index = 1UL << order;
78113 +#endif
78114 +
78115 trace_mm_page_free(page, order);
78116 kmemcheck_free_shadow(page, order);
78117
78118 @@ -708,6 +712,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
78119 debug_check_no_obj_freed(page_address(page),
78120 PAGE_SIZE << order);
78121 }
78122 +
78123 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
78124 + for (; index; --index)
78125 + sanitize_highpage(page + index - 1);
78126 +#endif
78127 +
78128 arch_free_page(page, order);
78129 kernel_map_pages(page, 1 << order, 0);
78130
78131 @@ -861,8 +871,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
78132 arch_alloc_page(page, order);
78133 kernel_map_pages(page, 1 << order, 1);
78134
78135 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
78136 if (gfp_flags & __GFP_ZERO)
78137 prep_zero_page(page, order, gfp_flags);
78138 +#endif
78139
78140 if (order && (gfp_flags & __GFP_COMP))
78141 prep_compound_page(page, order);
78142 @@ -3752,7 +3764,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
78143 unsigned long pfn;
78144
78145 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
78146 +#ifdef CONFIG_X86_32
78147 + /* boot failures in VMware 8 on 32bit vanilla since
78148 + this change */
78149 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
78150 +#else
78151 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
78152 +#endif
78153 return 1;
78154 }
78155 return 0;
78156 diff --git a/mm/percpu.c b/mm/percpu.c
78157 index 8c8e08f..73a5cda 100644
78158 --- a/mm/percpu.c
78159 +++ b/mm/percpu.c
78160 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
78161 static unsigned int pcpu_high_unit_cpu __read_mostly;
78162
78163 /* the address of the first chunk which starts with the kernel static area */
78164 -void *pcpu_base_addr __read_mostly;
78165 +void *pcpu_base_addr __read_only;
78166 EXPORT_SYMBOL_GPL(pcpu_base_addr);
78167
78168 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
78169 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
78170 index 926b466..b23df53 100644
78171 --- a/mm/process_vm_access.c
78172 +++ b/mm/process_vm_access.c
78173 @@ -13,6 +13,7 @@
78174 #include <linux/uio.h>
78175 #include <linux/sched.h>
78176 #include <linux/highmem.h>
78177 +#include <linux/security.h>
78178 #include <linux/ptrace.h>
78179 #include <linux/slab.h>
78180 #include <linux/syscalls.h>
78181 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
78182 size_t iov_l_curr_offset = 0;
78183 ssize_t iov_len;
78184
78185 + return -ENOSYS; // PaX: until properly audited
78186 +
78187 /*
78188 * Work out how many pages of struct pages we're going to need
78189 * when eventually calling get_user_pages
78190 */
78191 for (i = 0; i < riovcnt; i++) {
78192 iov_len = rvec[i].iov_len;
78193 - if (iov_len > 0) {
78194 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
78195 - + iov_len)
78196 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
78197 - / PAGE_SIZE + 1;
78198 - nr_pages = max(nr_pages, nr_pages_iov);
78199 - }
78200 + if (iov_len <= 0)
78201 + continue;
78202 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
78203 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
78204 + nr_pages = max(nr_pages, nr_pages_iov);
78205 }
78206
78207 if (nr_pages == 0)
78208 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
78209 goto free_proc_pages;
78210 }
78211
78212 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
78213 + rc = -EPERM;
78214 + goto put_task_struct;
78215 + }
78216 +
78217 mm = mm_access(task, PTRACE_MODE_ATTACH);
78218 if (!mm || IS_ERR(mm)) {
78219 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
78220 diff --git a/mm/rmap.c b/mm/rmap.c
78221 index 2c78f8c..9e9c624 100644
78222 --- a/mm/rmap.c
78223 +++ b/mm/rmap.c
78224 @@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
78225 struct anon_vma *anon_vma = vma->anon_vma;
78226 struct anon_vma_chain *avc;
78227
78228 +#ifdef CONFIG_PAX_SEGMEXEC
78229 + struct anon_vma_chain *avc_m = NULL;
78230 +#endif
78231 +
78232 might_sleep();
78233 if (unlikely(!anon_vma)) {
78234 struct mm_struct *mm = vma->vm_mm;
78235 @@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
78236 if (!avc)
78237 goto out_enomem;
78238
78239 +#ifdef CONFIG_PAX_SEGMEXEC
78240 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
78241 + if (!avc_m)
78242 + goto out_enomem_free_avc;
78243 +#endif
78244 +
78245 anon_vma = find_mergeable_anon_vma(vma);
78246 allocated = NULL;
78247 if (!anon_vma) {
78248 @@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
78249 /* page_table_lock to protect against threads */
78250 spin_lock(&mm->page_table_lock);
78251 if (likely(!vma->anon_vma)) {
78252 +
78253 +#ifdef CONFIG_PAX_SEGMEXEC
78254 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
78255 +
78256 + if (vma_m) {
78257 + BUG_ON(vma_m->anon_vma);
78258 + vma_m->anon_vma = anon_vma;
78259 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
78260 + avc_m = NULL;
78261 + }
78262 +#endif
78263 +
78264 vma->anon_vma = anon_vma;
78265 anon_vma_chain_link(vma, avc, anon_vma);
78266 allocated = NULL;
78267 @@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
78268
78269 if (unlikely(allocated))
78270 put_anon_vma(allocated);
78271 +
78272 +#ifdef CONFIG_PAX_SEGMEXEC
78273 + if (unlikely(avc_m))
78274 + anon_vma_chain_free(avc_m);
78275 +#endif
78276 +
78277 if (unlikely(avc))
78278 anon_vma_chain_free(avc);
78279 }
78280 return 0;
78281
78282 out_enomem_free_avc:
78283 +
78284 +#ifdef CONFIG_PAX_SEGMEXEC
78285 + if (avc_m)
78286 + anon_vma_chain_free(avc_m);
78287 +#endif
78288 +
78289 anon_vma_chain_free(avc);
78290 out_enomem:
78291 return -ENOMEM;
78292 @@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
78293 * Attach the anon_vmas from src to dst.
78294 * Returns 0 on success, -ENOMEM on failure.
78295 */
78296 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
78297 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
78298 {
78299 struct anon_vma_chain *avc, *pavc;
78300 struct anon_vma *root = NULL;
78301 @@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
78302 * the corresponding VMA in the parent process is attached to.
78303 * Returns 0 on success, non-zero on failure.
78304 */
78305 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
78306 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
78307 {
78308 struct anon_vma_chain *avc;
78309 struct anon_vma *anon_vma;
78310 diff --git a/mm/shmem.c b/mm/shmem.c
78311 index efd0b3a..994b702 100644
78312 --- a/mm/shmem.c
78313 +++ b/mm/shmem.c
78314 @@ -31,7 +31,7 @@
78315 #include <linux/export.h>
78316 #include <linux/swap.h>
78317
78318 -static struct vfsmount *shm_mnt;
78319 +struct vfsmount *shm_mnt;
78320
78321 #ifdef CONFIG_SHMEM
78322 /*
78323 @@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
78324 #define BOGO_DIRENT_SIZE 20
78325
78326 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
78327 -#define SHORT_SYMLINK_LEN 128
78328 +#define SHORT_SYMLINK_LEN 64
78329
78330 /*
78331 * shmem_fallocate and shmem_writepage communicate via inode->i_private
78332 @@ -2202,6 +2202,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
78333 static int shmem_xattr_validate(const char *name)
78334 {
78335 struct { const char *prefix; size_t len; } arr[] = {
78336 +
78337 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
78338 + { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
78339 +#endif
78340 +
78341 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
78342 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
78343 };
78344 @@ -2257,6 +2262,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
78345 if (err)
78346 return err;
78347
78348 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
78349 + if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
78350 + if (strcmp(name, XATTR_NAME_PAX_FLAGS))
78351 + return -EOPNOTSUPP;
78352 + if (size > 8)
78353 + return -EINVAL;
78354 + }
78355 +#endif
78356 +
78357 return simple_xattr_set(&info->xattrs, name, value, size, flags);
78358 }
78359
78360 @@ -2562,8 +2576,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
78361 int err = -ENOMEM;
78362
78363 /* Round up to L1_CACHE_BYTES to resist false sharing */
78364 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
78365 - L1_CACHE_BYTES), GFP_KERNEL);
78366 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
78367 if (!sbinfo)
78368 return -ENOMEM;
78369
78370 diff --git a/mm/slab.c b/mm/slab.c
78371 index e7667a3..b62c169 100644
78372 --- a/mm/slab.c
78373 +++ b/mm/slab.c
78374 @@ -306,7 +306,7 @@ struct kmem_list3 {
78375 * Need this for bootstrapping a per node allocator.
78376 */
78377 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
78378 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
78379 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
78380 #define CACHE_CACHE 0
78381 #define SIZE_AC MAX_NUMNODES
78382 #define SIZE_L3 (2 * MAX_NUMNODES)
78383 @@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
78384 if ((x)->max_freeable < i) \
78385 (x)->max_freeable = i; \
78386 } while (0)
78387 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
78388 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
78389 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
78390 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
78391 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
78392 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
78393 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
78394 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
78395 #else
78396 #define STATS_INC_ACTIVE(x) do { } while (0)
78397 #define STATS_DEC_ACTIVE(x) do { } while (0)
78398 @@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
78399 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
78400 */
78401 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
78402 - const struct slab *slab, void *obj)
78403 + const struct slab *slab, const void *obj)
78404 {
78405 u32 offset = (obj - slab->s_mem);
78406 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
78407 @@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
78408 struct cache_names {
78409 char *name;
78410 char *name_dma;
78411 + char *name_usercopy;
78412 };
78413
78414 static struct cache_names __initdata cache_names[] = {
78415 -#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
78416 +#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
78417 #include <linux/kmalloc_sizes.h>
78418 - {NULL,}
78419 + {NULL}
78420 #undef CACHE
78421 };
78422
78423 @@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
78424 if (unlikely(gfpflags & GFP_DMA))
78425 return csizep->cs_dmacachep;
78426 #endif
78427 +
78428 +#ifdef CONFIG_PAX_USERCOPY_SLABS
78429 + if (unlikely(gfpflags & GFP_USERCOPY))
78430 + return csizep->cs_usercopycachep;
78431 +#endif
78432 +
78433 return csizep->cs_cachep;
78434 }
78435
78436 @@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
78437 return notifier_from_errno(err);
78438 }
78439
78440 -static struct notifier_block __cpuinitdata cpucache_notifier = {
78441 +static struct notifier_block cpucache_notifier = {
78442 &cpuup_callback, NULL, 0
78443 };
78444
78445 @@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
78446 */
78447
78448 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
78449 - sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
78450 + sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
78451
78452 if (INDEX_AC != INDEX_L3)
78453 sizes[INDEX_L3].cs_cachep =
78454 create_kmalloc_cache(names[INDEX_L3].name,
78455 - sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
78456 + sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
78457
78458 slab_early_init = 0;
78459
78460 @@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
78461 */
78462 if (!sizes->cs_cachep)
78463 sizes->cs_cachep = create_kmalloc_cache(names->name,
78464 - sizes->cs_size, ARCH_KMALLOC_FLAGS);
78465 + sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
78466
78467 #ifdef CONFIG_ZONE_DMA
78468 sizes->cs_dmacachep = create_kmalloc_cache(
78469 names->name_dma, sizes->cs_size,
78470 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
78471 #endif
78472 +
78473 +#ifdef CONFIG_PAX_USERCOPY_SLABS
78474 + sizes->cs_usercopycachep = create_kmalloc_cache(
78475 + names->name_usercopy, sizes->cs_size,
78476 + ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
78477 +#endif
78478 +
78479 sizes++;
78480 names++;
78481 }
78482 @@ -4365,10 +4379,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
78483 }
78484 /* cpu stats */
78485 {
78486 - unsigned long allochit = atomic_read(&cachep->allochit);
78487 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
78488 - unsigned long freehit = atomic_read(&cachep->freehit);
78489 - unsigned long freemiss = atomic_read(&cachep->freemiss);
78490 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
78491 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
78492 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
78493 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
78494
78495 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
78496 allochit, allocmiss, freehit, freemiss);
78497 @@ -4600,13 +4614,71 @@ static const struct file_operations proc_slabstats_operations = {
78498 static int __init slab_proc_init(void)
78499 {
78500 #ifdef CONFIG_DEBUG_SLAB_LEAK
78501 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
78502 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
78503 #endif
78504 return 0;
78505 }
78506 module_init(slab_proc_init);
78507 #endif
78508
78509 +bool is_usercopy_object(const void *ptr)
78510 +{
78511 + struct page *page;
78512 + struct kmem_cache *cachep;
78513 +
78514 + if (ZERO_OR_NULL_PTR(ptr))
78515 + return false;
78516 +
78517 + if (!slab_is_available())
78518 + return false;
78519 +
78520 + if (!virt_addr_valid(ptr))
78521 + return false;
78522 +
78523 + page = virt_to_head_page(ptr);
78524 +
78525 + if (!PageSlab(page))
78526 + return false;
78527 +
78528 + cachep = page->slab_cache;
78529 + return cachep->flags & SLAB_USERCOPY;
78530 +}
78531 +
78532 +#ifdef CONFIG_PAX_USERCOPY
78533 +const char *check_heap_object(const void *ptr, unsigned long n)
78534 +{
78535 + struct page *page;
78536 + struct kmem_cache *cachep;
78537 + struct slab *slabp;
78538 + unsigned int objnr;
78539 + unsigned long offset;
78540 +
78541 + if (ZERO_OR_NULL_PTR(ptr))
78542 + return "<null>";
78543 +
78544 + if (!virt_addr_valid(ptr))
78545 + return NULL;
78546 +
78547 + page = virt_to_head_page(ptr);
78548 +
78549 + if (!PageSlab(page))
78550 + return NULL;
78551 +
78552 + cachep = page->slab_cache;
78553 + if (!(cachep->flags & SLAB_USERCOPY))
78554 + return cachep->name;
78555 +
78556 + slabp = page->slab_page;
78557 + objnr = obj_to_index(cachep, slabp, ptr);
78558 + BUG_ON(objnr >= cachep->num);
78559 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
78560 + if (offset <= cachep->object_size && n <= cachep->object_size - offset)
78561 + return NULL;
78562 +
78563 + return cachep->name;
78564 +}
78565 +#endif
78566 +
78567 /**
78568 * ksize - get the actual amount of memory allocated for a given object
78569 * @objp: Pointer to the object
78570 diff --git a/mm/slab.h b/mm/slab.h
78571 index 34a98d6..73633d1 100644
78572 --- a/mm/slab.h
78573 +++ b/mm/slab.h
78574 @@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
78575
78576 /* Legal flag mask for kmem_cache_create(), for various configurations */
78577 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
78578 - SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
78579 + SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
78580
78581 #if defined(CONFIG_DEBUG_SLAB)
78582 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
78583 @@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
78584 return s;
78585
78586 page = virt_to_head_page(x);
78587 +
78588 + BUG_ON(!PageSlab(page));
78589 +
78590 cachep = page->slab_cache;
78591 if (slab_equal_or_root(cachep, s))
78592 return cachep;
78593 diff --git a/mm/slab_common.c b/mm/slab_common.c
78594 index 3f3cd97..e050794 100644
78595 --- a/mm/slab_common.c
78596 +++ b/mm/slab_common.c
78597 @@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
78598
78599 err = __kmem_cache_create(s, flags);
78600 if (!err) {
78601 - s->refcount = 1;
78602 + atomic_set(&s->refcount, 1);
78603 list_add(&s->list, &slab_caches);
78604 memcg_cache_list_add(memcg, s);
78605 } else {
78606 @@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
78607
78608 get_online_cpus();
78609 mutex_lock(&slab_mutex);
78610 - s->refcount--;
78611 - if (!s->refcount) {
78612 + if (atomic_dec_and_test(&s->refcount)) {
78613 list_del(&s->list);
78614
78615 if (!__kmem_cache_shutdown(s)) {
78616 @@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
78617 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
78618 name, size, err);
78619
78620 - s->refcount = -1; /* Exempt from merging for now */
78621 + atomic_set(&s->refcount, -1); /* Exempt from merging for now */
78622 }
78623
78624 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
78625 @@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
78626
78627 create_boot_cache(s, name, size, flags);
78628 list_add(&s->list, &slab_caches);
78629 - s->refcount = 1;
78630 + atomic_set(&s->refcount, 1);
78631 return s;
78632 }
78633
78634 diff --git a/mm/slob.c b/mm/slob.c
78635 index a99fdf7..f5b6577 100644
78636 --- a/mm/slob.c
78637 +++ b/mm/slob.c
78638 @@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
78639 /*
78640 * Return the size of a slob block.
78641 */
78642 -static slobidx_t slob_units(slob_t *s)
78643 +static slobidx_t slob_units(const slob_t *s)
78644 {
78645 if (s->units > 0)
78646 return s->units;
78647 @@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
78648 /*
78649 * Return the next free slob block pointer after this one.
78650 */
78651 -static slob_t *slob_next(slob_t *s)
78652 +static slob_t *slob_next(const slob_t *s)
78653 {
78654 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
78655 slobidx_t next;
78656 @@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
78657 /*
78658 * Returns true if s is the last free block in its page.
78659 */
78660 -static int slob_last(slob_t *s)
78661 +static int slob_last(const slob_t *s)
78662 {
78663 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
78664 }
78665
78666 -static void *slob_new_pages(gfp_t gfp, int order, int node)
78667 +static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
78668 {
78669 - void *page;
78670 + struct page *page;
78671
78672 #ifdef CONFIG_NUMA
78673 if (node != NUMA_NO_NODE)
78674 @@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
78675 if (!page)
78676 return NULL;
78677
78678 - return page_address(page);
78679 + __SetPageSlab(page);
78680 + return page;
78681 }
78682
78683 -static void slob_free_pages(void *b, int order)
78684 +static void slob_free_pages(struct page *sp, int order)
78685 {
78686 if (current->reclaim_state)
78687 current->reclaim_state->reclaimed_slab += 1 << order;
78688 - free_pages((unsigned long)b, order);
78689 + __ClearPageSlab(sp);
78690 + reset_page_mapcount(sp);
78691 + sp->private = 0;
78692 + __free_pages(sp, order);
78693 }
78694
78695 /*
78696 @@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
78697
78698 /* Not enough space: must allocate a new page */
78699 if (!b) {
78700 - b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
78701 - if (!b)
78702 + sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
78703 + if (!sp)
78704 return NULL;
78705 - sp = virt_to_page(b);
78706 - __SetPageSlab(sp);
78707 + b = page_address(sp);
78708
78709 spin_lock_irqsave(&slob_lock, flags);
78710 sp->units = SLOB_UNITS(PAGE_SIZE);
78711 sp->freelist = b;
78712 + sp->private = 0;
78713 INIT_LIST_HEAD(&sp->list);
78714 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
78715 set_slob_page_free(sp, slob_list);
78716 @@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
78717 if (slob_page_free(sp))
78718 clear_slob_page_free(sp);
78719 spin_unlock_irqrestore(&slob_lock, flags);
78720 - __ClearPageSlab(sp);
78721 - reset_page_mapcount(sp);
78722 - slob_free_pages(b, 0);
78723 + slob_free_pages(sp, 0);
78724 return;
78725 }
78726
78727 @@ -424,11 +426,10 @@ out:
78728 */
78729
78730 static __always_inline void *
78731 -__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
78732 +__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
78733 {
78734 - unsigned int *m;
78735 - int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78736 - void *ret;
78737 + slob_t *m;
78738 + void *ret = NULL;
78739
78740 gfp &= gfp_allowed_mask;
78741
78742 @@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
78743
78744 if (!m)
78745 return NULL;
78746 - *m = size;
78747 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
78748 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
78749 + m[0].units = size;
78750 + m[1].units = align;
78751 ret = (void *)m + align;
78752
78753 trace_kmalloc_node(caller, ret,
78754 size, size + align, gfp, node);
78755 } else {
78756 unsigned int order = get_order(size);
78757 + struct page *page;
78758
78759 if (likely(order))
78760 gfp |= __GFP_COMP;
78761 - ret = slob_new_pages(gfp, order, node);
78762 + page = slob_new_pages(gfp, order, node);
78763 + if (page) {
78764 + ret = page_address(page);
78765 + page->private = size;
78766 + }
78767
78768 trace_kmalloc_node(caller, ret,
78769 size, PAGE_SIZE << order, gfp, node);
78770 }
78771
78772 - kmemleak_alloc(ret, size, 1, gfp);
78773 + return ret;
78774 +}
78775 +
78776 +static __always_inline void *
78777 +__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
78778 +{
78779 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78780 + void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
78781 +
78782 + if (!ZERO_OR_NULL_PTR(ret))
78783 + kmemleak_alloc(ret, size, 1, gfp);
78784 return ret;
78785 }
78786
78787 @@ -494,33 +513,110 @@ void kfree(const void *block)
78788 kmemleak_free(block);
78789
78790 sp = virt_to_page(block);
78791 - if (PageSlab(sp)) {
78792 + VM_BUG_ON(!PageSlab(sp));
78793 + if (!sp->private) {
78794 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78795 - unsigned int *m = (unsigned int *)(block - align);
78796 - slob_free(m, *m + align);
78797 - } else
78798 + slob_t *m = (slob_t *)(block - align);
78799 + slob_free(m, m[0].units + align);
78800 + } else {
78801 + __ClearPageSlab(sp);
78802 + reset_page_mapcount(sp);
78803 + sp->private = 0;
78804 __free_pages(sp, compound_order(sp));
78805 + }
78806 }
78807 EXPORT_SYMBOL(kfree);
78808
78809 +bool is_usercopy_object(const void *ptr)
78810 +{
78811 + if (!slab_is_available())
78812 + return false;
78813 +
78814 + // PAX: TODO
78815 +
78816 + return false;
78817 +}
78818 +
78819 +#ifdef CONFIG_PAX_USERCOPY
78820 +const char *check_heap_object(const void *ptr, unsigned long n)
78821 +{
78822 + struct page *page;
78823 + const slob_t *free;
78824 + const void *base;
78825 + unsigned long flags;
78826 +
78827 + if (ZERO_OR_NULL_PTR(ptr))
78828 + return "<null>";
78829 +
78830 + if (!virt_addr_valid(ptr))
78831 + return NULL;
78832 +
78833 + page = virt_to_head_page(ptr);
78834 + if (!PageSlab(page))
78835 + return NULL;
78836 +
78837 + if (page->private) {
78838 + base = page;
78839 + if (base <= ptr && n <= page->private - (ptr - base))
78840 + return NULL;
78841 + return "<slob>";
78842 + }
78843 +
78844 + /* some tricky double walking to find the chunk */
78845 + spin_lock_irqsave(&slob_lock, flags);
78846 + base = (void *)((unsigned long)ptr & PAGE_MASK);
78847 + free = page->freelist;
78848 +
78849 + while (!slob_last(free) && (void *)free <= ptr) {
78850 + base = free + slob_units(free);
78851 + free = slob_next(free);
78852 + }
78853 +
78854 + while (base < (void *)free) {
78855 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
78856 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
78857 + int offset;
78858 +
78859 + if (ptr < base + align)
78860 + break;
78861 +
78862 + offset = ptr - base - align;
78863 + if (offset >= m) {
78864 + base += size;
78865 + continue;
78866 + }
78867 +
78868 + if (n > m - offset)
78869 + break;
78870 +
78871 + spin_unlock_irqrestore(&slob_lock, flags);
78872 + return NULL;
78873 + }
78874 +
78875 + spin_unlock_irqrestore(&slob_lock, flags);
78876 + return "<slob>";
78877 +}
78878 +#endif
78879 +
78880 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
78881 size_t ksize(const void *block)
78882 {
78883 struct page *sp;
78884 int align;
78885 - unsigned int *m;
78886 + slob_t *m;
78887
78888 BUG_ON(!block);
78889 if (unlikely(block == ZERO_SIZE_PTR))
78890 return 0;
78891
78892 sp = virt_to_page(block);
78893 - if (unlikely(!PageSlab(sp)))
78894 - return PAGE_SIZE << compound_order(sp);
78895 + VM_BUG_ON(!PageSlab(sp));
78896 + if (sp->private)
78897 + return sp->private;
78898
78899 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78900 - m = (unsigned int *)(block - align);
78901 - return SLOB_UNITS(*m) * SLOB_UNIT;
78902 + m = (slob_t *)(block - align);
78903 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
78904 }
78905 EXPORT_SYMBOL(ksize);
78906
78907 @@ -536,23 +632,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
78908
78909 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
78910 {
78911 - void *b;
78912 + void *b = NULL;
78913
78914 flags &= gfp_allowed_mask;
78915
78916 lockdep_trace_alloc(flags);
78917
78918 +#ifdef CONFIG_PAX_USERCOPY_SLABS
78919 + b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
78920 +#else
78921 if (c->size < PAGE_SIZE) {
78922 b = slob_alloc(c->size, flags, c->align, node);
78923 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
78924 SLOB_UNITS(c->size) * SLOB_UNIT,
78925 flags, node);
78926 } else {
78927 - b = slob_new_pages(flags, get_order(c->size), node);
78928 + struct page *sp;
78929 +
78930 + sp = slob_new_pages(flags, get_order(c->size), node);
78931 + if (sp) {
78932 + b = page_address(sp);
78933 + sp->private = c->size;
78934 + }
78935 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
78936 PAGE_SIZE << get_order(c->size),
78937 flags, node);
78938 }
78939 +#endif
78940
78941 if (c->ctor)
78942 c->ctor(b);
78943 @@ -564,10 +670,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
78944
78945 static void __kmem_cache_free(void *b, int size)
78946 {
78947 - if (size < PAGE_SIZE)
78948 + struct page *sp;
78949 +
78950 + sp = virt_to_page(b);
78951 + BUG_ON(!PageSlab(sp));
78952 + if (!sp->private)
78953 slob_free(b, size);
78954 else
78955 - slob_free_pages(b, get_order(size));
78956 + slob_free_pages(sp, get_order(size));
78957 }
78958
78959 static void kmem_rcu_free(struct rcu_head *head)
78960 @@ -580,17 +690,31 @@ static void kmem_rcu_free(struct rcu_head *head)
78961
78962 void kmem_cache_free(struct kmem_cache *c, void *b)
78963 {
78964 + int size = c->size;
78965 +
78966 +#ifdef CONFIG_PAX_USERCOPY_SLABS
78967 + if (size + c->align < PAGE_SIZE) {
78968 + size += c->align;
78969 + b -= c->align;
78970 + }
78971 +#endif
78972 +
78973 kmemleak_free_recursive(b, c->flags);
78974 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
78975 struct slob_rcu *slob_rcu;
78976 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
78977 - slob_rcu->size = c->size;
78978 + slob_rcu = b + (size - sizeof(struct slob_rcu));
78979 + slob_rcu->size = size;
78980 call_rcu(&slob_rcu->head, kmem_rcu_free);
78981 } else {
78982 - __kmem_cache_free(b, c->size);
78983 + __kmem_cache_free(b, size);
78984 }
78985
78986 +#ifdef CONFIG_PAX_USERCOPY_SLABS
78987 + trace_kfree(_RET_IP_, b);
78988 +#else
78989 trace_kmem_cache_free(_RET_IP_, b);
78990 +#endif
78991 +
78992 }
78993 EXPORT_SYMBOL(kmem_cache_free);
78994
78995 diff --git a/mm/slub.c b/mm/slub.c
78996 index ba2ca53..00b1f4e 100644
78997 --- a/mm/slub.c
78998 +++ b/mm/slub.c
78999 @@ -197,7 +197,7 @@ struct track {
79000
79001 enum track_item { TRACK_ALLOC, TRACK_FREE };
79002
79003 -#ifdef CONFIG_SYSFS
79004 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79005 static int sysfs_slab_add(struct kmem_cache *);
79006 static int sysfs_slab_alias(struct kmem_cache *, const char *);
79007 static void sysfs_slab_remove(struct kmem_cache *);
79008 @@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
79009 if (!t->addr)
79010 return;
79011
79012 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
79013 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
79014 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
79015 #ifdef CONFIG_STACKTRACE
79016 {
79017 @@ -2653,7 +2653,7 @@ static int slub_min_objects;
79018 * Merge control. If this is set then no merging of slab caches will occur.
79019 * (Could be removed. This was introduced to pacify the merge skeptics.)
79020 */
79021 -static int slub_nomerge;
79022 +static int slub_nomerge = 1;
79023
79024 /*
79025 * Calculate the order of allocation given an slab object size.
79026 @@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
79027 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
79028 #endif
79029
79030 +#ifdef CONFIG_PAX_USERCOPY_SLABS
79031 +static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
79032 +#endif
79033 +
79034 static int __init setup_slub_min_order(char *str)
79035 {
79036 get_option(&str, &slub_min_order);
79037 @@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
79038 return kmalloc_dma_caches[index];
79039
79040 #endif
79041 +
79042 +#ifdef CONFIG_PAX_USERCOPY_SLABS
79043 + if (flags & SLAB_USERCOPY)
79044 + return kmalloc_usercopy_caches[index];
79045 +
79046 +#endif
79047 +
79048 return kmalloc_caches[index];
79049 }
79050
79051 @@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
79052 EXPORT_SYMBOL(__kmalloc_node);
79053 #endif
79054
79055 +bool is_usercopy_object(const void *ptr)
79056 +{
79057 + struct page *page;
79058 + struct kmem_cache *s;
79059 +
79060 + if (ZERO_OR_NULL_PTR(ptr))
79061 + return false;
79062 +
79063 + if (!slab_is_available())
79064 + return false;
79065 +
79066 + if (!virt_addr_valid(ptr))
79067 + return false;
79068 +
79069 + page = virt_to_head_page(ptr);
79070 +
79071 + if (!PageSlab(page))
79072 + return false;
79073 +
79074 + s = page->slab_cache;
79075 + return s->flags & SLAB_USERCOPY;
79076 +}
79077 +
79078 +#ifdef CONFIG_PAX_USERCOPY
79079 +const char *check_heap_object(const void *ptr, unsigned long n)
79080 +{
79081 + struct page *page;
79082 + struct kmem_cache *s;
79083 + unsigned long offset;
79084 +
79085 + if (ZERO_OR_NULL_PTR(ptr))
79086 + return "<null>";
79087 +
79088 + if (!virt_addr_valid(ptr))
79089 + return NULL;
79090 +
79091 + page = virt_to_head_page(ptr);
79092 +
79093 + if (!PageSlab(page))
79094 + return NULL;
79095 +
79096 + s = page->slab_cache;
79097 + if (!(s->flags & SLAB_USERCOPY))
79098 + return s->name;
79099 +
79100 + offset = (ptr - page_address(page)) % s->size;
79101 + if (offset <= s->object_size && n <= s->object_size - offset)
79102 + return NULL;
79103 +
79104 + return s->name;
79105 +}
79106 +#endif
79107 +
79108 size_t ksize(const void *object)
79109 {
79110 struct page *page;
79111 @@ -3712,17 +3776,17 @@ void __init kmem_cache_init(void)
79112
79113 /* Caches that are not of the two-to-the-power-of size */
79114 if (KMALLOC_MIN_SIZE <= 32) {
79115 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
79116 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
79117 caches++;
79118 }
79119
79120 if (KMALLOC_MIN_SIZE <= 64) {
79121 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
79122 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
79123 caches++;
79124 }
79125
79126 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
79127 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
79128 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
79129 caches++;
79130 }
79131
79132 @@ -3764,6 +3828,22 @@ void __init kmem_cache_init(void)
79133 }
79134 }
79135 #endif
79136 +
79137 +#ifdef CONFIG_PAX_USERCOPY_SLABS
79138 + for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
79139 + struct kmem_cache *s = kmalloc_caches[i];
79140 +
79141 + if (s && s->size) {
79142 + char *name = kasprintf(GFP_NOWAIT,
79143 + "usercopy-kmalloc-%d", s->object_size);
79144 +
79145 + BUG_ON(!name);
79146 + kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
79147 + s->object_size, SLAB_USERCOPY);
79148 + }
79149 + }
79150 +#endif
79151 +
79152 printk(KERN_INFO
79153 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
79154 " CPUs=%d, Nodes=%d\n",
79155 @@ -3790,7 +3870,7 @@ static int slab_unmergeable(struct kmem_cache *s)
79156 /*
79157 * We may have set a slab to be unmergeable during bootstrap.
79158 */
79159 - if (s->refcount < 0)
79160 + if (atomic_read(&s->refcount) < 0)
79161 return 1;
79162
79163 return 0;
79164 @@ -3848,7 +3928,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
79165
79166 s = find_mergeable(memcg, size, align, flags, name, ctor);
79167 if (s) {
79168 - s->refcount++;
79169 + atomic_inc(&s->refcount);
79170 /*
79171 * Adjust the object sizes so that we clear
79172 * the complete object on kzalloc.
79173 @@ -3857,7 +3937,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
79174 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
79175
79176 if (sysfs_slab_alias(s, name)) {
79177 - s->refcount--;
79178 + atomic_dec(&s->refcount);
79179 s = NULL;
79180 }
79181 }
79182 @@ -3919,7 +3999,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
79183 return NOTIFY_OK;
79184 }
79185
79186 -static struct notifier_block __cpuinitdata slab_notifier = {
79187 +static struct notifier_block slab_notifier = {
79188 .notifier_call = slab_cpuup_callback
79189 };
79190
79191 @@ -3977,7 +4057,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
79192 }
79193 #endif
79194
79195 -#ifdef CONFIG_SYSFS
79196 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79197 static int count_inuse(struct page *page)
79198 {
79199 return page->inuse;
79200 @@ -4364,12 +4444,12 @@ static void resiliency_test(void)
79201 validate_slab_cache(kmalloc_caches[9]);
79202 }
79203 #else
79204 -#ifdef CONFIG_SYSFS
79205 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79206 static void resiliency_test(void) {};
79207 #endif
79208 #endif
79209
79210 -#ifdef CONFIG_SYSFS
79211 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79212 enum slab_stat_type {
79213 SL_ALL, /* All slabs */
79214 SL_PARTIAL, /* Only partially allocated slabs */
79215 @@ -4613,7 +4693,7 @@ SLAB_ATTR_RO(ctor);
79216
79217 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
79218 {
79219 - return sprintf(buf, "%d\n", s->refcount - 1);
79220 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
79221 }
79222 SLAB_ATTR_RO(aliases);
79223
79224 @@ -5266,6 +5346,7 @@ static char *create_unique_id(struct kmem_cache *s)
79225 return name;
79226 }
79227
79228 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79229 static int sysfs_slab_add(struct kmem_cache *s)
79230 {
79231 int err;
79232 @@ -5323,6 +5404,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
79233 kobject_del(&s->kobj);
79234 kobject_put(&s->kobj);
79235 }
79236 +#endif
79237
79238 /*
79239 * Need to buffer aliases during bootup until sysfs becomes
79240 @@ -5336,6 +5418,7 @@ struct saved_alias {
79241
79242 static struct saved_alias *alias_list;
79243
79244 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79245 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
79246 {
79247 struct saved_alias *al;
79248 @@ -5358,6 +5441,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
79249 alias_list = al;
79250 return 0;
79251 }
79252 +#endif
79253
79254 static int __init slab_sysfs_init(void)
79255 {
79256 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
79257 index 1b7e22a..3fcd4f3 100644
79258 --- a/mm/sparse-vmemmap.c
79259 +++ b/mm/sparse-vmemmap.c
79260 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
79261 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
79262 if (!p)
79263 return NULL;
79264 - pud_populate(&init_mm, pud, p);
79265 + pud_populate_kernel(&init_mm, pud, p);
79266 }
79267 return pud;
79268 }
79269 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
79270 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
79271 if (!p)
79272 return NULL;
79273 - pgd_populate(&init_mm, pgd, p);
79274 + pgd_populate_kernel(&init_mm, pgd, p);
79275 }
79276 return pgd;
79277 }
79278 diff --git a/mm/sparse.c b/mm/sparse.c
79279 index 6b5fb76..db0c190 100644
79280 --- a/mm/sparse.c
79281 +++ b/mm/sparse.c
79282 @@ -782,7 +782,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
79283
79284 for (i = 0; i < PAGES_PER_SECTION; i++) {
79285 if (PageHWPoison(&memmap[i])) {
79286 - atomic_long_sub(1, &mce_bad_pages);
79287 + atomic_long_sub_unchecked(1, &mce_bad_pages);
79288 ClearPageHWPoison(&memmap[i]);
79289 }
79290 }
79291 diff --git a/mm/swap.c b/mm/swap.c
79292 index 6310dc2..3662b3f 100644
79293 --- a/mm/swap.c
79294 +++ b/mm/swap.c
79295 @@ -30,6 +30,7 @@
79296 #include <linux/backing-dev.h>
79297 #include <linux/memcontrol.h>
79298 #include <linux/gfp.h>
79299 +#include <linux/hugetlb.h>
79300
79301 #include "internal.h"
79302
79303 @@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
79304
79305 __page_cache_release(page);
79306 dtor = get_compound_page_dtor(page);
79307 + if (!PageHuge(page))
79308 + BUG_ON(dtor != free_compound_page);
79309 (*dtor)(page);
79310 }
79311
79312 diff --git a/mm/swapfile.c b/mm/swapfile.c
79313 index e97a0e5..b50e796 100644
79314 --- a/mm/swapfile.c
79315 +++ b/mm/swapfile.c
79316 @@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
79317
79318 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
79319 /* Activity counter to indicate that a swapon or swapoff has occurred */
79320 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
79321 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
79322
79323 static inline unsigned char swap_count(unsigned char ent)
79324 {
79325 @@ -1608,7 +1608,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
79326 }
79327 filp_close(swap_file, NULL);
79328 err = 0;
79329 - atomic_inc(&proc_poll_event);
79330 + atomic_inc_unchecked(&proc_poll_event);
79331 wake_up_interruptible(&proc_poll_wait);
79332
79333 out_dput:
79334 @@ -1625,8 +1625,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
79335
79336 poll_wait(file, &proc_poll_wait, wait);
79337
79338 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
79339 - seq->poll_event = atomic_read(&proc_poll_event);
79340 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
79341 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
79342 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
79343 }
79344
79345 @@ -1724,7 +1724,7 @@ static int swaps_open(struct inode *inode, struct file *file)
79346 return ret;
79347
79348 seq = file->private_data;
79349 - seq->poll_event = atomic_read(&proc_poll_event);
79350 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
79351 return 0;
79352 }
79353
79354 @@ -2066,7 +2066,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
79355 (frontswap_map) ? "FS" : "");
79356
79357 mutex_unlock(&swapon_mutex);
79358 - atomic_inc(&proc_poll_event);
79359 + atomic_inc_unchecked(&proc_poll_event);
79360 wake_up_interruptible(&proc_poll_wait);
79361
79362 if (S_ISREG(inode->i_mode))
79363 diff --git a/mm/util.c b/mm/util.c
79364 index c55e26b..3f913a9 100644
79365 --- a/mm/util.c
79366 +++ b/mm/util.c
79367 @@ -292,6 +292,12 @@ done:
79368 void arch_pick_mmap_layout(struct mm_struct *mm)
79369 {
79370 mm->mmap_base = TASK_UNMAPPED_BASE;
79371 +
79372 +#ifdef CONFIG_PAX_RANDMMAP
79373 + if (mm->pax_flags & MF_PAX_RANDMMAP)
79374 + mm->mmap_base += mm->delta_mmap;
79375 +#endif
79376 +
79377 mm->get_unmapped_area = arch_get_unmapped_area;
79378 mm->unmap_area = arch_unmap_area;
79379 }
79380 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
79381 index 5123a16..f234a48 100644
79382 --- a/mm/vmalloc.c
79383 +++ b/mm/vmalloc.c
79384 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
79385
79386 pte = pte_offset_kernel(pmd, addr);
79387 do {
79388 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
79389 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
79390 +
79391 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79392 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
79393 + BUG_ON(!pte_exec(*pte));
79394 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
79395 + continue;
79396 + }
79397 +#endif
79398 +
79399 + {
79400 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
79401 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
79402 + }
79403 } while (pte++, addr += PAGE_SIZE, addr != end);
79404 }
79405
79406 @@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
79407 pte = pte_alloc_kernel(pmd, addr);
79408 if (!pte)
79409 return -ENOMEM;
79410 +
79411 + pax_open_kernel();
79412 do {
79413 struct page *page = pages[*nr];
79414
79415 - if (WARN_ON(!pte_none(*pte)))
79416 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79417 + if (pgprot_val(prot) & _PAGE_NX)
79418 +#endif
79419 +
79420 + if (!pte_none(*pte)) {
79421 + pax_close_kernel();
79422 + WARN_ON(1);
79423 return -EBUSY;
79424 - if (WARN_ON(!page))
79425 + }
79426 + if (!page) {
79427 + pax_close_kernel();
79428 + WARN_ON(1);
79429 return -ENOMEM;
79430 + }
79431 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
79432 (*nr)++;
79433 } while (pte++, addr += PAGE_SIZE, addr != end);
79434 + pax_close_kernel();
79435 return 0;
79436 }
79437
79438 @@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
79439 pmd_t *pmd;
79440 unsigned long next;
79441
79442 - pmd = pmd_alloc(&init_mm, pud, addr);
79443 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
79444 if (!pmd)
79445 return -ENOMEM;
79446 do {
79447 @@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
79448 pud_t *pud;
79449 unsigned long next;
79450
79451 - pud = pud_alloc(&init_mm, pgd, addr);
79452 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
79453 if (!pud)
79454 return -ENOMEM;
79455 do {
79456 @@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
79457 * and fall back on vmalloc() if that fails. Others
79458 * just put it in the vmalloc space.
79459 */
79460 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
79461 +#ifdef CONFIG_MODULES
79462 +#ifdef MODULES_VADDR
79463 unsigned long addr = (unsigned long)x;
79464 if (addr >= MODULES_VADDR && addr < MODULES_END)
79465 return 1;
79466 #endif
79467 +
79468 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79469 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
79470 + return 1;
79471 +#endif
79472 +
79473 +#endif
79474 +
79475 return is_vmalloc_addr(x);
79476 }
79477
79478 @@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
79479
79480 if (!pgd_none(*pgd)) {
79481 pud_t *pud = pud_offset(pgd, addr);
79482 +#ifdef CONFIG_X86
79483 + if (!pud_large(*pud))
79484 +#endif
79485 if (!pud_none(*pud)) {
79486 pmd_t *pmd = pmd_offset(pud, addr);
79487 +#ifdef CONFIG_X86
79488 + if (!pmd_large(*pmd))
79489 +#endif
79490 if (!pmd_none(*pmd)) {
79491 pte_t *ptep, pte;
79492
79493 @@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
79494 * Allocate a region of KVA of the specified size and alignment, within the
79495 * vstart and vend.
79496 */
79497 -static struct vmap_area *alloc_vmap_area(unsigned long size,
79498 +static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
79499 unsigned long align,
79500 unsigned long vstart, unsigned long vend,
79501 int node, gfp_t gfp_mask)
79502 @@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
79503 struct vm_struct *area;
79504
79505 BUG_ON(in_interrupt());
79506 +
79507 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79508 + if (flags & VM_KERNEXEC) {
79509 + if (start != VMALLOC_START || end != VMALLOC_END)
79510 + return NULL;
79511 + start = (unsigned long)MODULES_EXEC_VADDR;
79512 + end = (unsigned long)MODULES_EXEC_END;
79513 + }
79514 +#endif
79515 +
79516 if (flags & VM_IOREMAP) {
79517 int bit = fls(size);
79518
79519 @@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
79520 if (count > totalram_pages)
79521 return NULL;
79522
79523 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79524 + if (!(pgprot_val(prot) & _PAGE_NX))
79525 + flags |= VM_KERNEXEC;
79526 +#endif
79527 +
79528 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
79529 __builtin_return_address(0));
79530 if (!area)
79531 @@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
79532 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
79533 goto fail;
79534
79535 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79536 + if (!(pgprot_val(prot) & _PAGE_NX))
79537 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
79538 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
79539 + else
79540 +#endif
79541 +
79542 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
79543 start, end, node, gfp_mask, caller);
79544 if (!area)
79545 @@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
79546 * For tight control over page level allocator and protection flags
79547 * use __vmalloc() instead.
79548 */
79549 -
79550 void *vmalloc_exec(unsigned long size)
79551 {
79552 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
79553 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
79554 -1, __builtin_return_address(0));
79555 }
79556
79557 @@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
79558 unsigned long uaddr = vma->vm_start;
79559 unsigned long usize = vma->vm_end - vma->vm_start;
79560
79561 + BUG_ON(vma->vm_mirror);
79562 +
79563 if ((PAGE_SIZE-1) & (unsigned long)addr)
79564 return -EINVAL;
79565
79566 @@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
79567 v->addr, v->addr + v->size, v->size);
79568
79569 if (v->caller)
79570 +#ifdef CONFIG_GRKERNSEC_HIDESYM
79571 + seq_printf(m, " %pK", v->caller);
79572 +#else
79573 seq_printf(m, " %pS", v->caller);
79574 +#endif
79575
79576 if (v->nr_pages)
79577 seq_printf(m, " pages=%d", v->nr_pages);
79578 diff --git a/mm/vmstat.c b/mm/vmstat.c
79579 index 9800306..76b4b27 100644
79580 --- a/mm/vmstat.c
79581 +++ b/mm/vmstat.c
79582 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
79583 *
79584 * vm_stat contains the global counters
79585 */
79586 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
79587 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
79588 EXPORT_SYMBOL(vm_stat);
79589
79590 #ifdef CONFIG_SMP
79591 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
79592 v = p->vm_stat_diff[i];
79593 p->vm_stat_diff[i] = 0;
79594 local_irq_restore(flags);
79595 - atomic_long_add(v, &zone->vm_stat[i]);
79596 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
79597 global_diff[i] += v;
79598 #ifdef CONFIG_NUMA
79599 /* 3 seconds idle till flush */
79600 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
79601
79602 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
79603 if (global_diff[i])
79604 - atomic_long_add(global_diff[i], &vm_stat[i]);
79605 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
79606 }
79607
79608 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
79609 @@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
79610 if (pset->vm_stat_diff[i]) {
79611 int v = pset->vm_stat_diff[i];
79612 pset->vm_stat_diff[i] = 0;
79613 - atomic_long_add(v, &zone->vm_stat[i]);
79614 - atomic_long_add(v, &vm_stat[i]);
79615 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
79616 + atomic_long_add_unchecked(v, &vm_stat[i]);
79617 }
79618 }
79619 #endif
79620 @@ -1223,7 +1223,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
79621 return NOTIFY_OK;
79622 }
79623
79624 -static struct notifier_block __cpuinitdata vmstat_notifier =
79625 +static struct notifier_block vmstat_notifier =
79626 { &vmstat_cpuup_callback, NULL, 0 };
79627 #endif
79628
79629 @@ -1238,10 +1238,20 @@ static int __init setup_vmstat(void)
79630 start_cpu_timer(cpu);
79631 #endif
79632 #ifdef CONFIG_PROC_FS
79633 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
79634 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
79635 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
79636 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
79637 + {
79638 + mode_t gr_mode = S_IRUGO;
79639 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
79640 + gr_mode = S_IRUSR;
79641 +#endif
79642 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
79643 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
79644 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
79645 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
79646 +#else
79647 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
79648 +#endif
79649 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
79650 + }
79651 #endif
79652 return 0;
79653 }
79654 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
79655 index a292e80..785ee68 100644
79656 --- a/net/8021q/vlan.c
79657 +++ b/net/8021q/vlan.c
79658 @@ -485,7 +485,7 @@ out:
79659 return NOTIFY_DONE;
79660 }
79661
79662 -static struct notifier_block vlan_notifier_block __read_mostly = {
79663 +static struct notifier_block vlan_notifier_block = {
79664 .notifier_call = vlan_device_event,
79665 };
79666
79667 @@ -560,8 +560,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
79668 err = -EPERM;
79669 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
79670 break;
79671 - if ((args.u.name_type >= 0) &&
79672 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
79673 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
79674 struct vlan_net *vn;
79675
79676 vn = net_generic(net, vlan_net_id);
79677 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
79678 index 02efb25..41541a9 100644
79679 --- a/net/9p/trans_fd.c
79680 +++ b/net/9p/trans_fd.c
79681 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
79682 oldfs = get_fs();
79683 set_fs(get_ds());
79684 /* The cast to a user pointer is valid due to the set_fs() */
79685 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
79686 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
79687 set_fs(oldfs);
79688
79689 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
79690 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
79691 index 876fbe8..8bbea9f 100644
79692 --- a/net/atm/atm_misc.c
79693 +++ b/net/atm/atm_misc.c
79694 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
79695 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
79696 return 1;
79697 atm_return(vcc, truesize);
79698 - atomic_inc(&vcc->stats->rx_drop);
79699 + atomic_inc_unchecked(&vcc->stats->rx_drop);
79700 return 0;
79701 }
79702 EXPORT_SYMBOL(atm_charge);
79703 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
79704 }
79705 }
79706 atm_return(vcc, guess);
79707 - atomic_inc(&vcc->stats->rx_drop);
79708 + atomic_inc_unchecked(&vcc->stats->rx_drop);
79709 return NULL;
79710 }
79711 EXPORT_SYMBOL(atm_alloc_charge);
79712 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
79713
79714 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
79715 {
79716 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
79717 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
79718 __SONET_ITEMS
79719 #undef __HANDLE_ITEM
79720 }
79721 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
79722
79723 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
79724 {
79725 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
79726 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
79727 __SONET_ITEMS
79728 #undef __HANDLE_ITEM
79729 }
79730 diff --git a/net/atm/lec.h b/net/atm/lec.h
79731 index a86aff9..3a0d6f6 100644
79732 --- a/net/atm/lec.h
79733 +++ b/net/atm/lec.h
79734 @@ -48,7 +48,7 @@ struct lane2_ops {
79735 const u8 *tlvs, u32 sizeoftlvs);
79736 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
79737 const u8 *tlvs, u32 sizeoftlvs);
79738 -};
79739 +} __no_const;
79740
79741 /*
79742 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
79743 diff --git a/net/atm/proc.c b/net/atm/proc.c
79744 index 0d020de..011c7bb 100644
79745 --- a/net/atm/proc.c
79746 +++ b/net/atm/proc.c
79747 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
79748 const struct k_atm_aal_stats *stats)
79749 {
79750 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
79751 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
79752 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
79753 - atomic_read(&stats->rx_drop));
79754 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
79755 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
79756 + atomic_read_unchecked(&stats->rx_drop));
79757 }
79758
79759 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
79760 diff --git a/net/atm/resources.c b/net/atm/resources.c
79761 index 0447d5d..3cf4728 100644
79762 --- a/net/atm/resources.c
79763 +++ b/net/atm/resources.c
79764 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
79765 static void copy_aal_stats(struct k_atm_aal_stats *from,
79766 struct atm_aal_stats *to)
79767 {
79768 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
79769 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
79770 __AAL_STAT_ITEMS
79771 #undef __HANDLE_ITEM
79772 }
79773 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
79774 static void subtract_aal_stats(struct k_atm_aal_stats *from,
79775 struct atm_aal_stats *to)
79776 {
79777 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
79778 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
79779 __AAL_STAT_ITEMS
79780 #undef __HANDLE_ITEM
79781 }
79782 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
79783 index 7d02ebd..4d4cc01 100644
79784 --- a/net/batman-adv/bat_iv_ogm.c
79785 +++ b/net/batman-adv/bat_iv_ogm.c
79786 @@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
79787
79788 /* randomize initial seqno to avoid collision */
79789 get_random_bytes(&random_seqno, sizeof(random_seqno));
79790 - atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
79791 + atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
79792
79793 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
79794 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
79795 @@ -615,9 +615,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
79796 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
79797
79798 /* change sequence number to network order */
79799 - seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
79800 + seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
79801 batadv_ogm_packet->seqno = htonl(seqno);
79802 - atomic_inc(&hard_iface->bat_iv.ogm_seqno);
79803 + atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
79804
79805 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
79806 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
79807 @@ -1022,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
79808 return;
79809
79810 /* could be changed by schedule_own_packet() */
79811 - if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
79812 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
79813
79814 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
79815 has_directlink_flag = 1;
79816 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
79817 index f1d37cd..4190879 100644
79818 --- a/net/batman-adv/hard-interface.c
79819 +++ b/net/batman-adv/hard-interface.c
79820 @@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
79821 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
79822 dev_add_pack(&hard_iface->batman_adv_ptype);
79823
79824 - atomic_set(&hard_iface->frag_seqno, 1);
79825 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
79826 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
79827 hard_iface->net_dev->name);
79828
79829 @@ -493,7 +493,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
79830 /* This can't be called via a bat_priv callback because
79831 * we have no bat_priv yet.
79832 */
79833 - atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
79834 + atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
79835 hard_iface->bat_iv.ogm_buff = NULL;
79836
79837 return hard_iface;
79838 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
79839 index 6b548fd..fc32c8d 100644
79840 --- a/net/batman-adv/soft-interface.c
79841 +++ b/net/batman-adv/soft-interface.c
79842 @@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
79843 primary_if->net_dev->dev_addr, ETH_ALEN);
79844
79845 /* set broadcast sequence number */
79846 - seqno = atomic_inc_return(&bat_priv->bcast_seqno);
79847 + seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
79848 bcast_packet->seqno = htonl(seqno);
79849
79850 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
79851 @@ -497,7 +497,7 @@ struct net_device *batadv_softif_create(const char *name)
79852 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
79853
79854 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
79855 - atomic_set(&bat_priv->bcast_seqno, 1);
79856 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
79857 atomic_set(&bat_priv->tt.vn, 0);
79858 atomic_set(&bat_priv->tt.local_changes, 0);
79859 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
79860 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
79861 index ae9ac9a..11e0fe7 100644
79862 --- a/net/batman-adv/types.h
79863 +++ b/net/batman-adv/types.h
79864 @@ -48,7 +48,7 @@
79865 struct batadv_hard_iface_bat_iv {
79866 unsigned char *ogm_buff;
79867 int ogm_buff_len;
79868 - atomic_t ogm_seqno;
79869 + atomic_unchecked_t ogm_seqno;
79870 };
79871
79872 struct batadv_hard_iface {
79873 @@ -56,7 +56,7 @@ struct batadv_hard_iface {
79874 int16_t if_num;
79875 char if_status;
79876 struct net_device *net_dev;
79877 - atomic_t frag_seqno;
79878 + atomic_unchecked_t frag_seqno;
79879 struct kobject *hardif_obj;
79880 atomic_t refcount;
79881 struct packet_type batman_adv_ptype;
79882 @@ -284,7 +284,7 @@ struct batadv_priv {
79883 atomic_t orig_interval; /* uint */
79884 atomic_t hop_penalty; /* uint */
79885 atomic_t log_level; /* uint */
79886 - atomic_t bcast_seqno;
79887 + atomic_unchecked_t bcast_seqno;
79888 atomic_t bcast_queue_left;
79889 atomic_t batman_queue_left;
79890 char num_ifaces;
79891 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
79892 index 10aff49..ea8e021 100644
79893 --- a/net/batman-adv/unicast.c
79894 +++ b/net/batman-adv/unicast.c
79895 @@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
79896 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
79897 frag2->flags = large_tail;
79898
79899 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
79900 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
79901 frag1->seqno = htons(seqno - 1);
79902 frag2->seqno = htons(seqno);
79903
79904 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
79905 index 07f0739..3c42e34 100644
79906 --- a/net/bluetooth/hci_sock.c
79907 +++ b/net/bluetooth/hci_sock.c
79908 @@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
79909 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
79910 }
79911
79912 - len = min_t(unsigned int, len, sizeof(uf));
79913 + len = min((size_t)len, sizeof(uf));
79914 if (copy_from_user(&uf, optval, len)) {
79915 err = -EFAULT;
79916 break;
79917 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
79918 index 22e6583..426e2f3 100644
79919 --- a/net/bluetooth/l2cap_core.c
79920 +++ b/net/bluetooth/l2cap_core.c
79921 @@ -3400,8 +3400,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
79922 break;
79923
79924 case L2CAP_CONF_RFC:
79925 - if (olen == sizeof(rfc))
79926 - memcpy(&rfc, (void *)val, olen);
79927 + if (olen != sizeof(rfc))
79928 + break;
79929 +
79930 + memcpy(&rfc, (void *)val, olen);
79931
79932 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
79933 rfc.mode != chan->mode)
79934 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
79935 index 1bcfb84..dad9f98 100644
79936 --- a/net/bluetooth/l2cap_sock.c
79937 +++ b/net/bluetooth/l2cap_sock.c
79938 @@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
79939 struct sock *sk = sock->sk;
79940 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
79941 struct l2cap_options opts;
79942 - int len, err = 0;
79943 + int err = 0;
79944 + size_t len = optlen;
79945 u32 opt;
79946
79947 BT_DBG("sk %p", sk);
79948 @@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
79949 opts.max_tx = chan->max_tx;
79950 opts.txwin_size = chan->tx_win;
79951
79952 - len = min_t(unsigned int, sizeof(opts), optlen);
79953 + len = min(sizeof(opts), len);
79954 if (copy_from_user((char *) &opts, optval, len)) {
79955 err = -EFAULT;
79956 break;
79957 @@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
79958 struct bt_security sec;
79959 struct bt_power pwr;
79960 struct l2cap_conn *conn;
79961 - int len, err = 0;
79962 + int err = 0;
79963 + size_t len = optlen;
79964 u32 opt;
79965
79966 BT_DBG("sk %p", sk);
79967 @@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
79968
79969 sec.level = BT_SECURITY_LOW;
79970
79971 - len = min_t(unsigned int, sizeof(sec), optlen);
79972 + len = min(sizeof(sec), len);
79973 if (copy_from_user((char *) &sec, optval, len)) {
79974 err = -EFAULT;
79975 break;
79976 @@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
79977
79978 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
79979
79980 - len = min_t(unsigned int, sizeof(pwr), optlen);
79981 + len = min(sizeof(pwr), len);
79982 if (copy_from_user((char *) &pwr, optval, len)) {
79983 err = -EFAULT;
79984 break;
79985 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
79986 index ce3f665..2c7d08f 100644
79987 --- a/net/bluetooth/rfcomm/sock.c
79988 +++ b/net/bluetooth/rfcomm/sock.c
79989 @@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
79990 struct sock *sk = sock->sk;
79991 struct bt_security sec;
79992 int err = 0;
79993 - size_t len;
79994 + size_t len = optlen;
79995 u32 opt;
79996
79997 BT_DBG("sk %p", sk);
79998 @@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
79999
80000 sec.level = BT_SECURITY_LOW;
80001
80002 - len = min_t(unsigned int, sizeof(sec), optlen);
80003 + len = min(sizeof(sec), len);
80004 if (copy_from_user((char *) &sec, optval, len)) {
80005 err = -EFAULT;
80006 break;
80007 diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
80008 index bd6fd0f..6492cba 100644
80009 --- a/net/bluetooth/rfcomm/tty.c
80010 +++ b/net/bluetooth/rfcomm/tty.c
80011 @@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
80012 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
80013
80014 spin_lock_irqsave(&dev->port.lock, flags);
80015 - if (dev->port.count > 0) {
80016 + if (atomic_read(&dev->port.count) > 0) {
80017 spin_unlock_irqrestore(&dev->port.lock, flags);
80018 return;
80019 }
80020 @@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
80021 return -ENODEV;
80022
80023 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
80024 - dev->channel, dev->port.count);
80025 + dev->channel, atomic_read(&dev->port.count));
80026
80027 spin_lock_irqsave(&dev->port.lock, flags);
80028 - if (++dev->port.count > 1) {
80029 + if (atomic_inc_return(&dev->port.count) > 1) {
80030 spin_unlock_irqrestore(&dev->port.lock, flags);
80031 return 0;
80032 }
80033 @@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
80034 return;
80035
80036 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
80037 - dev->port.count);
80038 + atomic_read(&dev->port.count));
80039
80040 spin_lock_irqsave(&dev->port.lock, flags);
80041 - if (!--dev->port.count) {
80042 + if (!atomic_dec_return(&dev->port.count)) {
80043 spin_unlock_irqrestore(&dev->port.lock, flags);
80044 if (dev->tty_dev->parent)
80045 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
80046 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
80047 index 5fe2ff3..121d696 100644
80048 --- a/net/bridge/netfilter/ebtables.c
80049 +++ b/net/bridge/netfilter/ebtables.c
80050 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
80051 tmp.valid_hooks = t->table->valid_hooks;
80052 }
80053 mutex_unlock(&ebt_mutex);
80054 - if (copy_to_user(user, &tmp, *len) != 0){
80055 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
80056 BUGPRINT("c2u Didn't work\n");
80057 ret = -EFAULT;
80058 break;
80059 @@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
80060 goto out;
80061 tmp.valid_hooks = t->valid_hooks;
80062
80063 - if (copy_to_user(user, &tmp, *len) != 0) {
80064 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
80065 ret = -EFAULT;
80066 break;
80067 }
80068 @@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
80069 tmp.entries_size = t->table->entries_size;
80070 tmp.valid_hooks = t->table->valid_hooks;
80071
80072 - if (copy_to_user(user, &tmp, *len) != 0) {
80073 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
80074 ret = -EFAULT;
80075 break;
80076 }
80077 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
80078 index a376ec1..1fbd6be 100644
80079 --- a/net/caif/cfctrl.c
80080 +++ b/net/caif/cfctrl.c
80081 @@ -10,6 +10,7 @@
80082 #include <linux/spinlock.h>
80083 #include <linux/slab.h>
80084 #include <linux/pkt_sched.h>
80085 +#include <linux/sched.h>
80086 #include <net/caif/caif_layer.h>
80087 #include <net/caif/cfpkt.h>
80088 #include <net/caif/cfctrl.h>
80089 @@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
80090 memset(&dev_info, 0, sizeof(dev_info));
80091 dev_info.id = 0xff;
80092 cfsrvl_init(&this->serv, 0, &dev_info, false);
80093 - atomic_set(&this->req_seq_no, 1);
80094 - atomic_set(&this->rsp_seq_no, 1);
80095 + atomic_set_unchecked(&this->req_seq_no, 1);
80096 + atomic_set_unchecked(&this->rsp_seq_no, 1);
80097 this->serv.layer.receive = cfctrl_recv;
80098 sprintf(this->serv.layer.name, "ctrl");
80099 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
80100 @@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
80101 struct cfctrl_request_info *req)
80102 {
80103 spin_lock_bh(&ctrl->info_list_lock);
80104 - atomic_inc(&ctrl->req_seq_no);
80105 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
80106 + atomic_inc_unchecked(&ctrl->req_seq_no);
80107 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
80108 list_add_tail(&req->list, &ctrl->list);
80109 spin_unlock_bh(&ctrl->info_list_lock);
80110 }
80111 @@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
80112 if (p != first)
80113 pr_warn("Requests are not received in order\n");
80114
80115 - atomic_set(&ctrl->rsp_seq_no,
80116 + atomic_set_unchecked(&ctrl->rsp_seq_no,
80117 p->sequence_no);
80118 list_del(&p->list);
80119 goto out;
80120 diff --git a/net/can/af_can.c b/net/can/af_can.c
80121 index ddac1ee..3ee0a78 100644
80122 --- a/net/can/af_can.c
80123 +++ b/net/can/af_can.c
80124 @@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
80125 };
80126
80127 /* notifier block for netdevice event */
80128 -static struct notifier_block can_netdev_notifier __read_mostly = {
80129 +static struct notifier_block can_netdev_notifier = {
80130 .notifier_call = can_notifier,
80131 };
80132
80133 diff --git a/net/can/gw.c b/net/can/gw.c
80134 index 574dda78e..3d2b3da 100644
80135 --- a/net/can/gw.c
80136 +++ b/net/can/gw.c
80137 @@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
80138 MODULE_ALIAS("can-gw");
80139
80140 static HLIST_HEAD(cgw_list);
80141 -static struct notifier_block notifier;
80142
80143 static struct kmem_cache *cgw_cache __read_mostly;
80144
80145 @@ -893,6 +892,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
80146 return err;
80147 }
80148
80149 +static struct notifier_block notifier = {
80150 + .notifier_call = cgw_notifier
80151 +};
80152 +
80153 static __init int cgw_module_init(void)
80154 {
80155 printk(banner);
80156 @@ -904,7 +907,6 @@ static __init int cgw_module_init(void)
80157 return -ENOMEM;
80158
80159 /* set notifier */
80160 - notifier.notifier_call = cgw_notifier;
80161 register_netdevice_notifier(&notifier);
80162
80163 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
80164 diff --git a/net/compat.c b/net/compat.c
80165 index 79ae884..17c5c09 100644
80166 --- a/net/compat.c
80167 +++ b/net/compat.c
80168 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
80169 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
80170 __get_user(kmsg->msg_flags, &umsg->msg_flags))
80171 return -EFAULT;
80172 - kmsg->msg_name = compat_ptr(tmp1);
80173 - kmsg->msg_iov = compat_ptr(tmp2);
80174 - kmsg->msg_control = compat_ptr(tmp3);
80175 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
80176 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
80177 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
80178 return 0;
80179 }
80180
80181 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80182
80183 if (kern_msg->msg_namelen) {
80184 if (mode == VERIFY_READ) {
80185 - int err = move_addr_to_kernel(kern_msg->msg_name,
80186 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
80187 kern_msg->msg_namelen,
80188 kern_address);
80189 if (err < 0)
80190 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80191 kern_msg->msg_name = NULL;
80192
80193 tot_len = iov_from_user_compat_to_kern(kern_iov,
80194 - (struct compat_iovec __user *)kern_msg->msg_iov,
80195 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
80196 kern_msg->msg_iovlen);
80197 if (tot_len >= 0)
80198 kern_msg->msg_iov = kern_iov;
80199 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80200
80201 #define CMSG_COMPAT_FIRSTHDR(msg) \
80202 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
80203 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
80204 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
80205 (struct compat_cmsghdr __user *)NULL)
80206
80207 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
80208 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
80209 (ucmlen) <= (unsigned long) \
80210 ((mhdr)->msg_controllen - \
80211 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
80212 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
80213
80214 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
80215 struct compat_cmsghdr __user *cmsg, int cmsg_len)
80216 {
80217 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
80218 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
80219 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
80220 msg->msg_controllen)
80221 return NULL;
80222 return (struct compat_cmsghdr __user *)ptr;
80223 @@ -219,7 +219,7 @@ Efault:
80224
80225 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
80226 {
80227 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80228 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80229 struct compat_cmsghdr cmhdr;
80230 struct compat_timeval ctv;
80231 struct compat_timespec cts[3];
80232 @@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80233
80234 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
80235 {
80236 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80237 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80238 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
80239 int fdnum = scm->fp->count;
80240 struct file **fp = scm->fp->fp;
80241 @@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
80242 return -EFAULT;
80243 old_fs = get_fs();
80244 set_fs(KERNEL_DS);
80245 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
80246 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
80247 set_fs(old_fs);
80248
80249 return err;
80250 @@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
80251 len = sizeof(ktime);
80252 old_fs = get_fs();
80253 set_fs(KERNEL_DS);
80254 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
80255 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
80256 set_fs(old_fs);
80257
80258 if (!err) {
80259 @@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80260 case MCAST_JOIN_GROUP:
80261 case MCAST_LEAVE_GROUP:
80262 {
80263 - struct compat_group_req __user *gr32 = (void *)optval;
80264 + struct compat_group_req __user *gr32 = (void __user *)optval;
80265 struct group_req __user *kgr =
80266 compat_alloc_user_space(sizeof(struct group_req));
80267 u32 interface;
80268 @@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80269 case MCAST_BLOCK_SOURCE:
80270 case MCAST_UNBLOCK_SOURCE:
80271 {
80272 - struct compat_group_source_req __user *gsr32 = (void *)optval;
80273 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
80274 struct group_source_req __user *kgsr = compat_alloc_user_space(
80275 sizeof(struct group_source_req));
80276 u32 interface;
80277 @@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80278 }
80279 case MCAST_MSFILTER:
80280 {
80281 - struct compat_group_filter __user *gf32 = (void *)optval;
80282 + struct compat_group_filter __user *gf32 = (void __user *)optval;
80283 struct group_filter __user *kgf;
80284 u32 interface, fmode, numsrc;
80285
80286 @@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
80287 char __user *optval, int __user *optlen,
80288 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
80289 {
80290 - struct compat_group_filter __user *gf32 = (void *)optval;
80291 + struct compat_group_filter __user *gf32 = (void __user *)optval;
80292 struct group_filter __user *kgf;
80293 int __user *koptlen;
80294 u32 interface, fmode, numsrc;
80295 @@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
80296
80297 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
80298 return -EINVAL;
80299 - if (copy_from_user(a, args, nas[call]))
80300 + if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
80301 return -EFAULT;
80302 a0 = a[0];
80303 a1 = a[1];
80304 diff --git a/net/core/datagram.c b/net/core/datagram.c
80305 index 368f9c3..f82d4a3 100644
80306 --- a/net/core/datagram.c
80307 +++ b/net/core/datagram.c
80308 @@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
80309 }
80310
80311 kfree_skb(skb);
80312 - atomic_inc(&sk->sk_drops);
80313 + atomic_inc_unchecked(&sk->sk_drops);
80314 sk_mem_reclaim_partial(sk);
80315
80316 return err;
80317 diff --git a/net/core/dev.c b/net/core/dev.c
80318 index f64e439..8f959e6 100644
80319 --- a/net/core/dev.c
80320 +++ b/net/core/dev.c
80321 @@ -1250,9 +1250,13 @@ void dev_load(struct net *net, const char *name)
80322 if (no_module && capable(CAP_NET_ADMIN))
80323 no_module = request_module("netdev-%s", name);
80324 if (no_module && capable(CAP_SYS_MODULE)) {
80325 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
80326 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
80327 +#else
80328 if (!request_module("%s", name))
80329 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
80330 name);
80331 +#endif
80332 }
80333 }
80334 EXPORT_SYMBOL(dev_load);
80335 @@ -1715,7 +1719,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
80336 {
80337 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
80338 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
80339 - atomic_long_inc(&dev->rx_dropped);
80340 + atomic_long_inc_unchecked(&dev->rx_dropped);
80341 kfree_skb(skb);
80342 return NET_RX_DROP;
80343 }
80344 @@ -1725,7 +1729,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
80345 nf_reset(skb);
80346
80347 if (unlikely(!is_skb_forwardable(dev, skb))) {
80348 - atomic_long_inc(&dev->rx_dropped);
80349 + atomic_long_inc_unchecked(&dev->rx_dropped);
80350 kfree_skb(skb);
80351 return NET_RX_DROP;
80352 }
80353 @@ -2180,7 +2184,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
80354
80355 struct dev_gso_cb {
80356 void (*destructor)(struct sk_buff *skb);
80357 -};
80358 +} __no_const;
80359
80360 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
80361
80362 @@ -3053,7 +3057,7 @@ enqueue:
80363
80364 local_irq_restore(flags);
80365
80366 - atomic_long_inc(&skb->dev->rx_dropped);
80367 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
80368 kfree_skb(skb);
80369 return NET_RX_DROP;
80370 }
80371 @@ -3125,7 +3129,7 @@ int netif_rx_ni(struct sk_buff *skb)
80372 }
80373 EXPORT_SYMBOL(netif_rx_ni);
80374
80375 -static void net_tx_action(struct softirq_action *h)
80376 +static void net_tx_action(void)
80377 {
80378 struct softnet_data *sd = &__get_cpu_var(softnet_data);
80379
80380 @@ -3456,7 +3460,7 @@ ncls:
80381 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
80382 } else {
80383 drop:
80384 - atomic_long_inc(&skb->dev->rx_dropped);
80385 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
80386 kfree_skb(skb);
80387 /* Jamal, now you will not able to escape explaining
80388 * me how you were going to use this. :-)
80389 @@ -4039,7 +4043,7 @@ void netif_napi_del(struct napi_struct *napi)
80390 }
80391 EXPORT_SYMBOL(netif_napi_del);
80392
80393 -static void net_rx_action(struct softirq_action *h)
80394 +static void net_rx_action(void)
80395 {
80396 struct softnet_data *sd = &__get_cpu_var(softnet_data);
80397 unsigned long time_limit = jiffies + 2;
80398 @@ -4523,8 +4527,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
80399 else
80400 seq_printf(seq, "%04x", ntohs(pt->type));
80401
80402 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80403 + seq_printf(seq, " %-8s %p\n",
80404 + pt->dev ? pt->dev->name : "", NULL);
80405 +#else
80406 seq_printf(seq, " %-8s %pF\n",
80407 pt->dev ? pt->dev->name : "", pt->func);
80408 +#endif
80409 }
80410
80411 return 0;
80412 @@ -6096,7 +6105,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
80413 } else {
80414 netdev_stats_to_stats64(storage, &dev->stats);
80415 }
80416 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
80417 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
80418 return storage;
80419 }
80420 EXPORT_SYMBOL(dev_get_stats);
80421 diff --git a/net/core/flow.c b/net/core/flow.c
80422 index b0901ee..7d3c2ca 100644
80423 --- a/net/core/flow.c
80424 +++ b/net/core/flow.c
80425 @@ -61,7 +61,7 @@ struct flow_cache {
80426 struct timer_list rnd_timer;
80427 };
80428
80429 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
80430 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
80431 EXPORT_SYMBOL(flow_cache_genid);
80432 static struct flow_cache flow_cache_global;
80433 static struct kmem_cache *flow_cachep __read_mostly;
80434 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
80435
80436 static int flow_entry_valid(struct flow_cache_entry *fle)
80437 {
80438 - if (atomic_read(&flow_cache_genid) != fle->genid)
80439 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
80440 return 0;
80441 if (fle->object && !fle->object->ops->check(fle->object))
80442 return 0;
80443 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
80444 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
80445 fcp->hash_count++;
80446 }
80447 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
80448 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
80449 flo = fle->object;
80450 if (!flo)
80451 goto ret_object;
80452 @@ -280,7 +280,7 @@ nocache:
80453 }
80454 flo = resolver(net, key, family, dir, flo, ctx);
80455 if (fle) {
80456 - fle->genid = atomic_read(&flow_cache_genid);
80457 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
80458 if (!IS_ERR(flo))
80459 fle->object = flo;
80460 else
80461 diff --git a/net/core/iovec.c b/net/core/iovec.c
80462 index 7e7aeb0..2a998cb 100644
80463 --- a/net/core/iovec.c
80464 +++ b/net/core/iovec.c
80465 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
80466 if (m->msg_namelen) {
80467 if (mode == VERIFY_READ) {
80468 void __user *namep;
80469 - namep = (void __user __force *) m->msg_name;
80470 + namep = (void __force_user *) m->msg_name;
80471 err = move_addr_to_kernel(namep, m->msg_namelen,
80472 address);
80473 if (err < 0)
80474 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
80475 }
80476
80477 size = m->msg_iovlen * sizeof(struct iovec);
80478 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
80479 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
80480 return -EFAULT;
80481
80482 m->msg_iov = iov;
80483 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
80484 index 1868625..5f4de62 100644
80485 --- a/net/core/rtnetlink.c
80486 +++ b/net/core/rtnetlink.c
80487 @@ -58,7 +58,7 @@ struct rtnl_link {
80488 rtnl_doit_func doit;
80489 rtnl_dumpit_func dumpit;
80490 rtnl_calcit_func calcit;
80491 -};
80492 +} __no_const;
80493
80494 static DEFINE_MUTEX(rtnl_mutex);
80495
80496 diff --git a/net/core/scm.c b/net/core/scm.c
80497 index 905dcc6..14ee2d6 100644
80498 --- a/net/core/scm.c
80499 +++ b/net/core/scm.c
80500 @@ -224,7 +224,7 @@ EXPORT_SYMBOL(__scm_send);
80501 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80502 {
80503 struct cmsghdr __user *cm
80504 - = (__force struct cmsghdr __user *)msg->msg_control;
80505 + = (struct cmsghdr __force_user *)msg->msg_control;
80506 struct cmsghdr cmhdr;
80507 int cmlen = CMSG_LEN(len);
80508 int err;
80509 @@ -247,7 +247,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80510 err = -EFAULT;
80511 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
80512 goto out;
80513 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
80514 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
80515 goto out;
80516 cmlen = CMSG_SPACE(len);
80517 if (msg->msg_controllen < cmlen)
80518 @@ -263,7 +263,7 @@ EXPORT_SYMBOL(put_cmsg);
80519 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
80520 {
80521 struct cmsghdr __user *cm
80522 - = (__force struct cmsghdr __user*)msg->msg_control;
80523 + = (struct cmsghdr __force_user *)msg->msg_control;
80524
80525 int fdmax = 0;
80526 int fdnum = scm->fp->count;
80527 @@ -283,7 +283,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
80528 if (fdnum < fdmax)
80529 fdmax = fdnum;
80530
80531 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
80532 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
80533 i++, cmfptr++)
80534 {
80535 struct socket *sock;
80536 diff --git a/net/core/sock.c b/net/core/sock.c
80537 index bc131d4..029e378 100644
80538 --- a/net/core/sock.c
80539 +++ b/net/core/sock.c
80540 @@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80541 struct sk_buff_head *list = &sk->sk_receive_queue;
80542
80543 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
80544 - atomic_inc(&sk->sk_drops);
80545 + atomic_inc_unchecked(&sk->sk_drops);
80546 trace_sock_rcvqueue_full(sk, skb);
80547 return -ENOMEM;
80548 }
80549 @@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80550 return err;
80551
80552 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
80553 - atomic_inc(&sk->sk_drops);
80554 + atomic_inc_unchecked(&sk->sk_drops);
80555 return -ENOBUFS;
80556 }
80557
80558 @@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80559 skb_dst_force(skb);
80560
80561 spin_lock_irqsave(&list->lock, flags);
80562 - skb->dropcount = atomic_read(&sk->sk_drops);
80563 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
80564 __skb_queue_tail(list, skb);
80565 spin_unlock_irqrestore(&list->lock, flags);
80566
80567 @@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
80568 skb->dev = NULL;
80569
80570 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
80571 - atomic_inc(&sk->sk_drops);
80572 + atomic_inc_unchecked(&sk->sk_drops);
80573 goto discard_and_relse;
80574 }
80575 if (nested)
80576 @@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
80577 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
80578 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
80579 bh_unlock_sock(sk);
80580 - atomic_inc(&sk->sk_drops);
80581 + atomic_inc_unchecked(&sk->sk_drops);
80582 goto discard_and_relse;
80583 }
80584
80585 @@ -930,12 +930,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
80586 struct timeval tm;
80587 } v;
80588
80589 - int lv = sizeof(int);
80590 - int len;
80591 + unsigned int lv = sizeof(int);
80592 + unsigned int len;
80593
80594 if (get_user(len, optlen))
80595 return -EFAULT;
80596 - if (len < 0)
80597 + if (len > INT_MAX)
80598 return -EINVAL;
80599
80600 memset(&v, 0, sizeof(v));
80601 @@ -1083,11 +1083,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
80602
80603 case SO_PEERNAME:
80604 {
80605 - char address[128];
80606 + char address[_K_SS_MAXSIZE];
80607
80608 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
80609 return -ENOTCONN;
80610 - if (lv < len)
80611 + if (lv < len || sizeof address < len)
80612 return -EINVAL;
80613 if (copy_to_user(optval, address, len))
80614 return -EFAULT;
80615 @@ -1146,7 +1146,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
80616
80617 if (len > lv)
80618 len = lv;
80619 - if (copy_to_user(optval, &v, len))
80620 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
80621 return -EFAULT;
80622 lenout:
80623 if (put_user(len, optlen))
80624 @@ -2276,7 +2276,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
80625 */
80626 smp_wmb();
80627 atomic_set(&sk->sk_refcnt, 1);
80628 - atomic_set(&sk->sk_drops, 0);
80629 + atomic_set_unchecked(&sk->sk_drops, 0);
80630 }
80631 EXPORT_SYMBOL(sock_init_data);
80632
80633 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
80634 index 750f44f..0a699b1 100644
80635 --- a/net/core/sock_diag.c
80636 +++ b/net/core/sock_diag.c
80637 @@ -15,20 +15,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
80638
80639 int sock_diag_check_cookie(void *sk, __u32 *cookie)
80640 {
80641 +#ifndef CONFIG_GRKERNSEC_HIDESYM
80642 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
80643 cookie[1] != INET_DIAG_NOCOOKIE) &&
80644 ((u32)(unsigned long)sk != cookie[0] ||
80645 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
80646 return -ESTALE;
80647 else
80648 +#endif
80649 return 0;
80650 }
80651 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
80652
80653 void sock_diag_save_cookie(void *sk, __u32 *cookie)
80654 {
80655 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80656 + cookie[0] = 0;
80657 + cookie[1] = 0;
80658 +#else
80659 cookie[0] = (u32)(unsigned long)sk;
80660 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
80661 +#endif
80662 }
80663 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
80664
80665 @@ -97,21 +104,6 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
80666 }
80667 EXPORT_SYMBOL_GPL(sock_diag_unregister);
80668
80669 -static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
80670 -{
80671 - if (sock_diag_handlers[family] == NULL)
80672 - request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
80673 - NETLINK_SOCK_DIAG, family);
80674 -
80675 - mutex_lock(&sock_diag_table_mutex);
80676 - return sock_diag_handlers[family];
80677 -}
80678 -
80679 -static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
80680 -{
80681 - mutex_unlock(&sock_diag_table_mutex);
80682 -}
80683 -
80684 static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
80685 {
80686 int err;
80687 @@ -124,12 +116,17 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
80688 if (req->sdiag_family >= AF_MAX)
80689 return -EINVAL;
80690
80691 - hndl = sock_diag_lock_handler(req->sdiag_family);
80692 + if (sock_diag_handlers[req->sdiag_family] == NULL)
80693 + request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
80694 + NETLINK_SOCK_DIAG, req->sdiag_family);
80695 +
80696 + mutex_lock(&sock_diag_table_mutex);
80697 + hndl = sock_diag_handlers[req->sdiag_family];
80698 if (hndl == NULL)
80699 err = -ENOENT;
80700 else
80701 err = hndl->dump(skb, nlh);
80702 - sock_diag_unlock_handler(hndl);
80703 + mutex_unlock(&sock_diag_table_mutex);
80704
80705 return err;
80706 }
80707 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
80708 index a55eecc..dd8428c 100644
80709 --- a/net/decnet/sysctl_net_decnet.c
80710 +++ b/net/decnet/sysctl_net_decnet.c
80711 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
80712
80713 if (len > *lenp) len = *lenp;
80714
80715 - if (copy_to_user(buffer, addr, len))
80716 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
80717 return -EFAULT;
80718
80719 *lenp = len;
80720 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
80721
80722 if (len > *lenp) len = *lenp;
80723
80724 - if (copy_to_user(buffer, devname, len))
80725 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
80726 return -EFAULT;
80727
80728 *lenp = len;
80729 diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
80730 index a69b4e4..dbccba5 100644
80731 --- a/net/ipv4/ah4.c
80732 +++ b/net/ipv4/ah4.c
80733 @@ -421,7 +421,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
80734 return;
80735
80736 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
80737 - atomic_inc(&flow_cache_genid);
80738 + atomic_inc_unchecked(&flow_cache_genid);
80739 rt_genid_bump(net);
80740
80741 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
80742 diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
80743 index 3b4f0cd..8cb864c 100644
80744 --- a/net/ipv4/esp4.c
80745 +++ b/net/ipv4/esp4.c
80746 @@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
80747 return;
80748
80749 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
80750 - atomic_inc(&flow_cache_genid);
80751 + atomic_inc_unchecked(&flow_cache_genid);
80752 rt_genid_bump(net);
80753
80754 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
80755 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
80756 index 5cd75e2..f57ef39 100644
80757 --- a/net/ipv4/fib_frontend.c
80758 +++ b/net/ipv4/fib_frontend.c
80759 @@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
80760 #ifdef CONFIG_IP_ROUTE_MULTIPATH
80761 fib_sync_up(dev);
80762 #endif
80763 - atomic_inc(&net->ipv4.dev_addr_genid);
80764 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
80765 rt_cache_flush(dev_net(dev));
80766 break;
80767 case NETDEV_DOWN:
80768 fib_del_ifaddr(ifa, NULL);
80769 - atomic_inc(&net->ipv4.dev_addr_genid);
80770 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
80771 if (ifa->ifa_dev->ifa_list == NULL) {
80772 /* Last address was deleted from this interface.
80773 * Disable IP.
80774 @@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
80775 #ifdef CONFIG_IP_ROUTE_MULTIPATH
80776 fib_sync_up(dev);
80777 #endif
80778 - atomic_inc(&net->ipv4.dev_addr_genid);
80779 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
80780 rt_cache_flush(net);
80781 break;
80782 case NETDEV_DOWN:
80783 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
80784 index 4797a80..2bd54e9 100644
80785 --- a/net/ipv4/fib_semantics.c
80786 +++ b/net/ipv4/fib_semantics.c
80787 @@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
80788 nh->nh_saddr = inet_select_addr(nh->nh_dev,
80789 nh->nh_gw,
80790 nh->nh_parent->fib_scope);
80791 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
80792 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
80793
80794 return nh->nh_saddr;
80795 }
80796 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
80797 index fa3ae81..0dbe6b8 100644
80798 --- a/net/ipv4/inet_hashtables.c
80799 +++ b/net/ipv4/inet_hashtables.c
80800 @@ -18,12 +18,15 @@
80801 #include <linux/sched.h>
80802 #include <linux/slab.h>
80803 #include <linux/wait.h>
80804 +#include <linux/security.h>
80805
80806 #include <net/inet_connection_sock.h>
80807 #include <net/inet_hashtables.h>
80808 #include <net/secure_seq.h>
80809 #include <net/ip.h>
80810
80811 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
80812 +
80813 /*
80814 * Allocate and initialize a new local port bind bucket.
80815 * The bindhash mutex for snum's hash chain must be held here.
80816 @@ -540,6 +543,8 @@ ok:
80817 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
80818 spin_unlock(&head->lock);
80819
80820 + gr_update_task_in_ip_table(current, inet_sk(sk));
80821 +
80822 if (tw) {
80823 inet_twsk_deschedule(tw, death_row);
80824 while (twrefcnt) {
80825 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
80826 index 000e3d2..5472da3 100644
80827 --- a/net/ipv4/inetpeer.c
80828 +++ b/net/ipv4/inetpeer.c
80829 @@ -503,8 +503,8 @@ relookup:
80830 if (p) {
80831 p->daddr = *daddr;
80832 atomic_set(&p->refcnt, 1);
80833 - atomic_set(&p->rid, 0);
80834 - atomic_set(&p->ip_id_count,
80835 + atomic_set_unchecked(&p->rid, 0);
80836 + atomic_set_unchecked(&p->ip_id_count,
80837 (daddr->family == AF_INET) ?
80838 secure_ip_id(daddr->addr.a4) :
80839 secure_ipv6_id(daddr->addr.a6));
80840 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
80841 index eb9d63a..50babc1 100644
80842 --- a/net/ipv4/ip_fragment.c
80843 +++ b/net/ipv4/ip_fragment.c
80844 @@ -322,7 +322,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
80845 return 0;
80846
80847 start = qp->rid;
80848 - end = atomic_inc_return(&peer->rid);
80849 + end = atomic_inc_return_unchecked(&peer->rid);
80850 qp->rid = end;
80851
80852 rc = qp->q.fragments && (end - start) > max;
80853 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
80854 index d9c4f11..02b82db 100644
80855 --- a/net/ipv4/ip_sockglue.c
80856 +++ b/net/ipv4/ip_sockglue.c
80857 @@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
80858 len = min_t(unsigned int, len, opt->optlen);
80859 if (put_user(len, optlen))
80860 return -EFAULT;
80861 - if (copy_to_user(optval, opt->__data, len))
80862 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
80863 + copy_to_user(optval, opt->__data, len))
80864 return -EFAULT;
80865 return 0;
80866 }
80867 @@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
80868 if (sk->sk_type != SOCK_STREAM)
80869 return -ENOPROTOOPT;
80870
80871 - msg.msg_control = optval;
80872 + msg.msg_control = (void __force_kernel *)optval;
80873 msg.msg_controllen = len;
80874 msg.msg_flags = flags;
80875
80876 diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
80877 index 9a46dae..5f793a0 100644
80878 --- a/net/ipv4/ipcomp.c
80879 +++ b/net/ipv4/ipcomp.c
80880 @@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
80881 return;
80882
80883 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
80884 - atomic_inc(&flow_cache_genid);
80885 + atomic_inc_unchecked(&flow_cache_genid);
80886 rt_genid_bump(net);
80887
80888 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
80889 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
80890 index a2e50ae..e152b7c 100644
80891 --- a/net/ipv4/ipconfig.c
80892 +++ b/net/ipv4/ipconfig.c
80893 @@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
80894
80895 mm_segment_t oldfs = get_fs();
80896 set_fs(get_ds());
80897 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
80898 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
80899 set_fs(oldfs);
80900 return res;
80901 }
80902 @@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
80903
80904 mm_segment_t oldfs = get_fs();
80905 set_fs(get_ds());
80906 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
80907 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
80908 set_fs(oldfs);
80909 return res;
80910 }
80911 @@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
80912
80913 mm_segment_t oldfs = get_fs();
80914 set_fs(get_ds());
80915 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
80916 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
80917 set_fs(oldfs);
80918 return res;
80919 }
80920 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
80921 index 3ea4127..849297b 100644
80922 --- a/net/ipv4/netfilter/arp_tables.c
80923 +++ b/net/ipv4/netfilter/arp_tables.c
80924 @@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
80925 #endif
80926
80927 static int get_info(struct net *net, void __user *user,
80928 - const int *len, int compat)
80929 + int len, int compat)
80930 {
80931 char name[XT_TABLE_MAXNAMELEN];
80932 struct xt_table *t;
80933 int ret;
80934
80935 - if (*len != sizeof(struct arpt_getinfo)) {
80936 - duprintf("length %u != %Zu\n", *len,
80937 + if (len != sizeof(struct arpt_getinfo)) {
80938 + duprintf("length %u != %Zu\n", len,
80939 sizeof(struct arpt_getinfo));
80940 return -EINVAL;
80941 }
80942 @@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
80943 info.size = private->size;
80944 strcpy(info.name, name);
80945
80946 - if (copy_to_user(user, &info, *len) != 0)
80947 + if (copy_to_user(user, &info, len) != 0)
80948 ret = -EFAULT;
80949 else
80950 ret = 0;
80951 @@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
80952
80953 switch (cmd) {
80954 case ARPT_SO_GET_INFO:
80955 - ret = get_info(sock_net(sk), user, len, 1);
80956 + ret = get_info(sock_net(sk), user, *len, 1);
80957 break;
80958 case ARPT_SO_GET_ENTRIES:
80959 ret = compat_get_entries(sock_net(sk), user, len);
80960 @@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
80961
80962 switch (cmd) {
80963 case ARPT_SO_GET_INFO:
80964 - ret = get_info(sock_net(sk), user, len, 0);
80965 + ret = get_info(sock_net(sk), user, *len, 0);
80966 break;
80967
80968 case ARPT_SO_GET_ENTRIES:
80969 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
80970 index 17c5e06..1b91206 100644
80971 --- a/net/ipv4/netfilter/ip_tables.c
80972 +++ b/net/ipv4/netfilter/ip_tables.c
80973 @@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
80974 #endif
80975
80976 static int get_info(struct net *net, void __user *user,
80977 - const int *len, int compat)
80978 + int len, int compat)
80979 {
80980 char name[XT_TABLE_MAXNAMELEN];
80981 struct xt_table *t;
80982 int ret;
80983
80984 - if (*len != sizeof(struct ipt_getinfo)) {
80985 - duprintf("length %u != %zu\n", *len,
80986 + if (len != sizeof(struct ipt_getinfo)) {
80987 + duprintf("length %u != %zu\n", len,
80988 sizeof(struct ipt_getinfo));
80989 return -EINVAL;
80990 }
80991 @@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
80992 info.size = private->size;
80993 strcpy(info.name, name);
80994
80995 - if (copy_to_user(user, &info, *len) != 0)
80996 + if (copy_to_user(user, &info, len) != 0)
80997 ret = -EFAULT;
80998 else
80999 ret = 0;
81000 @@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
81001
81002 switch (cmd) {
81003 case IPT_SO_GET_INFO:
81004 - ret = get_info(sock_net(sk), user, len, 1);
81005 + ret = get_info(sock_net(sk), user, *len, 1);
81006 break;
81007 case IPT_SO_GET_ENTRIES:
81008 ret = compat_get_entries(sock_net(sk), user, len);
81009 @@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
81010
81011 switch (cmd) {
81012 case IPT_SO_GET_INFO:
81013 - ret = get_info(sock_net(sk), user, len, 0);
81014 + ret = get_info(sock_net(sk), user, *len, 0);
81015 break;
81016
81017 case IPT_SO_GET_ENTRIES:
81018 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
81019 index dc454cc..5bb917f 100644
81020 --- a/net/ipv4/ping.c
81021 +++ b/net/ipv4/ping.c
81022 @@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
81023 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
81024 0, sock_i_ino(sp),
81025 atomic_read(&sp->sk_refcnt), sp,
81026 - atomic_read(&sp->sk_drops), len);
81027 + atomic_read_unchecked(&sp->sk_drops), len);
81028 }
81029
81030 static int ping_seq_show(struct seq_file *seq, void *v)
81031 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
81032 index 6f08991..55867ad 100644
81033 --- a/net/ipv4/raw.c
81034 +++ b/net/ipv4/raw.c
81035 @@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
81036 int raw_rcv(struct sock *sk, struct sk_buff *skb)
81037 {
81038 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
81039 - atomic_inc(&sk->sk_drops);
81040 + atomic_inc_unchecked(&sk->sk_drops);
81041 kfree_skb(skb);
81042 return NET_RX_DROP;
81043 }
81044 @@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
81045
81046 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
81047 {
81048 + struct icmp_filter filter;
81049 +
81050 if (optlen > sizeof(struct icmp_filter))
81051 optlen = sizeof(struct icmp_filter);
81052 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
81053 + if (copy_from_user(&filter, optval, optlen))
81054 return -EFAULT;
81055 + raw_sk(sk)->filter = filter;
81056 return 0;
81057 }
81058
81059 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
81060 {
81061 int len, ret = -EFAULT;
81062 + struct icmp_filter filter;
81063
81064 if (get_user(len, optlen))
81065 goto out;
81066 @@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
81067 if (len > sizeof(struct icmp_filter))
81068 len = sizeof(struct icmp_filter);
81069 ret = -EFAULT;
81070 - if (put_user(len, optlen) ||
81071 - copy_to_user(optval, &raw_sk(sk)->filter, len))
81072 + filter = raw_sk(sk)->filter;
81073 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
81074 goto out;
81075 ret = 0;
81076 out: return ret;
81077 @@ -998,7 +1002,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81078 0, 0L, 0,
81079 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
81080 0, sock_i_ino(sp),
81081 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81082 + atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
81083 }
81084
81085 static int raw_seq_show(struct seq_file *seq, void *v)
81086 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
81087 index a0fcc47..5949bba1 100644
81088 --- a/net/ipv4/route.c
81089 +++ b/net/ipv4/route.c
81090 @@ -2602,7 +2602,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
81091
81092 static __net_init int rt_genid_init(struct net *net)
81093 {
81094 - atomic_set(&net->rt_genid, 0);
81095 + atomic_set_unchecked(&net->rt_genid, 0);
81096 get_random_bytes(&net->ipv4.dev_addr_genid,
81097 sizeof(net->ipv4.dev_addr_genid));
81098 return 0;
81099 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
81100 index ad70a96..50cb55b 100644
81101 --- a/net/ipv4/tcp_input.c
81102 +++ b/net/ipv4/tcp_input.c
81103 @@ -4733,7 +4733,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
81104 * simplifies code)
81105 */
81106 static void
81107 -tcp_collapse(struct sock *sk, struct sk_buff_head *list,
81108 +__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
81109 struct sk_buff *head, struct sk_buff *tail,
81110 u32 start, u32 end)
81111 {
81112 @@ -5850,6 +5850,7 @@ discard:
81113 tcp_paws_reject(&tp->rx_opt, 0))
81114 goto discard_and_undo;
81115
81116 +#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
81117 if (th->syn) {
81118 /* We see SYN without ACK. It is attempt of
81119 * simultaneous connect with crossed SYNs.
81120 @@ -5900,6 +5901,7 @@ discard:
81121 goto discard;
81122 #endif
81123 }
81124 +#endif
81125 /* "fifth, if neither of the SYN or RST bits is set then
81126 * drop the segment and return."
81127 */
81128 @@ -5944,7 +5946,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
81129 goto discard;
81130
81131 if (th->syn) {
81132 - if (th->fin)
81133 + if (th->fin || th->urg || th->psh)
81134 goto discard;
81135 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
81136 return 1;
81137 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
81138 index eadb693..e8f7251 100644
81139 --- a/net/ipv4/tcp_ipv4.c
81140 +++ b/net/ipv4/tcp_ipv4.c
81141 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
81142 EXPORT_SYMBOL(sysctl_tcp_low_latency);
81143
81144
81145 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81146 +extern int grsec_enable_blackhole;
81147 +#endif
81148 +
81149 #ifdef CONFIG_TCP_MD5SIG
81150 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
81151 __be32 daddr, __be32 saddr, const struct tcphdr *th);
81152 @@ -1895,6 +1899,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
81153 return 0;
81154
81155 reset:
81156 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81157 + if (!grsec_enable_blackhole)
81158 +#endif
81159 tcp_v4_send_reset(rsk, skb);
81160 discard:
81161 kfree_skb(skb);
81162 @@ -1994,12 +2001,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
81163 TCP_SKB_CB(skb)->sacked = 0;
81164
81165 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81166 - if (!sk)
81167 + if (!sk) {
81168 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81169 + ret = 1;
81170 +#endif
81171 goto no_tcp_socket;
81172 -
81173 + }
81174 process:
81175 - if (sk->sk_state == TCP_TIME_WAIT)
81176 + if (sk->sk_state == TCP_TIME_WAIT) {
81177 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81178 + ret = 2;
81179 +#endif
81180 goto do_time_wait;
81181 + }
81182
81183 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
81184 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
81185 @@ -2050,6 +2064,10 @@ no_tcp_socket:
81186 bad_packet:
81187 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81188 } else {
81189 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81190 + if (!grsec_enable_blackhole || (ret == 1 &&
81191 + (skb->dev->flags & IFF_LOOPBACK)))
81192 +#endif
81193 tcp_v4_send_reset(NULL, skb);
81194 }
81195
81196 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
81197 index f35f2df..ccb5ca6 100644
81198 --- a/net/ipv4/tcp_minisocks.c
81199 +++ b/net/ipv4/tcp_minisocks.c
81200 @@ -27,6 +27,10 @@
81201 #include <net/inet_common.h>
81202 #include <net/xfrm.h>
81203
81204 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81205 +extern int grsec_enable_blackhole;
81206 +#endif
81207 +
81208 int sysctl_tcp_syncookies __read_mostly = 1;
81209 EXPORT_SYMBOL(sysctl_tcp_syncookies);
81210
81211 @@ -742,7 +746,10 @@ embryonic_reset:
81212 * avoid becoming vulnerable to outside attack aiming at
81213 * resetting legit local connections.
81214 */
81215 - req->rsk_ops->send_reset(sk, skb);
81216 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81217 + if (!grsec_enable_blackhole)
81218 +#endif
81219 + req->rsk_ops->send_reset(sk, skb);
81220 } else if (fastopen) { /* received a valid RST pkt */
81221 reqsk_fastopen_remove(sk, req, true);
81222 tcp_reset(sk);
81223 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
81224 index 4526fe6..1a34e43 100644
81225 --- a/net/ipv4/tcp_probe.c
81226 +++ b/net/ipv4/tcp_probe.c
81227 @@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
81228 if (cnt + width >= len)
81229 break;
81230
81231 - if (copy_to_user(buf + cnt, tbuf, width))
81232 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
81233 return -EFAULT;
81234 cnt += width;
81235 }
81236 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
81237 index b78aac3..e18230b 100644
81238 --- a/net/ipv4/tcp_timer.c
81239 +++ b/net/ipv4/tcp_timer.c
81240 @@ -22,6 +22,10 @@
81241 #include <linux/gfp.h>
81242 #include <net/tcp.h>
81243
81244 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81245 +extern int grsec_lastack_retries;
81246 +#endif
81247 +
81248 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
81249 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
81250 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
81251 @@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
81252 }
81253 }
81254
81255 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81256 + if ((sk->sk_state == TCP_LAST_ACK) &&
81257 + (grsec_lastack_retries > 0) &&
81258 + (grsec_lastack_retries < retry_until))
81259 + retry_until = grsec_lastack_retries;
81260 +#endif
81261 +
81262 if (retransmits_timed_out(sk, retry_until,
81263 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
81264 /* Has it gone just too far? */
81265 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
81266 index 1f4d405..3524677 100644
81267 --- a/net/ipv4/udp.c
81268 +++ b/net/ipv4/udp.c
81269 @@ -87,6 +87,7 @@
81270 #include <linux/types.h>
81271 #include <linux/fcntl.h>
81272 #include <linux/module.h>
81273 +#include <linux/security.h>
81274 #include <linux/socket.h>
81275 #include <linux/sockios.h>
81276 #include <linux/igmp.h>
81277 @@ -111,6 +112,10 @@
81278 #include <trace/events/skb.h>
81279 #include "udp_impl.h"
81280
81281 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81282 +extern int grsec_enable_blackhole;
81283 +#endif
81284 +
81285 struct udp_table udp_table __read_mostly;
81286 EXPORT_SYMBOL(udp_table);
81287
81288 @@ -569,6 +574,9 @@ found:
81289 return s;
81290 }
81291
81292 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
81293 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
81294 +
81295 /*
81296 * This routine is called by the ICMP module when it gets some
81297 * sort of error condition. If err < 0 then the socket should
81298 @@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
81299 dport = usin->sin_port;
81300 if (dport == 0)
81301 return -EINVAL;
81302 +
81303 + err = gr_search_udp_sendmsg(sk, usin);
81304 + if (err)
81305 + return err;
81306 } else {
81307 if (sk->sk_state != TCP_ESTABLISHED)
81308 return -EDESTADDRREQ;
81309 +
81310 + err = gr_search_udp_sendmsg(sk, NULL);
81311 + if (err)
81312 + return err;
81313 +
81314 daddr = inet->inet_daddr;
81315 dport = inet->inet_dport;
81316 /* Open fast path for connected socket.
81317 @@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
81318 udp_lib_checksum_complete(skb)) {
81319 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
81320 IS_UDPLITE(sk));
81321 - atomic_inc(&sk->sk_drops);
81322 + atomic_inc_unchecked(&sk->sk_drops);
81323 __skb_unlink(skb, rcvq);
81324 __skb_queue_tail(&list_kill, skb);
81325 }
81326 @@ -1194,6 +1211,10 @@ try_again:
81327 if (!skb)
81328 goto out;
81329
81330 + err = gr_search_udp_recvmsg(sk, skb);
81331 + if (err)
81332 + goto out_free;
81333 +
81334 ulen = skb->len - sizeof(struct udphdr);
81335 copied = len;
81336 if (copied > ulen)
81337 @@ -1227,7 +1248,7 @@ try_again:
81338 if (unlikely(err)) {
81339 trace_kfree_skb(skb, udp_recvmsg);
81340 if (!peeked) {
81341 - atomic_inc(&sk->sk_drops);
81342 + atomic_inc_unchecked(&sk->sk_drops);
81343 UDP_INC_STATS_USER(sock_net(sk),
81344 UDP_MIB_INERRORS, is_udplite);
81345 }
81346 @@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
81347
81348 drop:
81349 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
81350 - atomic_inc(&sk->sk_drops);
81351 + atomic_inc_unchecked(&sk->sk_drops);
81352 kfree_skb(skb);
81353 return -1;
81354 }
81355 @@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
81356 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
81357
81358 if (!skb1) {
81359 - atomic_inc(&sk->sk_drops);
81360 + atomic_inc_unchecked(&sk->sk_drops);
81361 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
81362 IS_UDPLITE(sk));
81363 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
81364 @@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
81365 goto csum_error;
81366
81367 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
81368 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81369 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
81370 +#endif
81371 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
81372
81373 /*
81374 @@ -2120,7 +2144,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
81375 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
81376 0, sock_i_ino(sp),
81377 atomic_read(&sp->sk_refcnt), sp,
81378 - atomic_read(&sp->sk_drops), len);
81379 + atomic_read_unchecked(&sp->sk_drops), len);
81380 }
81381
81382 int udp4_seq_show(struct seq_file *seq, void *v)
81383 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
81384 index 1b5d8cb..2e8c2d9 100644
81385 --- a/net/ipv6/addrconf.c
81386 +++ b/net/ipv6/addrconf.c
81387 @@ -2272,7 +2272,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
81388 p.iph.ihl = 5;
81389 p.iph.protocol = IPPROTO_IPV6;
81390 p.iph.ttl = 64;
81391 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
81392 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
81393
81394 if (ops->ndo_do_ioctl) {
81395 mm_segment_t oldfs = get_fs();
81396 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
81397 index 131dd09..7647ada 100644
81398 --- a/net/ipv6/ip6_gre.c
81399 +++ b/net/ipv6/ip6_gre.c
81400 @@ -1337,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
81401 }
81402
81403
81404 -static struct inet6_protocol ip6gre_protocol __read_mostly = {
81405 +static struct inet6_protocol ip6gre_protocol = {
81406 .handler = ip6gre_rcv,
81407 .err_handler = ip6gre_err,
81408 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
81409 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
81410 index d1e2e8e..51c19ae 100644
81411 --- a/net/ipv6/ipv6_sockglue.c
81412 +++ b/net/ipv6/ipv6_sockglue.c
81413 @@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81414 if (sk->sk_type != SOCK_STREAM)
81415 return -ENOPROTOOPT;
81416
81417 - msg.msg_control = optval;
81418 + msg.msg_control = (void __force_kernel *)optval;
81419 msg.msg_controllen = len;
81420 msg.msg_flags = flags;
81421
81422 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
81423 index 125a90d..2a11f36 100644
81424 --- a/net/ipv6/netfilter/ip6_tables.c
81425 +++ b/net/ipv6/netfilter/ip6_tables.c
81426 @@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
81427 #endif
81428
81429 static int get_info(struct net *net, void __user *user,
81430 - const int *len, int compat)
81431 + int len, int compat)
81432 {
81433 char name[XT_TABLE_MAXNAMELEN];
81434 struct xt_table *t;
81435 int ret;
81436
81437 - if (*len != sizeof(struct ip6t_getinfo)) {
81438 - duprintf("length %u != %zu\n", *len,
81439 + if (len != sizeof(struct ip6t_getinfo)) {
81440 + duprintf("length %u != %zu\n", len,
81441 sizeof(struct ip6t_getinfo));
81442 return -EINVAL;
81443 }
81444 @@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
81445 info.size = private->size;
81446 strcpy(info.name, name);
81447
81448 - if (copy_to_user(user, &info, *len) != 0)
81449 + if (copy_to_user(user, &info, len) != 0)
81450 ret = -EFAULT;
81451 else
81452 ret = 0;
81453 @@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
81454
81455 switch (cmd) {
81456 case IP6T_SO_GET_INFO:
81457 - ret = get_info(sock_net(sk), user, len, 1);
81458 + ret = get_info(sock_net(sk), user, *len, 1);
81459 break;
81460 case IP6T_SO_GET_ENTRIES:
81461 ret = compat_get_entries(sock_net(sk), user, len);
81462 @@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
81463
81464 switch (cmd) {
81465 case IP6T_SO_GET_INFO:
81466 - ret = get_info(sock_net(sk), user, len, 0);
81467 + ret = get_info(sock_net(sk), user, *len, 0);
81468 break;
81469
81470 case IP6T_SO_GET_ENTRIES:
81471 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
81472 index 70fa814..d70c28c 100644
81473 --- a/net/ipv6/raw.c
81474 +++ b/net/ipv6/raw.c
81475 @@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
81476 {
81477 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
81478 skb_checksum_complete(skb)) {
81479 - atomic_inc(&sk->sk_drops);
81480 + atomic_inc_unchecked(&sk->sk_drops);
81481 kfree_skb(skb);
81482 return NET_RX_DROP;
81483 }
81484 @@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81485 struct raw6_sock *rp = raw6_sk(sk);
81486
81487 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
81488 - atomic_inc(&sk->sk_drops);
81489 + atomic_inc_unchecked(&sk->sk_drops);
81490 kfree_skb(skb);
81491 return NET_RX_DROP;
81492 }
81493 @@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81494
81495 if (inet->hdrincl) {
81496 if (skb_checksum_complete(skb)) {
81497 - atomic_inc(&sk->sk_drops);
81498 + atomic_inc_unchecked(&sk->sk_drops);
81499 kfree_skb(skb);
81500 return NET_RX_DROP;
81501 }
81502 @@ -604,7 +604,7 @@ out:
81503 return err;
81504 }
81505
81506 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
81507 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
81508 struct flowi6 *fl6, struct dst_entry **dstp,
81509 unsigned int flags)
81510 {
81511 @@ -916,12 +916,15 @@ do_confirm:
81512 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
81513 char __user *optval, int optlen)
81514 {
81515 + struct icmp6_filter filter;
81516 +
81517 switch (optname) {
81518 case ICMPV6_FILTER:
81519 if (optlen > sizeof(struct icmp6_filter))
81520 optlen = sizeof(struct icmp6_filter);
81521 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
81522 + if (copy_from_user(&filter, optval, optlen))
81523 return -EFAULT;
81524 + raw6_sk(sk)->filter = filter;
81525 return 0;
81526 default:
81527 return -ENOPROTOOPT;
81528 @@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
81529 char __user *optval, int __user *optlen)
81530 {
81531 int len;
81532 + struct icmp6_filter filter;
81533
81534 switch (optname) {
81535 case ICMPV6_FILTER:
81536 @@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
81537 len = sizeof(struct icmp6_filter);
81538 if (put_user(len, optlen))
81539 return -EFAULT;
81540 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
81541 + filter = raw6_sk(sk)->filter;
81542 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
81543 return -EFAULT;
81544 return 0;
81545 default:
81546 @@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81547 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
81548 0,
81549 sock_i_ino(sp),
81550 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81551 + atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
81552 }
81553
81554 static int raw6_seq_show(struct seq_file *seq, void *v)
81555 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
81556 index 4f435371..5de9da7 100644
81557 --- a/net/ipv6/tcp_ipv6.c
81558 +++ b/net/ipv6/tcp_ipv6.c
81559 @@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
81560 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
81561 }
81562
81563 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81564 +extern int grsec_enable_blackhole;
81565 +#endif
81566 +
81567 static void tcp_v6_hash(struct sock *sk)
81568 {
81569 if (sk->sk_state != TCP_CLOSE) {
81570 @@ -1433,6 +1437,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
81571 return 0;
81572
81573 reset:
81574 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81575 + if (!grsec_enable_blackhole)
81576 +#endif
81577 tcp_v6_send_reset(sk, skb);
81578 discard:
81579 if (opt_skb)
81580 @@ -1514,12 +1521,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
81581 TCP_SKB_CB(skb)->sacked = 0;
81582
81583 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81584 - if (!sk)
81585 + if (!sk) {
81586 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81587 + ret = 1;
81588 +#endif
81589 goto no_tcp_socket;
81590 + }
81591
81592 process:
81593 - if (sk->sk_state == TCP_TIME_WAIT)
81594 + if (sk->sk_state == TCP_TIME_WAIT) {
81595 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81596 + ret = 2;
81597 +#endif
81598 goto do_time_wait;
81599 + }
81600
81601 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
81602 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
81603 @@ -1568,6 +1583,10 @@ no_tcp_socket:
81604 bad_packet:
81605 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81606 } else {
81607 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81608 + if (!grsec_enable_blackhole || (ret == 1 &&
81609 + (skb->dev->flags & IFF_LOOPBACK)))
81610 +#endif
81611 tcp_v6_send_reset(NULL, skb);
81612 }
81613
81614 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
81615 index fb08329..2d6919e 100644
81616 --- a/net/ipv6/udp.c
81617 +++ b/net/ipv6/udp.c
81618 @@ -51,6 +51,10 @@
81619 #include <trace/events/skb.h>
81620 #include "udp_impl.h"
81621
81622 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81623 +extern int grsec_enable_blackhole;
81624 +#endif
81625 +
81626 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
81627 {
81628 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
81629 @@ -395,7 +399,7 @@ try_again:
81630 if (unlikely(err)) {
81631 trace_kfree_skb(skb, udpv6_recvmsg);
81632 if (!peeked) {
81633 - atomic_inc(&sk->sk_drops);
81634 + atomic_inc_unchecked(&sk->sk_drops);
81635 if (is_udp4)
81636 UDP_INC_STATS_USER(sock_net(sk),
81637 UDP_MIB_INERRORS,
81638 @@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
81639 return rc;
81640 drop:
81641 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
81642 - atomic_inc(&sk->sk_drops);
81643 + atomic_inc_unchecked(&sk->sk_drops);
81644 kfree_skb(skb);
81645 return -1;
81646 }
81647 @@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
81648 if (likely(skb1 == NULL))
81649 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
81650 if (!skb1) {
81651 - atomic_inc(&sk->sk_drops);
81652 + atomic_inc_unchecked(&sk->sk_drops);
81653 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
81654 IS_UDPLITE(sk));
81655 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
81656 @@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
81657 goto discard;
81658
81659 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
81660 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81661 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
81662 +#endif
81663 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
81664
81665 kfree_skb(skb);
81666 @@ -1379,7 +1386,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
81667 0,
81668 sock_i_ino(sp),
81669 atomic_read(&sp->sk_refcnt), sp,
81670 - atomic_read(&sp->sk_drops));
81671 + atomic_read_unchecked(&sp->sk_drops));
81672 }
81673
81674 int udp6_seq_show(struct seq_file *seq, void *v)
81675 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
81676 index a68c88c..d55b0c5 100644
81677 --- a/net/irda/ircomm/ircomm_tty.c
81678 +++ b/net/irda/ircomm/ircomm_tty.c
81679 @@ -312,12 +312,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
81680 add_wait_queue(&port->open_wait, &wait);
81681
81682 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
81683 - __FILE__, __LINE__, tty->driver->name, port->count);
81684 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
81685
81686 spin_lock_irqsave(&port->lock, flags);
81687 if (!tty_hung_up_p(filp)) {
81688 extra_count = 1;
81689 - port->count--;
81690 + atomic_dec(&port->count);
81691 }
81692 spin_unlock_irqrestore(&port->lock, flags);
81693 port->blocked_open++;
81694 @@ -353,7 +353,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
81695 }
81696
81697 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
81698 - __FILE__, __LINE__, tty->driver->name, port->count);
81699 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
81700
81701 schedule();
81702 }
81703 @@ -364,13 +364,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
81704 if (extra_count) {
81705 /* ++ is not atomic, so this should be protected - Jean II */
81706 spin_lock_irqsave(&port->lock, flags);
81707 - port->count++;
81708 + atomic_inc(&port->count);
81709 spin_unlock_irqrestore(&port->lock, flags);
81710 }
81711 port->blocked_open--;
81712
81713 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
81714 - __FILE__, __LINE__, tty->driver->name, port->count);
81715 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
81716
81717 if (!retval)
81718 port->flags |= ASYNC_NORMAL_ACTIVE;
81719 @@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
81720
81721 /* ++ is not atomic, so this should be protected - Jean II */
81722 spin_lock_irqsave(&self->port.lock, flags);
81723 - self->port.count++;
81724 + atomic_inc(&self->port.count);
81725 spin_unlock_irqrestore(&self->port.lock, flags);
81726 tty_port_tty_set(&self->port, tty);
81727
81728 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
81729 - self->line, self->port.count);
81730 + self->line, atomic_read(&self->port.count));
81731
81732 /* Not really used by us, but lets do it anyway */
81733 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
81734 @@ -986,7 +986,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
81735 tty_kref_put(port->tty);
81736 }
81737 port->tty = NULL;
81738 - port->count = 0;
81739 + atomic_set(&port->count, 0);
81740 spin_unlock_irqrestore(&port->lock, flags);
81741
81742 wake_up_interruptible(&port->open_wait);
81743 @@ -1343,7 +1343,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
81744 seq_putc(m, '\n');
81745
81746 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
81747 - seq_printf(m, "Open count: %d\n", self->port.count);
81748 + seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
81749 seq_printf(m, "Max data size: %d\n", self->max_data_size);
81750 seq_printf(m, "Max header size: %d\n", self->max_header_size);
81751
81752 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
81753 index cd6f7a9..e63fe89 100644
81754 --- a/net/iucv/af_iucv.c
81755 +++ b/net/iucv/af_iucv.c
81756 @@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
81757
81758 write_lock_bh(&iucv_sk_list.lock);
81759
81760 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
81761 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
81762 while (__iucv_get_sock_by_name(name)) {
81763 sprintf(name, "%08x",
81764 - atomic_inc_return(&iucv_sk_list.autobind_name));
81765 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
81766 }
81767
81768 write_unlock_bh(&iucv_sk_list.lock);
81769 diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
81770 index df08250..02021fe 100644
81771 --- a/net/iucv/iucv.c
81772 +++ b/net/iucv/iucv.c
81773 @@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
81774 return NOTIFY_OK;
81775 }
81776
81777 -static struct notifier_block __refdata iucv_cpu_notifier = {
81778 +static struct notifier_block iucv_cpu_notifier = {
81779 .notifier_call = iucv_cpu_notify,
81780 };
81781
81782 diff --git a/net/key/af_key.c b/net/key/af_key.c
81783 index 5b426a6..970032b 100644
81784 --- a/net/key/af_key.c
81785 +++ b/net/key/af_key.c
81786 @@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
81787 static u32 get_acqseq(void)
81788 {
81789 u32 res;
81790 - static atomic_t acqseq;
81791 + static atomic_unchecked_t acqseq;
81792
81793 do {
81794 - res = atomic_inc_return(&acqseq);
81795 + res = atomic_inc_return_unchecked(&acqseq);
81796 } while (!res);
81797 return res;
81798 }
81799 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
81800 index 0479c64..d031db6 100644
81801 --- a/net/mac80211/cfg.c
81802 +++ b/net/mac80211/cfg.c
81803 @@ -790,7 +790,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
81804 ret = ieee80211_vif_use_channel(sdata, chandef,
81805 IEEE80211_CHANCTX_EXCLUSIVE);
81806 }
81807 - } else if (local->open_count == local->monitors) {
81808 + } else if (local_read(&local->open_count) == local->monitors) {
81809 local->_oper_channel = chandef->chan;
81810 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
81811 ieee80211_hw_config(local, 0);
81812 @@ -2716,7 +2716,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
81813 else
81814 local->probe_req_reg--;
81815
81816 - if (!local->open_count)
81817 + if (!local_read(&local->open_count))
81818 break;
81819
81820 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
81821 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
81822 index 2ed065c..948177f 100644
81823 --- a/net/mac80211/ieee80211_i.h
81824 +++ b/net/mac80211/ieee80211_i.h
81825 @@ -28,6 +28,7 @@
81826 #include <net/ieee80211_radiotap.h>
81827 #include <net/cfg80211.h>
81828 #include <net/mac80211.h>
81829 +#include <asm/local.h>
81830 #include "key.h"
81831 #include "sta_info.h"
81832 #include "debug.h"
81833 @@ -909,7 +910,7 @@ struct ieee80211_local {
81834 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
81835 spinlock_t queue_stop_reason_lock;
81836
81837 - int open_count;
81838 + local_t open_count;
81839 int monitors, cooked_mntrs;
81840 /* number of interfaces with corresponding FIF_ flags */
81841 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
81842 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
81843 index 8be854e..ad72a69 100644
81844 --- a/net/mac80211/iface.c
81845 +++ b/net/mac80211/iface.c
81846 @@ -546,7 +546,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
81847 break;
81848 }
81849
81850 - if (local->open_count == 0) {
81851 + if (local_read(&local->open_count) == 0) {
81852 res = drv_start(local);
81853 if (res)
81854 goto err_del_bss;
81855 @@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
81856 break;
81857 }
81858
81859 - if (local->monitors == 0 && local->open_count == 0) {
81860 + if (local->monitors == 0 && local_read(&local->open_count) == 0) {
81861 res = ieee80211_add_virtual_monitor(local);
81862 if (res)
81863 goto err_stop;
81864 @@ -699,7 +699,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
81865 mutex_unlock(&local->mtx);
81866
81867 if (coming_up)
81868 - local->open_count++;
81869 + local_inc(&local->open_count);
81870
81871 if (hw_reconf_flags)
81872 ieee80211_hw_config(local, hw_reconf_flags);
81873 @@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
81874 err_del_interface:
81875 drv_remove_interface(local, sdata);
81876 err_stop:
81877 - if (!local->open_count)
81878 + if (!local_read(&local->open_count))
81879 drv_stop(local);
81880 err_del_bss:
81881 sdata->bss = NULL;
81882 @@ -827,7 +827,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
81883 }
81884
81885 if (going_down)
81886 - local->open_count--;
81887 + local_dec(&local->open_count);
81888
81889 switch (sdata->vif.type) {
81890 case NL80211_IFTYPE_AP_VLAN:
81891 @@ -884,7 +884,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
81892
81893 ieee80211_recalc_ps(local, -1);
81894
81895 - if (local->open_count == 0) {
81896 + if (local_read(&local->open_count) == 0) {
81897 if (local->ops->napi_poll)
81898 napi_disable(&local->napi);
81899 ieee80211_clear_tx_pending(local);
81900 @@ -910,7 +910,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
81901 }
81902 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
81903
81904 - if (local->monitors == local->open_count && local->monitors > 0)
81905 + if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
81906 ieee80211_add_virtual_monitor(local);
81907 }
81908
81909 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
81910 index 1b087ff..bf600e9 100644
81911 --- a/net/mac80211/main.c
81912 +++ b/net/mac80211/main.c
81913 @@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
81914 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
81915 IEEE80211_CONF_CHANGE_POWER);
81916
81917 - if (changed && local->open_count) {
81918 + if (changed && local_read(&local->open_count)) {
81919 ret = drv_config(local, changed);
81920 /*
81921 * Goal:
81922 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
81923 index 79a48f3..5e185c9 100644
81924 --- a/net/mac80211/pm.c
81925 +++ b/net/mac80211/pm.c
81926 @@ -35,7 +35,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
81927 struct sta_info *sta;
81928 struct ieee80211_chanctx *ctx;
81929
81930 - if (!local->open_count)
81931 + if (!local_read(&local->open_count))
81932 goto suspend;
81933
81934 ieee80211_scan_cancel(local);
81935 @@ -73,7 +73,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
81936 cancel_work_sync(&local->dynamic_ps_enable_work);
81937 del_timer_sync(&local->dynamic_ps_timer);
81938
81939 - local->wowlan = wowlan && local->open_count;
81940 + local->wowlan = wowlan && local_read(&local->open_count);
81941 if (local->wowlan) {
81942 int err = drv_suspend(local, wowlan);
81943 if (err < 0) {
81944 @@ -187,7 +187,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
81945 mutex_unlock(&local->chanctx_mtx);
81946
81947 /* stop hardware - this must stop RX */
81948 - if (local->open_count)
81949 + if (local_read(&local->open_count))
81950 ieee80211_stop_device(local);
81951
81952 suspend:
81953 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
81954 index dd88381..eef4dd6 100644
81955 --- a/net/mac80211/rate.c
81956 +++ b/net/mac80211/rate.c
81957 @@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
81958
81959 ASSERT_RTNL();
81960
81961 - if (local->open_count)
81962 + if (local_read(&local->open_count))
81963 return -EBUSY;
81964
81965 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
81966 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
81967 index c97a065..ff61928 100644
81968 --- a/net/mac80211/rc80211_pid_debugfs.c
81969 +++ b/net/mac80211/rc80211_pid_debugfs.c
81970 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
81971
81972 spin_unlock_irqrestore(&events->lock, status);
81973
81974 - if (copy_to_user(buf, pb, p))
81975 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
81976 return -EFAULT;
81977
81978 return p;
81979 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
81980 index f11e8c5..08d0013 100644
81981 --- a/net/mac80211/util.c
81982 +++ b/net/mac80211/util.c
81983 @@ -1380,7 +1380,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
81984 }
81985 #endif
81986 /* everything else happens only if HW was up & running */
81987 - if (!local->open_count)
81988 + if (!local_read(&local->open_count))
81989 goto wake_up;
81990
81991 /*
81992 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
81993 index 49e96df..63a51c3 100644
81994 --- a/net/netfilter/Kconfig
81995 +++ b/net/netfilter/Kconfig
81996 @@ -936,6 +936,16 @@ config NETFILTER_XT_MATCH_ESP
81997
81998 To compile it as a module, choose M here. If unsure, say N.
81999
82000 +config NETFILTER_XT_MATCH_GRADM
82001 + tristate '"gradm" match support'
82002 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
82003 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
82004 + ---help---
82005 + The gradm match allows to match on grsecurity RBAC being enabled.
82006 + It is useful when iptables rules are applied early on bootup to
82007 + prevent connections to the machine (except from a trusted host)
82008 + while the RBAC system is disabled.
82009 +
82010 config NETFILTER_XT_MATCH_HASHLIMIT
82011 tristate '"hashlimit" match support'
82012 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
82013 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
82014 index 3259697..54d5393 100644
82015 --- a/net/netfilter/Makefile
82016 +++ b/net/netfilter/Makefile
82017 @@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
82018 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
82019 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
82020 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
82021 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
82022 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
82023 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
82024 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
82025 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
82026 index 30e764a..c3b6a9d 100644
82027 --- a/net/netfilter/ipvs/ip_vs_conn.c
82028 +++ b/net/netfilter/ipvs/ip_vs_conn.c
82029 @@ -554,7 +554,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
82030 /* Increase the refcnt counter of the dest */
82031 atomic_inc(&dest->refcnt);
82032
82033 - conn_flags = atomic_read(&dest->conn_flags);
82034 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
82035 if (cp->protocol != IPPROTO_UDP)
82036 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
82037 flags = cp->flags;
82038 @@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
82039 atomic_set(&cp->refcnt, 1);
82040
82041 atomic_set(&cp->n_control, 0);
82042 - atomic_set(&cp->in_pkts, 0);
82043 + atomic_set_unchecked(&cp->in_pkts, 0);
82044
82045 atomic_inc(&ipvs->conn_count);
82046 if (flags & IP_VS_CONN_F_NO_CPORT)
82047 @@ -1180,7 +1180,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
82048
82049 /* Don't drop the entry if its number of incoming packets is not
82050 located in [0, 8] */
82051 - i = atomic_read(&cp->in_pkts);
82052 + i = atomic_read_unchecked(&cp->in_pkts);
82053 if (i > 8 || i < 0) return 0;
82054
82055 if (!todrop_rate[i]) return 0;
82056 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
82057 index 47edf5a..235b07d 100644
82058 --- a/net/netfilter/ipvs/ip_vs_core.c
82059 +++ b/net/netfilter/ipvs/ip_vs_core.c
82060 @@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
82061 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
82062 /* do not touch skb anymore */
82063
82064 - atomic_inc(&cp->in_pkts);
82065 + atomic_inc_unchecked(&cp->in_pkts);
82066 ip_vs_conn_put(cp);
82067 return ret;
82068 }
82069 @@ -1691,7 +1691,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
82070 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
82071 pkts = sysctl_sync_threshold(ipvs);
82072 else
82073 - pkts = atomic_add_return(1, &cp->in_pkts);
82074 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
82075
82076 if (ipvs->sync_state & IP_VS_STATE_MASTER)
82077 ip_vs_sync_conn(net, cp, pkts);
82078 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
82079 index ec664cb..cd576ab 100644
82080 --- a/net/netfilter/ipvs/ip_vs_ctl.c
82081 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
82082 @@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
82083 ip_vs_rs_hash(ipvs, dest);
82084 write_unlock_bh(&ipvs->rs_lock);
82085 }
82086 - atomic_set(&dest->conn_flags, conn_flags);
82087 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
82088
82089 /* bind the service */
82090 if (!dest->svc) {
82091 @@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82092 " %-7s %-6d %-10d %-10d\n",
82093 &dest->addr.in6,
82094 ntohs(dest->port),
82095 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82096 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82097 atomic_read(&dest->weight),
82098 atomic_read(&dest->activeconns),
82099 atomic_read(&dest->inactconns));
82100 @@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82101 "%-7s %-6d %-10d %-10d\n",
82102 ntohl(dest->addr.ip),
82103 ntohs(dest->port),
82104 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82105 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82106 atomic_read(&dest->weight),
82107 atomic_read(&dest->activeconns),
82108 atomic_read(&dest->inactconns));
82109 @@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
82110
82111 entry.addr = dest->addr.ip;
82112 entry.port = dest->port;
82113 - entry.conn_flags = atomic_read(&dest->conn_flags);
82114 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
82115 entry.weight = atomic_read(&dest->weight);
82116 entry.u_threshold = dest->u_threshold;
82117 entry.l_threshold = dest->l_threshold;
82118 @@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
82119 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
82120 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
82121 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
82122 - (atomic_read(&dest->conn_flags) &
82123 + (atomic_read_unchecked(&dest->conn_flags) &
82124 IP_VS_CONN_F_FWD_MASK)) ||
82125 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
82126 atomic_read(&dest->weight)) ||
82127 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
82128 index 44fd10c..2a163b3 100644
82129 --- a/net/netfilter/ipvs/ip_vs_sync.c
82130 +++ b/net/netfilter/ipvs/ip_vs_sync.c
82131 @@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
82132 cp = cp->control;
82133 if (cp) {
82134 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
82135 - pkts = atomic_add_return(1, &cp->in_pkts);
82136 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
82137 else
82138 pkts = sysctl_sync_threshold(ipvs);
82139 ip_vs_sync_conn(net, cp->control, pkts);
82140 @@ -758,7 +758,7 @@ control:
82141 if (!cp)
82142 return;
82143 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
82144 - pkts = atomic_add_return(1, &cp->in_pkts);
82145 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
82146 else
82147 pkts = sysctl_sync_threshold(ipvs);
82148 goto sloop;
82149 @@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
82150
82151 if (opt)
82152 memcpy(&cp->in_seq, opt, sizeof(*opt));
82153 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
82154 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
82155 cp->state = state;
82156 cp->old_state = cp->state;
82157 /*
82158 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
82159 index ee6b7a9..f9a89f6 100644
82160 --- a/net/netfilter/ipvs/ip_vs_xmit.c
82161 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
82162 @@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
82163 else
82164 rc = NF_ACCEPT;
82165 /* do not touch skb anymore */
82166 - atomic_inc(&cp->in_pkts);
82167 + atomic_inc_unchecked(&cp->in_pkts);
82168 goto out;
82169 }
82170
82171 @@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
82172 else
82173 rc = NF_ACCEPT;
82174 /* do not touch skb anymore */
82175 - atomic_inc(&cp->in_pkts);
82176 + atomic_inc_unchecked(&cp->in_pkts);
82177 goto out;
82178 }
82179
82180 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
82181 index e4a0c4f..c263f28 100644
82182 --- a/net/netfilter/nf_conntrack_core.c
82183 +++ b/net/netfilter/nf_conntrack_core.c
82184 @@ -1529,6 +1529,10 @@ err_extend:
82185 #define DYING_NULLS_VAL ((1<<30)+1)
82186 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
82187
82188 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82189 +static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
82190 +#endif
82191 +
82192 static int nf_conntrack_init_net(struct net *net)
82193 {
82194 int ret;
82195 @@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
82196 goto err_stat;
82197 }
82198
82199 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82200 + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
82201 +#else
82202 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
82203 +#endif
82204 if (!net->ct.slabname) {
82205 ret = -ENOMEM;
82206 goto err_slabname;
82207 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
82208 index 92fd8ec..3f6ea4b 100644
82209 --- a/net/netfilter/nfnetlink_log.c
82210 +++ b/net/netfilter/nfnetlink_log.c
82211 @@ -72,7 +72,7 @@ struct nfulnl_instance {
82212 };
82213
82214 static DEFINE_SPINLOCK(instances_lock);
82215 -static atomic_t global_seq;
82216 +static atomic_unchecked_t global_seq;
82217
82218 #define INSTANCE_BUCKETS 16
82219 static struct hlist_head instance_table[INSTANCE_BUCKETS];
82220 @@ -537,7 +537,7 @@ __build_packet_message(struct nfulnl_instance *inst,
82221 /* global sequence number */
82222 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
82223 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
82224 - htonl(atomic_inc_return(&global_seq))))
82225 + htonl(atomic_inc_return_unchecked(&global_seq))))
82226 goto nla_put_failure;
82227
82228 if (data_len) {
82229 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
82230 new file mode 100644
82231 index 0000000..c566332
82232 --- /dev/null
82233 +++ b/net/netfilter/xt_gradm.c
82234 @@ -0,0 +1,51 @@
82235 +/*
82236 + * gradm match for netfilter
82237 + * Copyright © Zbigniew Krzystolik, 2010
82238 + *
82239 + * This program is free software; you can redistribute it and/or modify
82240 + * it under the terms of the GNU General Public License; either version
82241 + * 2 or 3 as published by the Free Software Foundation.
82242 + */
82243 +#include <linux/module.h>
82244 +#include <linux/moduleparam.h>
82245 +#include <linux/skbuff.h>
82246 +#include <linux/netfilter/x_tables.h>
82247 +#include <linux/grsecurity.h>
82248 +#include <linux/netfilter/xt_gradm.h>
82249 +
82250 +static bool
82251 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
82252 +{
82253 + const struct xt_gradm_mtinfo *info = par->matchinfo;
82254 + bool retval = false;
82255 + if (gr_acl_is_enabled())
82256 + retval = true;
82257 + return retval ^ info->invflags;
82258 +}
82259 +
82260 +static struct xt_match gradm_mt_reg __read_mostly = {
82261 + .name = "gradm",
82262 + .revision = 0,
82263 + .family = NFPROTO_UNSPEC,
82264 + .match = gradm_mt,
82265 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
82266 + .me = THIS_MODULE,
82267 +};
82268 +
82269 +static int __init gradm_mt_init(void)
82270 +{
82271 + return xt_register_match(&gradm_mt_reg);
82272 +}
82273 +
82274 +static void __exit gradm_mt_exit(void)
82275 +{
82276 + xt_unregister_match(&gradm_mt_reg);
82277 +}
82278 +
82279 +module_init(gradm_mt_init);
82280 +module_exit(gradm_mt_exit);
82281 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
82282 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
82283 +MODULE_LICENSE("GPL");
82284 +MODULE_ALIAS("ipt_gradm");
82285 +MODULE_ALIAS("ip6t_gradm");
82286 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
82287 index 4fe4fb4..87a89e5 100644
82288 --- a/net/netfilter/xt_statistic.c
82289 +++ b/net/netfilter/xt_statistic.c
82290 @@ -19,7 +19,7 @@
82291 #include <linux/module.h>
82292
82293 struct xt_statistic_priv {
82294 - atomic_t count;
82295 + atomic_unchecked_t count;
82296 } ____cacheline_aligned_in_smp;
82297
82298 MODULE_LICENSE("GPL");
82299 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
82300 break;
82301 case XT_STATISTIC_MODE_NTH:
82302 do {
82303 - oval = atomic_read(&info->master->count);
82304 + oval = atomic_read_unchecked(&info->master->count);
82305 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
82306 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
82307 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
82308 if (nval == 0)
82309 ret = !ret;
82310 break;
82311 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
82312 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
82313 if (info->master == NULL)
82314 return -ENOMEM;
82315 - atomic_set(&info->master->count, info->u.nth.count);
82316 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
82317
82318 return 0;
82319 }
82320 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
82321 index c0353d5..fcb0270 100644
82322 --- a/net/netlink/af_netlink.c
82323 +++ b/net/netlink/af_netlink.c
82324 @@ -785,7 +785,7 @@ static void netlink_overrun(struct sock *sk)
82325 sk->sk_error_report(sk);
82326 }
82327 }
82328 - atomic_inc(&sk->sk_drops);
82329 + atomic_inc_unchecked(&sk->sk_drops);
82330 }
82331
82332 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
82333 @@ -2071,7 +2071,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
82334 sk_wmem_alloc_get(s),
82335 nlk->cb,
82336 atomic_read(&s->sk_refcnt),
82337 - atomic_read(&s->sk_drops),
82338 + atomic_read_unchecked(&s->sk_drops),
82339 sock_i_ino(s)
82340 );
82341
82342 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
82343 index 7261eb8..44e8ac6 100644
82344 --- a/net/netrom/af_netrom.c
82345 +++ b/net/netrom/af_netrom.c
82346 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
82347 struct sock *sk = sock->sk;
82348 struct nr_sock *nr = nr_sk(sk);
82349
82350 + memset(sax, 0, sizeof(*sax));
82351 lock_sock(sk);
82352 if (peer != 0) {
82353 if (sk->sk_state != TCP_ESTABLISHED) {
82354 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
82355 *uaddr_len = sizeof(struct full_sockaddr_ax25);
82356 } else {
82357 sax->fsa_ax25.sax25_family = AF_NETROM;
82358 - sax->fsa_ax25.sax25_ndigis = 0;
82359 sax->fsa_ax25.sax25_call = nr->source_addr;
82360 *uaddr_len = sizeof(struct sockaddr_ax25);
82361 }
82362 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
82363 index c111bd0..7788ff7 100644
82364 --- a/net/packet/af_packet.c
82365 +++ b/net/packet/af_packet.c
82366 @@ -1578,7 +1578,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
82367
82368 spin_lock(&sk->sk_receive_queue.lock);
82369 po->stats.tp_packets++;
82370 - skb->dropcount = atomic_read(&sk->sk_drops);
82371 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
82372 __skb_queue_tail(&sk->sk_receive_queue, skb);
82373 spin_unlock(&sk->sk_receive_queue.lock);
82374 sk->sk_data_ready(sk, skb->len);
82375 @@ -1587,7 +1587,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
82376 drop_n_acct:
82377 spin_lock(&sk->sk_receive_queue.lock);
82378 po->stats.tp_drops++;
82379 - atomic_inc(&sk->sk_drops);
82380 + atomic_inc_unchecked(&sk->sk_drops);
82381 spin_unlock(&sk->sk_receive_queue.lock);
82382
82383 drop_n_restore:
82384 @@ -2565,6 +2565,7 @@ out:
82385
82386 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
82387 {
82388 + struct sock_extended_err ee;
82389 struct sock_exterr_skb *serr;
82390 struct sk_buff *skb, *skb2;
82391 int copied, err;
82392 @@ -2586,8 +2587,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
82393 sock_recv_timestamp(msg, sk, skb);
82394
82395 serr = SKB_EXT_ERR(skb);
82396 + ee = serr->ee;
82397 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
82398 - sizeof(serr->ee), &serr->ee);
82399 + sizeof ee, &ee);
82400
82401 msg->msg_flags |= MSG_ERRQUEUE;
82402 err = copied;
82403 @@ -3212,7 +3214,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
82404 case PACKET_HDRLEN:
82405 if (len > sizeof(int))
82406 len = sizeof(int);
82407 - if (copy_from_user(&val, optval, len))
82408 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
82409 return -EFAULT;
82410 switch (val) {
82411 case TPACKET_V1:
82412 @@ -3254,7 +3256,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
82413 len = lv;
82414 if (put_user(len, optlen))
82415 return -EFAULT;
82416 - if (copy_to_user(optval, data, len))
82417 + if (len > sizeof(st) || copy_to_user(optval, data, len))
82418 return -EFAULT;
82419 return 0;
82420 }
82421 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
82422 index 5a940db..f0b9c12 100644
82423 --- a/net/phonet/af_phonet.c
82424 +++ b/net/phonet/af_phonet.c
82425 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
82426 {
82427 struct phonet_protocol *pp;
82428
82429 - if (protocol >= PHONET_NPROTO)
82430 + if (protocol < 0 || protocol >= PHONET_NPROTO)
82431 return NULL;
82432
82433 rcu_read_lock();
82434 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
82435 {
82436 int err = 0;
82437
82438 - if (protocol >= PHONET_NPROTO)
82439 + if (protocol < 0 || protocol >= PHONET_NPROTO)
82440 return -EINVAL;
82441
82442 err = proto_register(pp->prot, 1);
82443 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
82444 index 576f22c..bc7a71b 100644
82445 --- a/net/phonet/pep.c
82446 +++ b/net/phonet/pep.c
82447 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
82448
82449 case PNS_PEP_CTRL_REQ:
82450 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
82451 - atomic_inc(&sk->sk_drops);
82452 + atomic_inc_unchecked(&sk->sk_drops);
82453 break;
82454 }
82455 __skb_pull(skb, 4);
82456 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
82457 }
82458
82459 if (pn->rx_credits == 0) {
82460 - atomic_inc(&sk->sk_drops);
82461 + atomic_inc_unchecked(&sk->sk_drops);
82462 err = -ENOBUFS;
82463 break;
82464 }
82465 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
82466 }
82467
82468 if (pn->rx_credits == 0) {
82469 - atomic_inc(&sk->sk_drops);
82470 + atomic_inc_unchecked(&sk->sk_drops);
82471 err = NET_RX_DROP;
82472 break;
82473 }
82474 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
82475 index b7e9827..c264c85 100644
82476 --- a/net/phonet/socket.c
82477 +++ b/net/phonet/socket.c
82478 @@ -615,7 +615,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
82479 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
82480 sock_i_ino(sk),
82481 atomic_read(&sk->sk_refcnt), sk,
82482 - atomic_read(&sk->sk_drops), &len);
82483 + atomic_read_unchecked(&sk->sk_drops), &len);
82484 }
82485 seq_printf(seq, "%*s\n", 127 - len, "");
82486 return 0;
82487 diff --git a/net/rds/cong.c b/net/rds/cong.c
82488 index e5b65ac..f3b6fb7 100644
82489 --- a/net/rds/cong.c
82490 +++ b/net/rds/cong.c
82491 @@ -78,7 +78,7 @@
82492 * finds that the saved generation number is smaller than the global generation
82493 * number, it wakes up the process.
82494 */
82495 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
82496 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
82497
82498 /*
82499 * Congestion monitoring
82500 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
82501 rdsdebug("waking map %p for %pI4\n",
82502 map, &map->m_addr);
82503 rds_stats_inc(s_cong_update_received);
82504 - atomic_inc(&rds_cong_generation);
82505 + atomic_inc_unchecked(&rds_cong_generation);
82506 if (waitqueue_active(&map->m_waitq))
82507 wake_up(&map->m_waitq);
82508 if (waitqueue_active(&rds_poll_waitq))
82509 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
82510
82511 int rds_cong_updated_since(unsigned long *recent)
82512 {
82513 - unsigned long gen = atomic_read(&rds_cong_generation);
82514 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
82515
82516 if (likely(*recent == gen))
82517 return 0;
82518 diff --git a/net/rds/ib.h b/net/rds/ib.h
82519 index 7280ab8..e04f4ea 100644
82520 --- a/net/rds/ib.h
82521 +++ b/net/rds/ib.h
82522 @@ -128,7 +128,7 @@ struct rds_ib_connection {
82523 /* sending acks */
82524 unsigned long i_ack_flags;
82525 #ifdef KERNEL_HAS_ATOMIC64
82526 - atomic64_t i_ack_next; /* next ACK to send */
82527 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
82528 #else
82529 spinlock_t i_ack_lock; /* protect i_ack_next */
82530 u64 i_ack_next; /* next ACK to send */
82531 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
82532 index 31b74f5..dc1fbfa 100644
82533 --- a/net/rds/ib_cm.c
82534 +++ b/net/rds/ib_cm.c
82535 @@ -717,7 +717,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
82536 /* Clear the ACK state */
82537 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
82538 #ifdef KERNEL_HAS_ATOMIC64
82539 - atomic64_set(&ic->i_ack_next, 0);
82540 + atomic64_set_unchecked(&ic->i_ack_next, 0);
82541 #else
82542 ic->i_ack_next = 0;
82543 #endif
82544 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
82545 index 8eb9501..0c386ff 100644
82546 --- a/net/rds/ib_recv.c
82547 +++ b/net/rds/ib_recv.c
82548 @@ -597,7 +597,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
82549 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
82550 int ack_required)
82551 {
82552 - atomic64_set(&ic->i_ack_next, seq);
82553 + atomic64_set_unchecked(&ic->i_ack_next, seq);
82554 if (ack_required) {
82555 smp_mb__before_clear_bit();
82556 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
82557 @@ -609,7 +609,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
82558 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
82559 smp_mb__after_clear_bit();
82560
82561 - return atomic64_read(&ic->i_ack_next);
82562 + return atomic64_read_unchecked(&ic->i_ack_next);
82563 }
82564 #endif
82565
82566 diff --git a/net/rds/iw.h b/net/rds/iw.h
82567 index 04ce3b1..48119a6 100644
82568 --- a/net/rds/iw.h
82569 +++ b/net/rds/iw.h
82570 @@ -134,7 +134,7 @@ struct rds_iw_connection {
82571 /* sending acks */
82572 unsigned long i_ack_flags;
82573 #ifdef KERNEL_HAS_ATOMIC64
82574 - atomic64_t i_ack_next; /* next ACK to send */
82575 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
82576 #else
82577 spinlock_t i_ack_lock; /* protect i_ack_next */
82578 u64 i_ack_next; /* next ACK to send */
82579 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
82580 index a91e1db..cf3053f 100644
82581 --- a/net/rds/iw_cm.c
82582 +++ b/net/rds/iw_cm.c
82583 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
82584 /* Clear the ACK state */
82585 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
82586 #ifdef KERNEL_HAS_ATOMIC64
82587 - atomic64_set(&ic->i_ack_next, 0);
82588 + atomic64_set_unchecked(&ic->i_ack_next, 0);
82589 #else
82590 ic->i_ack_next = 0;
82591 #endif
82592 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
82593 index 4503335..db566b4 100644
82594 --- a/net/rds/iw_recv.c
82595 +++ b/net/rds/iw_recv.c
82596 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
82597 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
82598 int ack_required)
82599 {
82600 - atomic64_set(&ic->i_ack_next, seq);
82601 + atomic64_set_unchecked(&ic->i_ack_next, seq);
82602 if (ack_required) {
82603 smp_mb__before_clear_bit();
82604 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
82605 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
82606 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
82607 smp_mb__after_clear_bit();
82608
82609 - return atomic64_read(&ic->i_ack_next);
82610 + return atomic64_read_unchecked(&ic->i_ack_next);
82611 }
82612 #endif
82613
82614 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
82615 index edac9ef..16bcb98 100644
82616 --- a/net/rds/tcp.c
82617 +++ b/net/rds/tcp.c
82618 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
82619 int val = 1;
82620
82621 set_fs(KERNEL_DS);
82622 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
82623 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
82624 sizeof(val));
82625 set_fs(oldfs);
82626 }
82627 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
82628 index 81cf5a4..b5826ff 100644
82629 --- a/net/rds/tcp_send.c
82630 +++ b/net/rds/tcp_send.c
82631 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
82632
82633 oldfs = get_fs();
82634 set_fs(KERNEL_DS);
82635 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
82636 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
82637 sizeof(val));
82638 set_fs(oldfs);
82639 }
82640 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
82641 index 05996d0..5a1dfe0 100644
82642 --- a/net/rxrpc/af_rxrpc.c
82643 +++ b/net/rxrpc/af_rxrpc.c
82644 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
82645 __be32 rxrpc_epoch;
82646
82647 /* current debugging ID */
82648 -atomic_t rxrpc_debug_id;
82649 +atomic_unchecked_t rxrpc_debug_id;
82650
82651 /* count of skbs currently in use */
82652 atomic_t rxrpc_n_skbs;
82653 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
82654 index e4d9cbc..b229649 100644
82655 --- a/net/rxrpc/ar-ack.c
82656 +++ b/net/rxrpc/ar-ack.c
82657 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
82658
82659 _enter("{%d,%d,%d,%d},",
82660 call->acks_hard, call->acks_unacked,
82661 - atomic_read(&call->sequence),
82662 + atomic_read_unchecked(&call->sequence),
82663 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
82664
82665 stop = 0;
82666 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
82667
82668 /* each Tx packet has a new serial number */
82669 sp->hdr.serial =
82670 - htonl(atomic_inc_return(&call->conn->serial));
82671 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
82672
82673 hdr = (struct rxrpc_header *) txb->head;
82674 hdr->serial = sp->hdr.serial;
82675 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
82676 */
82677 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
82678 {
82679 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
82680 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
82681 }
82682
82683 /*
82684 @@ -629,7 +629,7 @@ process_further:
82685
82686 latest = ntohl(sp->hdr.serial);
82687 hard = ntohl(ack.firstPacket);
82688 - tx = atomic_read(&call->sequence);
82689 + tx = atomic_read_unchecked(&call->sequence);
82690
82691 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
82692 latest,
82693 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
82694 goto maybe_reschedule;
82695
82696 send_ACK_with_skew:
82697 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
82698 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
82699 ntohl(ack.serial));
82700 send_ACK:
82701 mtu = call->conn->trans->peer->if_mtu;
82702 @@ -1173,7 +1173,7 @@ send_ACK:
82703 ackinfo.rxMTU = htonl(5692);
82704 ackinfo.jumbo_max = htonl(4);
82705
82706 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
82707 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
82708 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
82709 ntohl(hdr.serial),
82710 ntohs(ack.maxSkew),
82711 @@ -1191,7 +1191,7 @@ send_ACK:
82712 send_message:
82713 _debug("send message");
82714
82715 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
82716 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
82717 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
82718 send_message_2:
82719
82720 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
82721 index a3bbb36..3341fb9 100644
82722 --- a/net/rxrpc/ar-call.c
82723 +++ b/net/rxrpc/ar-call.c
82724 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
82725 spin_lock_init(&call->lock);
82726 rwlock_init(&call->state_lock);
82727 atomic_set(&call->usage, 1);
82728 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
82729 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82730 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
82731
82732 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
82733 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
82734 index 4106ca9..a338d7a 100644
82735 --- a/net/rxrpc/ar-connection.c
82736 +++ b/net/rxrpc/ar-connection.c
82737 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
82738 rwlock_init(&conn->lock);
82739 spin_lock_init(&conn->state_lock);
82740 atomic_set(&conn->usage, 1);
82741 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
82742 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82743 conn->avail_calls = RXRPC_MAXCALLS;
82744 conn->size_align = 4;
82745 conn->header_size = sizeof(struct rxrpc_header);
82746 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
82747 index e7ed43a..6afa140 100644
82748 --- a/net/rxrpc/ar-connevent.c
82749 +++ b/net/rxrpc/ar-connevent.c
82750 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
82751
82752 len = iov[0].iov_len + iov[1].iov_len;
82753
82754 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
82755 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
82756 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
82757
82758 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
82759 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
82760 index 529572f..c758ca7 100644
82761 --- a/net/rxrpc/ar-input.c
82762 +++ b/net/rxrpc/ar-input.c
82763 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
82764 /* track the latest serial number on this connection for ACK packet
82765 * information */
82766 serial = ntohl(sp->hdr.serial);
82767 - hi_serial = atomic_read(&call->conn->hi_serial);
82768 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
82769 while (serial > hi_serial)
82770 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
82771 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
82772 serial);
82773
82774 /* request ACK generation for any ACK or DATA packet that requests
82775 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
82776 index a693aca..81e7293 100644
82777 --- a/net/rxrpc/ar-internal.h
82778 +++ b/net/rxrpc/ar-internal.h
82779 @@ -272,8 +272,8 @@ struct rxrpc_connection {
82780 int error; /* error code for local abort */
82781 int debug_id; /* debug ID for printks */
82782 unsigned int call_counter; /* call ID counter */
82783 - atomic_t serial; /* packet serial number counter */
82784 - atomic_t hi_serial; /* highest serial number received */
82785 + atomic_unchecked_t serial; /* packet serial number counter */
82786 + atomic_unchecked_t hi_serial; /* highest serial number received */
82787 u8 avail_calls; /* number of calls available */
82788 u8 size_align; /* data size alignment (for security) */
82789 u8 header_size; /* rxrpc + security header size */
82790 @@ -346,7 +346,7 @@ struct rxrpc_call {
82791 spinlock_t lock;
82792 rwlock_t state_lock; /* lock for state transition */
82793 atomic_t usage;
82794 - atomic_t sequence; /* Tx data packet sequence counter */
82795 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
82796 u32 abort_code; /* local/remote abort code */
82797 enum { /* current state of call */
82798 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
82799 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
82800 */
82801 extern atomic_t rxrpc_n_skbs;
82802 extern __be32 rxrpc_epoch;
82803 -extern atomic_t rxrpc_debug_id;
82804 +extern atomic_unchecked_t rxrpc_debug_id;
82805 extern struct workqueue_struct *rxrpc_workqueue;
82806
82807 /*
82808 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
82809 index 87f7135..74d3703 100644
82810 --- a/net/rxrpc/ar-local.c
82811 +++ b/net/rxrpc/ar-local.c
82812 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
82813 spin_lock_init(&local->lock);
82814 rwlock_init(&local->services_lock);
82815 atomic_set(&local->usage, 1);
82816 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
82817 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82818 memcpy(&local->srx, srx, sizeof(*srx));
82819 }
82820
82821 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
82822 index e1ac183..b43e10e 100644
82823 --- a/net/rxrpc/ar-output.c
82824 +++ b/net/rxrpc/ar-output.c
82825 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
82826 sp->hdr.cid = call->cid;
82827 sp->hdr.callNumber = call->call_id;
82828 sp->hdr.seq =
82829 - htonl(atomic_inc_return(&call->sequence));
82830 + htonl(atomic_inc_return_unchecked(&call->sequence));
82831 sp->hdr.serial =
82832 - htonl(atomic_inc_return(&conn->serial));
82833 + htonl(atomic_inc_return_unchecked(&conn->serial));
82834 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
82835 sp->hdr.userStatus = 0;
82836 sp->hdr.securityIndex = conn->security_ix;
82837 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
82838 index bebaa43..2644591 100644
82839 --- a/net/rxrpc/ar-peer.c
82840 +++ b/net/rxrpc/ar-peer.c
82841 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
82842 INIT_LIST_HEAD(&peer->error_targets);
82843 spin_lock_init(&peer->lock);
82844 atomic_set(&peer->usage, 1);
82845 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
82846 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82847 memcpy(&peer->srx, srx, sizeof(*srx));
82848
82849 rxrpc_assess_MTU_size(peer);
82850 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
82851 index 38047f7..9f48511 100644
82852 --- a/net/rxrpc/ar-proc.c
82853 +++ b/net/rxrpc/ar-proc.c
82854 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
82855 atomic_read(&conn->usage),
82856 rxrpc_conn_states[conn->state],
82857 key_serial(conn->key),
82858 - atomic_read(&conn->serial),
82859 - atomic_read(&conn->hi_serial));
82860 + atomic_read_unchecked(&conn->serial),
82861 + atomic_read_unchecked(&conn->hi_serial));
82862
82863 return 0;
82864 }
82865 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
82866 index 92df566..87ec1bf 100644
82867 --- a/net/rxrpc/ar-transport.c
82868 +++ b/net/rxrpc/ar-transport.c
82869 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
82870 spin_lock_init(&trans->client_lock);
82871 rwlock_init(&trans->conn_lock);
82872 atomic_set(&trans->usage, 1);
82873 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
82874 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82875
82876 if (peer->srx.transport.family == AF_INET) {
82877 switch (peer->srx.transport_type) {
82878 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
82879 index f226709..0e735a8 100644
82880 --- a/net/rxrpc/rxkad.c
82881 +++ b/net/rxrpc/rxkad.c
82882 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
82883
82884 len = iov[0].iov_len + iov[1].iov_len;
82885
82886 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
82887 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
82888 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
82889
82890 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
82891 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
82892
82893 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
82894
82895 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
82896 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
82897 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
82898
82899 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
82900 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
82901 index 391a245..8f6a898 100644
82902 --- a/net/sctp/ipv6.c
82903 +++ b/net/sctp/ipv6.c
82904 @@ -1038,7 +1038,7 @@ void sctp_v6_pf_init(void)
82905
82906 void sctp_v6_pf_exit(void)
82907 {
82908 - list_del(&sctp_af_inet6.list);
82909 + pax_list_del((struct list_head *)&sctp_af_inet6.list);
82910 }
82911
82912 /* Initialize IPv6 support and register with socket layer. */
82913 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
82914 index 8c19e97..16264b8 100644
82915 --- a/net/sctp/proc.c
82916 +++ b/net/sctp/proc.c
82917 @@ -338,7 +338,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
82918 seq_printf(seq,
82919 "%8pK %8pK %-3d %-3d %-2d %-4d "
82920 "%4d %8d %8d %7d %5lu %-5d %5d ",
82921 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
82922 + assoc, sk,
82923 + sctp_sk(sk)->type, sk->sk_state,
82924 assoc->state, hash,
82925 assoc->assoc_id,
82926 assoc->sndbuf_used,
82927 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
82928 index f898b1c..60bf8f2 100644
82929 --- a/net/sctp/protocol.c
82930 +++ b/net/sctp/protocol.c
82931 @@ -834,8 +834,10 @@ int sctp_register_af(struct sctp_af *af)
82932 return 0;
82933 }
82934
82935 - INIT_LIST_HEAD(&af->list);
82936 - list_add_tail(&af->list, &sctp_address_families);
82937 + pax_open_kernel();
82938 + INIT_LIST_HEAD((struct list_head *)&af->list);
82939 + pax_close_kernel();
82940 + pax_list_add_tail((struct list_head *)&af->list, &sctp_address_families);
82941 return 1;
82942 }
82943
82944 @@ -1122,7 +1124,7 @@ static void sctp_v4_pf_init(void)
82945
82946 static void sctp_v4_pf_exit(void)
82947 {
82948 - list_del(&sctp_af_inet.list);
82949 + pax_list_del((struct list_head *)&sctp_af_inet.list);
82950 }
82951
82952 static int sctp_v4_protosw_init(void)
82953 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
82954 index cedd9bf..b1fddeb 100644
82955 --- a/net/sctp/socket.c
82956 +++ b/net/sctp/socket.c
82957 @@ -4665,6 +4665,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
82958 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
82959 if (space_left < addrlen)
82960 return -ENOMEM;
82961 + if (addrlen > sizeof(temp) || addrlen < 0)
82962 + return -EFAULT;
82963 if (copy_to_user(to, &temp, addrlen))
82964 return -EFAULT;
82965 to += addrlen;
82966 diff --git a/net/socket.c b/net/socket.c
82967 index 2ca51c7..45d0b31 100644
82968 --- a/net/socket.c
82969 +++ b/net/socket.c
82970 @@ -89,6 +89,7 @@
82971 #include <linux/magic.h>
82972 #include <linux/slab.h>
82973 #include <linux/xattr.h>
82974 +#include <linux/in.h>
82975
82976 #include <asm/uaccess.h>
82977 #include <asm/unistd.h>
82978 @@ -106,6 +107,8 @@
82979 #include <linux/sockios.h>
82980 #include <linux/atalk.h>
82981
82982 +#include <linux/grsock.h>
82983 +
82984 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
82985 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
82986 unsigned long nr_segs, loff_t pos);
82987 @@ -322,7 +325,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
82988 &sockfs_dentry_operations, SOCKFS_MAGIC);
82989 }
82990
82991 -static struct vfsmount *sock_mnt __read_mostly;
82992 +struct vfsmount *sock_mnt __read_mostly;
82993
82994 static struct file_system_type sock_fs_type = {
82995 .name = "sockfs",
82996 @@ -1270,6 +1273,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
82997 return -EAFNOSUPPORT;
82998 if (type < 0 || type >= SOCK_MAX)
82999 return -EINVAL;
83000 + if (protocol < 0)
83001 + return -EINVAL;
83002
83003 /* Compatibility.
83004
83005 @@ -1401,6 +1406,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
83006 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
83007 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
83008
83009 + if(!gr_search_socket(family, type, protocol)) {
83010 + retval = -EACCES;
83011 + goto out;
83012 + }
83013 +
83014 + if (gr_handle_sock_all(family, type, protocol)) {
83015 + retval = -EACCES;
83016 + goto out;
83017 + }
83018 +
83019 retval = sock_create(family, type, protocol, &sock);
83020 if (retval < 0)
83021 goto out;
83022 @@ -1528,6 +1543,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
83023 if (sock) {
83024 err = move_addr_to_kernel(umyaddr, addrlen, &address);
83025 if (err >= 0) {
83026 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
83027 + err = -EACCES;
83028 + goto error;
83029 + }
83030 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
83031 + if (err)
83032 + goto error;
83033 +
83034 err = security_socket_bind(sock,
83035 (struct sockaddr *)&address,
83036 addrlen);
83037 @@ -1536,6 +1559,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
83038 (struct sockaddr *)
83039 &address, addrlen);
83040 }
83041 +error:
83042 fput_light(sock->file, fput_needed);
83043 }
83044 return err;
83045 @@ -1559,10 +1583,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
83046 if ((unsigned int)backlog > somaxconn)
83047 backlog = somaxconn;
83048
83049 + if (gr_handle_sock_server_other(sock->sk)) {
83050 + err = -EPERM;
83051 + goto error;
83052 + }
83053 +
83054 + err = gr_search_listen(sock);
83055 + if (err)
83056 + goto error;
83057 +
83058 err = security_socket_listen(sock, backlog);
83059 if (!err)
83060 err = sock->ops->listen(sock, backlog);
83061
83062 +error:
83063 fput_light(sock->file, fput_needed);
83064 }
83065 return err;
83066 @@ -1606,6 +1640,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
83067 newsock->type = sock->type;
83068 newsock->ops = sock->ops;
83069
83070 + if (gr_handle_sock_server_other(sock->sk)) {
83071 + err = -EPERM;
83072 + sock_release(newsock);
83073 + goto out_put;
83074 + }
83075 +
83076 + err = gr_search_accept(sock);
83077 + if (err) {
83078 + sock_release(newsock);
83079 + goto out_put;
83080 + }
83081 +
83082 /*
83083 * We don't need try_module_get here, as the listening socket (sock)
83084 * has the protocol module (sock->ops->owner) held.
83085 @@ -1651,6 +1697,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
83086 fd_install(newfd, newfile);
83087 err = newfd;
83088
83089 + gr_attach_curr_ip(newsock->sk);
83090 +
83091 out_put:
83092 fput_light(sock->file, fput_needed);
83093 out:
83094 @@ -1683,6 +1731,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
83095 int, addrlen)
83096 {
83097 struct socket *sock;
83098 + struct sockaddr *sck;
83099 struct sockaddr_storage address;
83100 int err, fput_needed;
83101
83102 @@ -1693,6 +1742,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
83103 if (err < 0)
83104 goto out_put;
83105
83106 + sck = (struct sockaddr *)&address;
83107 +
83108 + if (gr_handle_sock_client(sck)) {
83109 + err = -EACCES;
83110 + goto out_put;
83111 + }
83112 +
83113 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
83114 + if (err)
83115 + goto out_put;
83116 +
83117 err =
83118 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
83119 if (err)
83120 @@ -2047,7 +2107,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
83121 * checking falls down on this.
83122 */
83123 if (copy_from_user(ctl_buf,
83124 - (void __user __force *)msg_sys->msg_control,
83125 + (void __force_user *)msg_sys->msg_control,
83126 ctl_len))
83127 goto out_freectl;
83128 msg_sys->msg_control = ctl_buf;
83129 @@ -2215,7 +2275,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
83130 * kernel msghdr to use the kernel address space)
83131 */
83132
83133 - uaddr = (__force void __user *)msg_sys->msg_name;
83134 + uaddr = (void __force_user *)msg_sys->msg_name;
83135 uaddr_len = COMPAT_NAMELEN(msg);
83136 if (MSG_CMSG_COMPAT & flags) {
83137 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
83138 @@ -2838,7 +2898,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
83139 }
83140
83141 ifr = compat_alloc_user_space(buf_size);
83142 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
83143 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
83144
83145 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
83146 return -EFAULT;
83147 @@ -2862,12 +2922,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
83148 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
83149
83150 if (copy_in_user(rxnfc, compat_rxnfc,
83151 - (void *)(&rxnfc->fs.m_ext + 1) -
83152 - (void *)rxnfc) ||
83153 + (void __user *)(&rxnfc->fs.m_ext + 1) -
83154 + (void __user *)rxnfc) ||
83155 copy_in_user(&rxnfc->fs.ring_cookie,
83156 &compat_rxnfc->fs.ring_cookie,
83157 - (void *)(&rxnfc->fs.location + 1) -
83158 - (void *)&rxnfc->fs.ring_cookie) ||
83159 + (void __user *)(&rxnfc->fs.location + 1) -
83160 + (void __user *)&rxnfc->fs.ring_cookie) ||
83161 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
83162 sizeof(rxnfc->rule_cnt)))
83163 return -EFAULT;
83164 @@ -2879,12 +2939,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
83165
83166 if (convert_out) {
83167 if (copy_in_user(compat_rxnfc, rxnfc,
83168 - (const void *)(&rxnfc->fs.m_ext + 1) -
83169 - (const void *)rxnfc) ||
83170 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
83171 + (const void __user *)rxnfc) ||
83172 copy_in_user(&compat_rxnfc->fs.ring_cookie,
83173 &rxnfc->fs.ring_cookie,
83174 - (const void *)(&rxnfc->fs.location + 1) -
83175 - (const void *)&rxnfc->fs.ring_cookie) ||
83176 + (const void __user *)(&rxnfc->fs.location + 1) -
83177 + (const void __user *)&rxnfc->fs.ring_cookie) ||
83178 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
83179 sizeof(rxnfc->rule_cnt)))
83180 return -EFAULT;
83181 @@ -2954,7 +3014,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
83182 old_fs = get_fs();
83183 set_fs(KERNEL_DS);
83184 err = dev_ioctl(net, cmd,
83185 - (struct ifreq __user __force *) &kifr);
83186 + (struct ifreq __force_user *) &kifr);
83187 set_fs(old_fs);
83188
83189 return err;
83190 @@ -3063,7 +3123,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
83191
83192 old_fs = get_fs();
83193 set_fs(KERNEL_DS);
83194 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
83195 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
83196 set_fs(old_fs);
83197
83198 if (cmd == SIOCGIFMAP && !err) {
83199 @@ -3168,7 +3228,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
83200 ret |= __get_user(rtdev, &(ur4->rt_dev));
83201 if (rtdev) {
83202 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
83203 - r4.rt_dev = (char __user __force *)devname;
83204 + r4.rt_dev = (char __force_user *)devname;
83205 devname[15] = 0;
83206 } else
83207 r4.rt_dev = NULL;
83208 @@ -3394,8 +3454,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
83209 int __user *uoptlen;
83210 int err;
83211
83212 - uoptval = (char __user __force *) optval;
83213 - uoptlen = (int __user __force *) optlen;
83214 + uoptval = (char __force_user *) optval;
83215 + uoptlen = (int __force_user *) optlen;
83216
83217 set_fs(KERNEL_DS);
83218 if (level == SOL_SOCKET)
83219 @@ -3415,7 +3475,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
83220 char __user *uoptval;
83221 int err;
83222
83223 - uoptval = (char __user __force *) optval;
83224 + uoptval = (char __force_user *) optval;
83225
83226 set_fs(KERNEL_DS);
83227 if (level == SOL_SOCKET)
83228 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
83229 index fb20f25..e3ba316 100644
83230 --- a/net/sunrpc/sched.c
83231 +++ b/net/sunrpc/sched.c
83232 @@ -259,9 +259,9 @@ static int rpc_wait_bit_killable(void *word)
83233 #ifdef RPC_DEBUG
83234 static void rpc_task_set_debuginfo(struct rpc_task *task)
83235 {
83236 - static atomic_t rpc_pid;
83237 + static atomic_unchecked_t rpc_pid;
83238
83239 - task->tk_pid = atomic_inc_return(&rpc_pid);
83240 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
83241 }
83242 #else
83243 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
83244 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
83245 index 8343737..677025e 100644
83246 --- a/net/sunrpc/xprtrdma/svc_rdma.c
83247 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
83248 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
83249 static unsigned int min_max_inline = 4096;
83250 static unsigned int max_max_inline = 65536;
83251
83252 -atomic_t rdma_stat_recv;
83253 -atomic_t rdma_stat_read;
83254 -atomic_t rdma_stat_write;
83255 -atomic_t rdma_stat_sq_starve;
83256 -atomic_t rdma_stat_rq_starve;
83257 -atomic_t rdma_stat_rq_poll;
83258 -atomic_t rdma_stat_rq_prod;
83259 -atomic_t rdma_stat_sq_poll;
83260 -atomic_t rdma_stat_sq_prod;
83261 +atomic_unchecked_t rdma_stat_recv;
83262 +atomic_unchecked_t rdma_stat_read;
83263 +atomic_unchecked_t rdma_stat_write;
83264 +atomic_unchecked_t rdma_stat_sq_starve;
83265 +atomic_unchecked_t rdma_stat_rq_starve;
83266 +atomic_unchecked_t rdma_stat_rq_poll;
83267 +atomic_unchecked_t rdma_stat_rq_prod;
83268 +atomic_unchecked_t rdma_stat_sq_poll;
83269 +atomic_unchecked_t rdma_stat_sq_prod;
83270
83271 /* Temporary NFS request map and context caches */
83272 struct kmem_cache *svc_rdma_map_cachep;
83273 @@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
83274 len -= *ppos;
83275 if (len > *lenp)
83276 len = *lenp;
83277 - if (len && copy_to_user(buffer, str_buf, len))
83278 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
83279 return -EFAULT;
83280 *lenp = len;
83281 *ppos += len;
83282 @@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
83283 {
83284 .procname = "rdma_stat_read",
83285 .data = &rdma_stat_read,
83286 - .maxlen = sizeof(atomic_t),
83287 + .maxlen = sizeof(atomic_unchecked_t),
83288 .mode = 0644,
83289 .proc_handler = read_reset_stat,
83290 },
83291 {
83292 .procname = "rdma_stat_recv",
83293 .data = &rdma_stat_recv,
83294 - .maxlen = sizeof(atomic_t),
83295 + .maxlen = sizeof(atomic_unchecked_t),
83296 .mode = 0644,
83297 .proc_handler = read_reset_stat,
83298 },
83299 {
83300 .procname = "rdma_stat_write",
83301 .data = &rdma_stat_write,
83302 - .maxlen = sizeof(atomic_t),
83303 + .maxlen = sizeof(atomic_unchecked_t),
83304 .mode = 0644,
83305 .proc_handler = read_reset_stat,
83306 },
83307 {
83308 .procname = "rdma_stat_sq_starve",
83309 .data = &rdma_stat_sq_starve,
83310 - .maxlen = sizeof(atomic_t),
83311 + .maxlen = sizeof(atomic_unchecked_t),
83312 .mode = 0644,
83313 .proc_handler = read_reset_stat,
83314 },
83315 {
83316 .procname = "rdma_stat_rq_starve",
83317 .data = &rdma_stat_rq_starve,
83318 - .maxlen = sizeof(atomic_t),
83319 + .maxlen = sizeof(atomic_unchecked_t),
83320 .mode = 0644,
83321 .proc_handler = read_reset_stat,
83322 },
83323 {
83324 .procname = "rdma_stat_rq_poll",
83325 .data = &rdma_stat_rq_poll,
83326 - .maxlen = sizeof(atomic_t),
83327 + .maxlen = sizeof(atomic_unchecked_t),
83328 .mode = 0644,
83329 .proc_handler = read_reset_stat,
83330 },
83331 {
83332 .procname = "rdma_stat_rq_prod",
83333 .data = &rdma_stat_rq_prod,
83334 - .maxlen = sizeof(atomic_t),
83335 + .maxlen = sizeof(atomic_unchecked_t),
83336 .mode = 0644,
83337 .proc_handler = read_reset_stat,
83338 },
83339 {
83340 .procname = "rdma_stat_sq_poll",
83341 .data = &rdma_stat_sq_poll,
83342 - .maxlen = sizeof(atomic_t),
83343 + .maxlen = sizeof(atomic_unchecked_t),
83344 .mode = 0644,
83345 .proc_handler = read_reset_stat,
83346 },
83347 {
83348 .procname = "rdma_stat_sq_prod",
83349 .data = &rdma_stat_sq_prod,
83350 - .maxlen = sizeof(atomic_t),
83351 + .maxlen = sizeof(atomic_unchecked_t),
83352 .mode = 0644,
83353 .proc_handler = read_reset_stat,
83354 },
83355 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83356 index 0ce7552..d074459 100644
83357 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83358 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83359 @@ -501,7 +501,7 @@ next_sge:
83360 svc_rdma_put_context(ctxt, 0);
83361 goto out;
83362 }
83363 - atomic_inc(&rdma_stat_read);
83364 + atomic_inc_unchecked(&rdma_stat_read);
83365
83366 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
83367 chl_map->ch[ch_no].count -= read_wr.num_sge;
83368 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
83369 dto_q);
83370 list_del_init(&ctxt->dto_q);
83371 } else {
83372 - atomic_inc(&rdma_stat_rq_starve);
83373 + atomic_inc_unchecked(&rdma_stat_rq_starve);
83374 clear_bit(XPT_DATA, &xprt->xpt_flags);
83375 ctxt = NULL;
83376 }
83377 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
83378 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
83379 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
83380 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
83381 - atomic_inc(&rdma_stat_recv);
83382 + atomic_inc_unchecked(&rdma_stat_recv);
83383
83384 /* Build up the XDR from the receive buffers. */
83385 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
83386 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83387 index c1d124d..acfc59e 100644
83388 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83389 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83390 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
83391 write_wr.wr.rdma.remote_addr = to;
83392
83393 /* Post It */
83394 - atomic_inc(&rdma_stat_write);
83395 + atomic_inc_unchecked(&rdma_stat_write);
83396 if (svc_rdma_send(xprt, &write_wr))
83397 goto err;
83398 return 0;
83399 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
83400 index 62e4f9b..dd3f2d7 100644
83401 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
83402 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
83403 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
83404 return;
83405
83406 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
83407 - atomic_inc(&rdma_stat_rq_poll);
83408 + atomic_inc_unchecked(&rdma_stat_rq_poll);
83409
83410 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
83411 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
83412 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
83413 }
83414
83415 if (ctxt)
83416 - atomic_inc(&rdma_stat_rq_prod);
83417 + atomic_inc_unchecked(&rdma_stat_rq_prod);
83418
83419 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
83420 /*
83421 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
83422 return;
83423
83424 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
83425 - atomic_inc(&rdma_stat_sq_poll);
83426 + atomic_inc_unchecked(&rdma_stat_sq_poll);
83427 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
83428 if (wc.status != IB_WC_SUCCESS)
83429 /* Close the transport */
83430 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
83431 }
83432
83433 if (ctxt)
83434 - atomic_inc(&rdma_stat_sq_prod);
83435 + atomic_inc_unchecked(&rdma_stat_sq_prod);
83436 }
83437
83438 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
83439 @@ -1262,7 +1262,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
83440 spin_lock_bh(&xprt->sc_lock);
83441 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
83442 spin_unlock_bh(&xprt->sc_lock);
83443 - atomic_inc(&rdma_stat_sq_starve);
83444 + atomic_inc_unchecked(&rdma_stat_sq_starve);
83445
83446 /* See if we can opportunistically reap SQ WR to make room */
83447 sq_cq_reap(xprt);
83448 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
83449 index 9bc6db0..47ac8c0 100644
83450 --- a/net/sysctl_net.c
83451 +++ b/net/sysctl_net.c
83452 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head,
83453 kgid_t root_gid = make_kgid(net->user_ns, 0);
83454
83455 /* Allow network administrator to have same access as root. */
83456 - if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
83457 + if (ns_capable_nolog(net->user_ns, CAP_NET_ADMIN) ||
83458 uid_eq(root_uid, current_uid())) {
83459 int mode = (table->mode >> 6) & 7;
83460 return (mode << 6) | (mode << 3) | mode;
83461 diff --git a/net/tipc/link.c b/net/tipc/link.c
83462 index daa6080..02d357f 100644
83463 --- a/net/tipc/link.c
83464 +++ b/net/tipc/link.c
83465 @@ -1201,7 +1201,7 @@ static int link_send_sections_long(struct tipc_port *sender,
83466 struct tipc_msg fragm_hdr;
83467 struct sk_buff *buf, *buf_chain, *prev;
83468 u32 fragm_crs, fragm_rest, hsz, sect_rest;
83469 - const unchar *sect_crs;
83470 + const unchar __user *sect_crs;
83471 int curr_sect;
83472 u32 fragm_no;
83473
83474 @@ -1242,7 +1242,7 @@ again:
83475
83476 if (!sect_rest) {
83477 sect_rest = msg_sect[++curr_sect].iov_len;
83478 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
83479 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
83480 }
83481
83482 if (sect_rest < fragm_rest)
83483 @@ -1261,7 +1261,7 @@ error:
83484 }
83485 } else
83486 skb_copy_to_linear_data_offset(buf, fragm_crs,
83487 - sect_crs, sz);
83488 + (const void __force_kernel *)sect_crs, sz);
83489 sect_crs += sz;
83490 sect_rest -= sz;
83491 fragm_crs += sz;
83492 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
83493 index f2db8a8..9245aa4 100644
83494 --- a/net/tipc/msg.c
83495 +++ b/net/tipc/msg.c
83496 @@ -98,7 +98,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
83497 msg_sect[cnt].iov_len);
83498 else
83499 skb_copy_to_linear_data_offset(*buf, pos,
83500 - msg_sect[cnt].iov_base,
83501 + (const void __force_kernel *)msg_sect[cnt].iov_base,
83502 msg_sect[cnt].iov_len);
83503 pos += msg_sect[cnt].iov_len;
83504 }
83505 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
83506 index 6b42d47..2ac24d5 100644
83507 --- a/net/tipc/subscr.c
83508 +++ b/net/tipc/subscr.c
83509 @@ -96,7 +96,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
83510 {
83511 struct iovec msg_sect;
83512
83513 - msg_sect.iov_base = (void *)&sub->evt;
83514 + msg_sect.iov_base = (void __force_user *)&sub->evt;
83515 msg_sect.iov_len = sizeof(struct tipc_event);
83516
83517 sub->evt.event = htohl(event, sub->swap);
83518 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
83519 index 5b5c876..3127bf7 100644
83520 --- a/net/unix/af_unix.c
83521 +++ b/net/unix/af_unix.c
83522 @@ -786,6 +786,12 @@ static struct sock *unix_find_other(struct net *net,
83523 err = -ECONNREFUSED;
83524 if (!S_ISSOCK(inode->i_mode))
83525 goto put_fail;
83526 +
83527 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
83528 + err = -EACCES;
83529 + goto put_fail;
83530 + }
83531 +
83532 u = unix_find_socket_byinode(inode);
83533 if (!u)
83534 goto put_fail;
83535 @@ -806,6 +812,13 @@ static struct sock *unix_find_other(struct net *net,
83536 if (u) {
83537 struct dentry *dentry;
83538 dentry = unix_sk(u)->path.dentry;
83539 +
83540 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
83541 + err = -EPERM;
83542 + sock_put(u);
83543 + goto fail;
83544 + }
83545 +
83546 if (dentry)
83547 touch_atime(&unix_sk(u)->path);
83548 } else
83549 @@ -839,12 +852,18 @@ static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
83550 */
83551 err = security_path_mknod(&path, dentry, mode, 0);
83552 if (!err) {
83553 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
83554 + err = -EACCES;
83555 + goto out;
83556 + }
83557 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
83558 if (!err) {
83559 res->mnt = mntget(path.mnt);
83560 res->dentry = dget(dentry);
83561 + gr_handle_create(dentry, path.mnt);
83562 }
83563 }
83564 +out:
83565 done_path_create(&path, dentry);
83566 return err;
83567 }
83568 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
83569 index c8717c1..08539f5 100644
83570 --- a/net/wireless/wext-core.c
83571 +++ b/net/wireless/wext-core.c
83572 @@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
83573 */
83574
83575 /* Support for very large requests */
83576 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
83577 - (user_length > descr->max_tokens)) {
83578 + if (user_length > descr->max_tokens) {
83579 /* Allow userspace to GET more than max so
83580 * we can support any size GET requests.
83581 * There is still a limit : -ENOMEM.
83582 @@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
83583 }
83584 }
83585
83586 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
83587 - /*
83588 - * If this is a GET, but not NOMAX, it means that the extra
83589 - * data is not bounded by userspace, but by max_tokens. Thus
83590 - * set the length to max_tokens. This matches the extra data
83591 - * allocation.
83592 - * The driver should fill it with the number of tokens it
83593 - * provided, and it may check iwp->length rather than having
83594 - * knowledge of max_tokens. If the driver doesn't change the
83595 - * iwp->length, this ioctl just copies back max_token tokens
83596 - * filled with zeroes. Hopefully the driver isn't claiming
83597 - * them to be valid data.
83598 - */
83599 - iwp->length = descr->max_tokens;
83600 - }
83601 -
83602 err = handler(dev, info, (union iwreq_data *) iwp, extra);
83603
83604 iwp->length += essid_compat;
83605 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
83606 index 07c5857..edc6dc0 100644
83607 --- a/net/xfrm/xfrm_policy.c
83608 +++ b/net/xfrm/xfrm_policy.c
83609 @@ -317,7 +317,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
83610 {
83611 policy->walk.dead = 1;
83612
83613 - atomic_inc(&policy->genid);
83614 + atomic_inc_unchecked(&policy->genid);
83615
83616 if (del_timer(&policy->timer))
83617 xfrm_pol_put(policy);
83618 @@ -601,7 +601,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
83619 hlist_add_head(&policy->bydst, chain);
83620 xfrm_pol_hold(policy);
83621 net->xfrm.policy_count[dir]++;
83622 - atomic_inc(&flow_cache_genid);
83623 + atomic_inc_unchecked(&flow_cache_genid);
83624 rt_genid_bump(net);
83625 if (delpol)
83626 __xfrm_policy_unlink(delpol, dir);
83627 @@ -1550,7 +1550,7 @@ free_dst:
83628 goto out;
83629 }
83630
83631 -static int inline
83632 +static inline int
83633 xfrm_dst_alloc_copy(void **target, const void *src, int size)
83634 {
83635 if (!*target) {
83636 @@ -1562,7 +1562,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
83637 return 0;
83638 }
83639
83640 -static int inline
83641 +static inline int
83642 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
83643 {
83644 #ifdef CONFIG_XFRM_SUB_POLICY
83645 @@ -1574,7 +1574,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
83646 #endif
83647 }
83648
83649 -static int inline
83650 +static inline int
83651 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
83652 {
83653 #ifdef CONFIG_XFRM_SUB_POLICY
83654 @@ -1668,7 +1668,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
83655
83656 xdst->num_pols = num_pols;
83657 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
83658 - xdst->policy_genid = atomic_read(&pols[0]->genid);
83659 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
83660
83661 return xdst;
83662 }
83663 @@ -2369,7 +2369,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
83664 if (xdst->xfrm_genid != dst->xfrm->genid)
83665 return 0;
83666 if (xdst->num_pols > 0 &&
83667 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
83668 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
83669 return 0;
83670
83671 mtu = dst_mtu(dst->child);
83672 @@ -2896,7 +2896,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
83673 sizeof(pol->xfrm_vec[i].saddr));
83674 pol->xfrm_vec[i].encap_family = mp->new_family;
83675 /* flush bundles */
83676 - atomic_inc(&pol->genid);
83677 + atomic_inc_unchecked(&pol->genid);
83678 }
83679 }
83680
83681 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
83682 index 3459692..eefb515 100644
83683 --- a/net/xfrm/xfrm_state.c
83684 +++ b/net/xfrm/xfrm_state.c
83685 @@ -278,7 +278,9 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
83686 if (!try_module_get(afinfo->owner))
83687 goto out;
83688
83689 - mode->afinfo = afinfo;
83690 + pax_open_kernel();
83691 + *(void **)&mode->afinfo = afinfo;
83692 + pax_close_kernel();
83693 modemap[mode->encap] = mode;
83694 err = 0;
83695
83696 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
83697 index 0e801c3..5c8ad3b 100644
83698 --- a/scripts/Makefile.build
83699 +++ b/scripts/Makefile.build
83700 @@ -111,7 +111,7 @@ endif
83701 endif
83702
83703 # Do not include host rules unless needed
83704 -ifneq ($(hostprogs-y)$(hostprogs-m),)
83705 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
83706 include scripts/Makefile.host
83707 endif
83708
83709 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
83710 index 686cb0d..9d653bf 100644
83711 --- a/scripts/Makefile.clean
83712 +++ b/scripts/Makefile.clean
83713 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
83714 __clean-files := $(extra-y) $(always) \
83715 $(targets) $(clean-files) \
83716 $(host-progs) \
83717 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
83718 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
83719 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
83720
83721 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
83722
83723 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
83724 index 1ac414f..38575f7 100644
83725 --- a/scripts/Makefile.host
83726 +++ b/scripts/Makefile.host
83727 @@ -31,6 +31,8 @@
83728 # Note: Shared libraries consisting of C++ files are not supported
83729
83730 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
83731 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
83732 +__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
83733
83734 # C code
83735 # Executables compiled from a single .c file
83736 @@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
83737 # Shared libaries (only .c supported)
83738 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
83739 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
83740 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
83741 +host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
83742 # Remove .so files from "xxx-objs"
83743 host-cobjs := $(filter-out %.so,$(host-cobjs))
83744 +host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
83745
83746 -#Object (.o) files used by the shared libaries
83747 +# Object (.o) files used by the shared libaries
83748 host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
83749 +host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
83750
83751 # output directory for programs/.o files
83752 # hostprogs-y := tools/build may have been specified. Retrieve directory
83753 @@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
83754 host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
83755 host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
83756 host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
83757 +host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
83758 host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
83759 +host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
83760 host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
83761
83762 obj-dirs += $(host-objdirs)
83763 @@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
83764 $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
83765 $(call if_changed_dep,host-cshobjs)
83766
83767 +# Compile .c file, create position independent .o file
83768 +# host-cxxshobjs -> .o
83769 +quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
83770 + cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
83771 +$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
83772 + $(call if_changed_dep,host-cxxshobjs)
83773 +
83774 # Link a shared library, based on position independent .o files
83775 # *.o -> .so shared library (host-cshlib)
83776 quiet_cmd_host-cshlib = HOSTLLD -shared $@
83777 @@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
83778 $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
83779 $(call if_changed,host-cshlib)
83780
83781 +# Link a shared library, based on position independent .o files
83782 +# *.o -> .so shared library (host-cxxshlib)
83783 +quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
83784 + cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
83785 + $(addprefix $(obj)/,$($(@F:.so=-objs))) \
83786 + $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
83787 +$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
83788 + $(call if_changed,host-cxxshlib)
83789 +
83790 targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
83791 - $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
83792 + $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
83793
83794 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
83795 index cb1f50c..cef2a7c 100644
83796 --- a/scripts/basic/fixdep.c
83797 +++ b/scripts/basic/fixdep.c
83798 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
83799 /*
83800 * Lookup a value in the configuration string.
83801 */
83802 -static int is_defined_config(const char *name, int len, unsigned int hash)
83803 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
83804 {
83805 struct item *aux;
83806
83807 @@ -211,10 +211,10 @@ static void clear_config(void)
83808 /*
83809 * Record the use of a CONFIG_* word.
83810 */
83811 -static void use_config(const char *m, int slen)
83812 +static void use_config(const char *m, unsigned int slen)
83813 {
83814 unsigned int hash = strhash(m, slen);
83815 - int c, i;
83816 + unsigned int c, i;
83817
83818 if (is_defined_config(m, slen, hash))
83819 return;
83820 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
83821
83822 static void parse_config_file(const char *map, size_t len)
83823 {
83824 - const int *end = (const int *) (map + len);
83825 + const unsigned int *end = (const unsigned int *) (map + len);
83826 /* start at +1, so that p can never be < map */
83827 - const int *m = (const int *) map + 1;
83828 + const unsigned int *m = (const unsigned int *) map + 1;
83829 const char *p, *q;
83830
83831 for (; m < end; m++) {
83832 @@ -406,7 +406,7 @@ static void print_deps(void)
83833 static void traps(void)
83834 {
83835 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
83836 - int *p = (int *)test;
83837 + unsigned int *p = (unsigned int *)test;
83838
83839 if (*p != INT_CONF) {
83840 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
83841 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
83842 new file mode 100644
83843 index 0000000..5e0222d
83844 --- /dev/null
83845 +++ b/scripts/gcc-plugin.sh
83846 @@ -0,0 +1,17 @@
83847 +#!/bin/bash
83848 +plugincc=`$1 -E -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
83849 +#include "gcc-plugin.h"
83850 +#include "tree.h"
83851 +#include "tm.h"
83852 +#include "rtl.h"
83853 +#ifdef ENABLE_BUILD_WITH_CXX
83854 +#warning $2
83855 +#else
83856 +#warning $1
83857 +#endif
83858 +EOF`
83859 +if [ $? -eq 0 ]
83860 +then
83861 + [[ "$plugincc" =~ "$1" ]] && echo "$1"
83862 + [[ "$plugincc" =~ "$2" ]] && echo "$2"
83863 +fi
83864 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
83865 index b3d907e..a4782ab 100644
83866 --- a/scripts/link-vmlinux.sh
83867 +++ b/scripts/link-vmlinux.sh
83868 @@ -152,7 +152,7 @@ else
83869 fi;
83870
83871 # final build of init/
83872 -${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
83873 +${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
83874
83875 kallsymso=""
83876 kallsyms_vmlinux=""
83877 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
83878 index df4fc23..0ea719d 100644
83879 --- a/scripts/mod/file2alias.c
83880 +++ b/scripts/mod/file2alias.c
83881 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
83882 unsigned long size, unsigned long id_size,
83883 void *symval)
83884 {
83885 - int i;
83886 + unsigned int i;
83887
83888 if (size % id_size || size < id_size) {
83889 if (cross_build != 0)
83890 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
83891 /* USB is special because the bcdDevice can be matched against a numeric range */
83892 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
83893 static void do_usb_entry(struct usb_device_id *id,
83894 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
83895 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
83896 unsigned char range_lo, unsigned char range_hi,
83897 unsigned char max, struct module *mod)
83898 {
83899 @@ -262,7 +262,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
83900 {
83901 unsigned int devlo, devhi;
83902 unsigned char chi, clo, max;
83903 - int ndigits;
83904 + unsigned int ndigits;
83905
83906 id->match_flags = TO_NATIVE(id->match_flags);
83907 id->idVendor = TO_NATIVE(id->idVendor);
83908 @@ -507,7 +507,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
83909 for (i = 0; i < count; i++) {
83910 const char *id = (char *)devs[i].id;
83911 char acpi_id[sizeof(devs[0].id)];
83912 - int j;
83913 + unsigned int j;
83914
83915 buf_printf(&mod->dev_table_buf,
83916 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
83917 @@ -537,7 +537,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
83918
83919 for (j = 0; j < PNP_MAX_DEVICES; j++) {
83920 const char *id = (char *)card->devs[j].id;
83921 - int i2, j2;
83922 + unsigned int i2, j2;
83923 int dup = 0;
83924
83925 if (!id[0])
83926 @@ -563,7 +563,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
83927 /* add an individual alias for every device entry */
83928 if (!dup) {
83929 char acpi_id[sizeof(card->devs[0].id)];
83930 - int k;
83931 + unsigned int k;
83932
83933 buf_printf(&mod->dev_table_buf,
83934 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
83935 @@ -888,7 +888,7 @@ static void dmi_ascii_filter(char *d, const char *s)
83936 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
83937 char *alias)
83938 {
83939 - int i, j;
83940 + unsigned int i, j;
83941
83942 sprintf(alias, "dmi*");
83943
83944 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
83945 index ff36c50..7ab4fa9 100644
83946 --- a/scripts/mod/modpost.c
83947 +++ b/scripts/mod/modpost.c
83948 @@ -929,6 +929,7 @@ enum mismatch {
83949 ANY_INIT_TO_ANY_EXIT,
83950 ANY_EXIT_TO_ANY_INIT,
83951 EXPORT_TO_INIT_EXIT,
83952 + DATA_TO_TEXT
83953 };
83954
83955 struct sectioncheck {
83956 @@ -1043,6 +1044,12 @@ const struct sectioncheck sectioncheck[] = {
83957 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
83958 .mismatch = EXPORT_TO_INIT_EXIT,
83959 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
83960 +},
83961 +/* Do not reference code from writable data */
83962 +{
83963 + .fromsec = { DATA_SECTIONS, NULL },
83964 + .tosec = { TEXT_SECTIONS, NULL },
83965 + .mismatch = DATA_TO_TEXT
83966 }
83967 };
83968
83969 @@ -1165,10 +1172,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
83970 continue;
83971 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
83972 continue;
83973 - if (sym->st_value == addr)
83974 - return sym;
83975 /* Find a symbol nearby - addr are maybe negative */
83976 d = sym->st_value - addr;
83977 + if (d == 0)
83978 + return sym;
83979 if (d < 0)
83980 d = addr - sym->st_value;
83981 if (d < distance) {
83982 @@ -1447,6 +1454,14 @@ static void report_sec_mismatch(const char *modname,
83983 tosym, prl_to, prl_to, tosym);
83984 free(prl_to);
83985 break;
83986 + case DATA_TO_TEXT:
83987 +#if 0
83988 + fprintf(stderr,
83989 + "The %s %s:%s references\n"
83990 + "the %s %s:%s%s\n",
83991 + from, fromsec, fromsym, to, tosec, tosym, to_p);
83992 +#endif
83993 + break;
83994 }
83995 fprintf(stderr, "\n");
83996 }
83997 @@ -1681,7 +1696,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
83998 static void check_sec_ref(struct module *mod, const char *modname,
83999 struct elf_info *elf)
84000 {
84001 - int i;
84002 + unsigned int i;
84003 Elf_Shdr *sechdrs = elf->sechdrs;
84004
84005 /* Walk through all sections */
84006 @@ -1779,7 +1794,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
84007 va_end(ap);
84008 }
84009
84010 -void buf_write(struct buffer *buf, const char *s, int len)
84011 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
84012 {
84013 if (buf->size - buf->pos < len) {
84014 buf->size += len + SZ;
84015 @@ -1997,7 +2012,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
84016 if (fstat(fileno(file), &st) < 0)
84017 goto close_write;
84018
84019 - if (st.st_size != b->pos)
84020 + if (st.st_size != (off_t)b->pos)
84021 goto close_write;
84022
84023 tmp = NOFAIL(malloc(b->pos));
84024 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
84025 index 51207e4..f7d603d 100644
84026 --- a/scripts/mod/modpost.h
84027 +++ b/scripts/mod/modpost.h
84028 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
84029
84030 struct buffer {
84031 char *p;
84032 - int pos;
84033 - int size;
84034 + unsigned int pos;
84035 + unsigned int size;
84036 };
84037
84038 void __attribute__((format(printf, 2, 3)))
84039 buf_printf(struct buffer *buf, const char *fmt, ...);
84040
84041 void
84042 -buf_write(struct buffer *buf, const char *s, int len);
84043 +buf_write(struct buffer *buf, const char *s, unsigned int len);
84044
84045 struct module {
84046 struct module *next;
84047 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
84048 index 9dfcd6d..099068e 100644
84049 --- a/scripts/mod/sumversion.c
84050 +++ b/scripts/mod/sumversion.c
84051 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
84052 goto out;
84053 }
84054
84055 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
84056 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
84057 warn("writing sum in %s failed: %s\n",
84058 filename, strerror(errno));
84059 goto out;
84060 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
84061 index 68bb4ef..2f419e1 100644
84062 --- a/scripts/pnmtologo.c
84063 +++ b/scripts/pnmtologo.c
84064 @@ -244,14 +244,14 @@ static void write_header(void)
84065 fprintf(out, " * Linux logo %s\n", logoname);
84066 fputs(" */\n\n", out);
84067 fputs("#include <linux/linux_logo.h>\n\n", out);
84068 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
84069 + fprintf(out, "static unsigned char %s_data[] = {\n",
84070 logoname);
84071 }
84072
84073 static void write_footer(void)
84074 {
84075 fputs("\n};\n\n", out);
84076 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
84077 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
84078 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
84079 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
84080 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
84081 @@ -381,7 +381,7 @@ static void write_logo_clut224(void)
84082 fputs("\n};\n\n", out);
84083
84084 /* write logo clut */
84085 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
84086 + fprintf(out, "static unsigned char %s_clut[] = {\n",
84087 logoname);
84088 write_hex_cnt = 0;
84089 for (i = 0; i < logo_clutsize; i++) {
84090 diff --git a/security/Kconfig b/security/Kconfig
84091 index e9c6ac7..ab9590d 100644
84092 --- a/security/Kconfig
84093 +++ b/security/Kconfig
84094 @@ -4,6 +4,902 @@
84095
84096 menu "Security options"
84097
84098 +menu "Grsecurity"
84099 +
84100 + config ARCH_TRACK_EXEC_LIMIT
84101 + bool
84102 +
84103 + config PAX_KERNEXEC_PLUGIN
84104 + bool
84105 +
84106 + config PAX_PER_CPU_PGD
84107 + bool
84108 +
84109 + config TASK_SIZE_MAX_SHIFT
84110 + int
84111 + depends on X86_64
84112 + default 47 if !PAX_PER_CPU_PGD
84113 + default 42 if PAX_PER_CPU_PGD
84114 +
84115 + config PAX_ENABLE_PAE
84116 + bool
84117 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
84118 +
84119 + config PAX_USERCOPY_SLABS
84120 + bool
84121 +
84122 +config GRKERNSEC
84123 + bool "Grsecurity"
84124 + select CRYPTO
84125 + select CRYPTO_SHA256
84126 + select PROC_FS
84127 + select STOP_MACHINE
84128 + help
84129 + If you say Y here, you will be able to configure many features
84130 + that will enhance the security of your system. It is highly
84131 + recommended that you say Y here and read through the help
84132 + for each option so that you fully understand the features and
84133 + can evaluate their usefulness for your machine.
84134 +
84135 +choice
84136 + prompt "Configuration Method"
84137 + depends on GRKERNSEC
84138 + default GRKERNSEC_CONFIG_CUSTOM
84139 + help
84140 +
84141 +config GRKERNSEC_CONFIG_AUTO
84142 + bool "Automatic"
84143 + help
84144 + If you choose this configuration method, you'll be able to answer a small
84145 + number of simple questions about how you plan to use this kernel.
84146 + The settings of grsecurity and PaX will be automatically configured for
84147 + the highest commonly-used settings within the provided constraints.
84148 +
84149 + If you require additional configuration, custom changes can still be made
84150 + from the "custom configuration" menu.
84151 +
84152 +config GRKERNSEC_CONFIG_CUSTOM
84153 + bool "Custom"
84154 + help
84155 + If you choose this configuration method, you'll be able to configure all
84156 + grsecurity and PaX settings manually. Via this method, no options are
84157 + automatically enabled.
84158 +
84159 +endchoice
84160 +
84161 +choice
84162 + prompt "Usage Type"
84163 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
84164 + default GRKERNSEC_CONFIG_SERVER
84165 + help
84166 +
84167 +config GRKERNSEC_CONFIG_SERVER
84168 + bool "Server"
84169 + help
84170 + Choose this option if you plan to use this kernel on a server.
84171 +
84172 +config GRKERNSEC_CONFIG_DESKTOP
84173 + bool "Desktop"
84174 + help
84175 + Choose this option if you plan to use this kernel on a desktop.
84176 +
84177 +endchoice
84178 +
84179 +choice
84180 + prompt "Virtualization Type"
84181 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
84182 + default GRKERNSEC_CONFIG_VIRT_NONE
84183 + help
84184 +
84185 +config GRKERNSEC_CONFIG_VIRT_NONE
84186 + bool "None"
84187 + help
84188 + Choose this option if this kernel will be run on bare metal.
84189 +
84190 +config GRKERNSEC_CONFIG_VIRT_GUEST
84191 + bool "Guest"
84192 + help
84193 + Choose this option if this kernel will be run as a VM guest.
84194 +
84195 +config GRKERNSEC_CONFIG_VIRT_HOST
84196 + bool "Host"
84197 + help
84198 + Choose this option if this kernel will be run as a VM host.
84199 +
84200 +endchoice
84201 +
84202 +choice
84203 + prompt "Virtualization Hardware"
84204 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
84205 + help
84206 +
84207 +config GRKERNSEC_CONFIG_VIRT_EPT
84208 + bool "EPT/RVI Processor Support"
84209 + depends on X86
84210 + help
84211 + Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
84212 + hardware virtualization. This allows for additional kernel hardening protections
84213 + to operate without additional performance impact.
84214 +
84215 + To see if your Intel processor supports EPT, see:
84216 + http://ark.intel.com/Products/VirtualizationTechnology
84217 + (Most Core i3/5/7 support EPT)
84218 +
84219 + To see if your AMD processor supports RVI, see:
84220 + http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
84221 +
84222 +config GRKERNSEC_CONFIG_VIRT_SOFT
84223 + bool "First-gen/No Hardware Virtualization"
84224 + help
84225 + Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
84226 + support hardware virtualization or doesn't support the EPT/RVI extensions.
84227 +
84228 +endchoice
84229 +
84230 +choice
84231 + prompt "Virtualization Software"
84232 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
84233 + help
84234 +
84235 +config GRKERNSEC_CONFIG_VIRT_XEN
84236 + bool "Xen"
84237 + help
84238 + Choose this option if this kernel is running as a Xen guest or host.
84239 +
84240 +config GRKERNSEC_CONFIG_VIRT_VMWARE
84241 + bool "VMWare"
84242 + help
84243 + Choose this option if this kernel is running as a VMWare guest or host.
84244 +
84245 +config GRKERNSEC_CONFIG_VIRT_KVM
84246 + bool "KVM"
84247 + help
84248 + Choose this option if this kernel is running as a KVM guest or host.
84249 +
84250 +config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
84251 + bool "VirtualBox"
84252 + help
84253 + Choose this option if this kernel is running as a VirtualBox guest or host.
84254 +
84255 +endchoice
84256 +
84257 +choice
84258 + prompt "Required Priorities"
84259 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
84260 + default GRKERNSEC_CONFIG_PRIORITY_PERF
84261 + help
84262 +
84263 +config GRKERNSEC_CONFIG_PRIORITY_PERF
84264 + bool "Performance"
84265 + help
84266 + Choose this option if performance is of highest priority for this deployment
84267 + of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing,
84268 + and freed memory sanitizing will be disabled.
84269 +
84270 +config GRKERNSEC_CONFIG_PRIORITY_SECURITY
84271 + bool "Security"
84272 + help
84273 + Choose this option if security is of highest priority for this deployment of
84274 + grsecurity. UDEREF, kernel stack clearing, and freed memory sanitizing will
84275 + be enabled for this kernel. In a worst-case scenario, these features can
84276 + introduce a 20% performance hit (UDEREF on x64 contributing half of this hit).
84277 +
84278 +endchoice
84279 +
84280 +menu "Default Special Groups"
84281 +depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
84282 +
84283 +config GRKERNSEC_PROC_GID
84284 + int "GID exempted from /proc restrictions"
84285 + default 1001
84286 + help
84287 + Setting this GID determines which group will be exempted from
84288 + grsecurity's /proc restrictions, allowing users of the specified
84289 + group to view network statistics and the existence of other users'
84290 + processes on the system. This GID may also be chosen at boot time
84291 + via "grsec_proc_gid=" on the kernel commandline.
84292 +
84293 +config GRKERNSEC_TPE_UNTRUSTED_GID
84294 + int "GID for TPE-untrusted users"
84295 + depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
84296 + default 1005
84297 + help
84298 + Setting this GID determines which group untrusted users should
84299 + be added to. These users will be placed under grsecurity's Trusted Path
84300 + Execution mechanism, preventing them from executing their own binaries.
84301 + The users will only be able to execute binaries in directories owned and
84302 + writable only by the root user. If the sysctl option is enabled, a sysctl
84303 + option with name "tpe_gid" is created.
84304 +
84305 +config GRKERNSEC_TPE_TRUSTED_GID
84306 + int "GID for TPE-trusted users"
84307 + depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
84308 + default 1005
84309 + help
84310 + Setting this GID determines what group TPE restrictions will be
84311 + *disabled* for. If the sysctl option is enabled, a sysctl option
84312 + with name "tpe_gid" is created.
84313 +
84314 +config GRKERNSEC_SYMLINKOWN_GID
84315 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
84316 + depends on GRKERNSEC_CONFIG_SERVER
84317 + default 1006
84318 + help
84319 + Setting this GID determines what group kernel-enforced
84320 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
84321 + is enabled, a sysctl option with name "symlinkown_gid" is created.
84322 +
84323 +
84324 +endmenu
84325 +
84326 +menu "Customize Configuration"
84327 +depends on GRKERNSEC
84328 +
84329 +menu "PaX"
84330 +
84331 +config PAX
84332 + bool "Enable various PaX features"
84333 + default y if GRKERNSEC_CONFIG_AUTO
84334 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
84335 + help
84336 + This allows you to enable various PaX features. PaX adds
84337 + intrusion prevention mechanisms to the kernel that reduce
84338 + the risks posed by exploitable memory corruption bugs.
84339 +
84340 +menu "PaX Control"
84341 + depends on PAX
84342 +
84343 +config PAX_SOFTMODE
84344 + bool 'Support soft mode'
84345 + help
84346 + Enabling this option will allow you to run PaX in soft mode, that
84347 + is, PaX features will not be enforced by default, only on executables
84348 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
84349 + support as they are the only way to mark executables for soft mode use.
84350 +
84351 + Soft mode can be activated by using the "pax_softmode=1" kernel command
84352 + line option on boot. Furthermore you can control various PaX features
84353 + at runtime via the entries in /proc/sys/kernel/pax.
84354 +
84355 +config PAX_EI_PAX
84356 + bool 'Use legacy ELF header marking'
84357 + default y if GRKERNSEC_CONFIG_AUTO
84358 + help
84359 + Enabling this option will allow you to control PaX features on
84360 + a per executable basis via the 'chpax' utility available at
84361 + http://pax.grsecurity.net/. The control flags will be read from
84362 + an otherwise reserved part of the ELF header. This marking has
84363 + numerous drawbacks (no support for soft-mode, toolchain does not
84364 + know about the non-standard use of the ELF header) therefore it
84365 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
84366 + support.
84367 +
84368 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
84369 + support as well, they will override the legacy EI_PAX marks.
84370 +
84371 + If you enable none of the marking options then all applications
84372 + will run with PaX enabled on them by default.
84373 +
84374 +config PAX_PT_PAX_FLAGS
84375 + bool 'Use ELF program header marking'
84376 + default y if GRKERNSEC_CONFIG_AUTO
84377 + help
84378 + Enabling this option will allow you to control PaX features on
84379 + a per executable basis via the 'paxctl' utility available at
84380 + http://pax.grsecurity.net/. The control flags will be read from
84381 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
84382 + has the benefits of supporting both soft mode and being fully
84383 + integrated into the toolchain (the binutils patch is available
84384 + from http://pax.grsecurity.net).
84385 +
84386 + Note that if you enable the legacy EI_PAX marking support as well,
84387 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
84388 +
84389 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
84390 + must make sure that the marks are the same if a binary has both marks.
84391 +
84392 + If you enable none of the marking options then all applications
84393 + will run with PaX enabled on them by default.
84394 +
84395 +config PAX_XATTR_PAX_FLAGS
84396 + bool 'Use filesystem extended attributes marking'
84397 + default y if GRKERNSEC_CONFIG_AUTO
84398 + select CIFS_XATTR if CIFS
84399 + select EXT2_FS_XATTR if EXT2_FS
84400 + select EXT3_FS_XATTR if EXT3_FS
84401 + select EXT4_FS_XATTR if EXT4_FS
84402 + select JFFS2_FS_XATTR if JFFS2_FS
84403 + select REISERFS_FS_XATTR if REISERFS_FS
84404 + select SQUASHFS_XATTR if SQUASHFS
84405 + select TMPFS_XATTR if TMPFS
84406 + select UBIFS_FS_XATTR if UBIFS_FS
84407 + help
84408 + Enabling this option will allow you to control PaX features on
84409 + a per executable basis via the 'setfattr' utility. The control
84410 + flags will be read from the user.pax.flags extended attribute of
84411 + the file. This marking has the benefit of supporting binary-only
84412 + applications that self-check themselves (e.g., skype) and would
84413 + not tolerate chpax/paxctl changes. The main drawback is that
84414 + extended attributes are not supported by some filesystems (e.g.,
84415 + isofs, udf, vfat) so copying files through such filesystems will
84416 + lose the extended attributes and these PaX markings.
84417 +
84418 + Note that if you enable the legacy EI_PAX marking support as well,
84419 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
84420 +
84421 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
84422 + must make sure that the marks are the same if a binary has both marks.
84423 +
84424 + If you enable none of the marking options then all applications
84425 + will run with PaX enabled on them by default.
84426 +
84427 +choice
84428 + prompt 'MAC system integration'
84429 + default PAX_HAVE_ACL_FLAGS
84430 + help
84431 + Mandatory Access Control systems have the option of controlling
84432 + PaX flags on a per executable basis, choose the method supported
84433 + by your particular system.
84434 +
84435 + - "none": if your MAC system does not interact with PaX,
84436 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
84437 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
84438 +
84439 + NOTE: this option is for developers/integrators only.
84440 +
84441 + config PAX_NO_ACL_FLAGS
84442 + bool 'none'
84443 +
84444 + config PAX_HAVE_ACL_FLAGS
84445 + bool 'direct'
84446 +
84447 + config PAX_HOOK_ACL_FLAGS
84448 + bool 'hook'
84449 +endchoice
84450 +
84451 +endmenu
84452 +
84453 +menu "Non-executable pages"
84454 + depends on PAX
84455 +
84456 +config PAX_NOEXEC
84457 + bool "Enforce non-executable pages"
84458 + default y if GRKERNSEC_CONFIG_AUTO
84459 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
84460 + help
84461 + By design some architectures do not allow for protecting memory
84462 + pages against execution or even if they do, Linux does not make
84463 + use of this feature. In practice this means that if a page is
84464 + readable (such as the stack or heap) it is also executable.
84465 +
84466 + There is a well known exploit technique that makes use of this
84467 + fact and a common programming mistake where an attacker can
84468 + introduce code of his choice somewhere in the attacked program's
84469 + memory (typically the stack or the heap) and then execute it.
84470 +
84471 + If the attacked program was running with different (typically
84472 + higher) privileges than that of the attacker, then he can elevate
84473 + his own privilege level (e.g. get a root shell, write to files for
84474 + which he does not have write access to, etc).
84475 +
84476 + Enabling this option will let you choose from various features
84477 + that prevent the injection and execution of 'foreign' code in
84478 + a program.
84479 +
84480 + This will also break programs that rely on the old behaviour and
84481 + expect that dynamically allocated memory via the malloc() family
84482 + of functions is executable (which it is not). Notable examples
84483 + are the XFree86 4.x server, the java runtime and wine.
84484 +
84485 +config PAX_PAGEEXEC
84486 + bool "Paging based non-executable pages"
84487 + default y if GRKERNSEC_CONFIG_AUTO
84488 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
84489 + select S390_SWITCH_AMODE if S390
84490 + select S390_EXEC_PROTECT if S390
84491 + select ARCH_TRACK_EXEC_LIMIT if X86_32
84492 + help
84493 + This implementation is based on the paging feature of the CPU.
84494 + On i386 without hardware non-executable bit support there is a
84495 + variable but usually low performance impact, however on Intel's
84496 + P4 core based CPUs it is very high so you should not enable this
84497 + for kernels meant to be used on such CPUs.
84498 +
84499 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
84500 + with hardware non-executable bit support there is no performance
84501 + impact, on ppc the impact is negligible.
84502 +
84503 + Note that several architectures require various emulations due to
84504 + badly designed userland ABIs, this will cause a performance impact
84505 + but will disappear as soon as userland is fixed. For example, ppc
84506 + userland MUST have been built with secure-plt by a recent toolchain.
84507 +
84508 +config PAX_SEGMEXEC
84509 + bool "Segmentation based non-executable pages"
84510 + default y if GRKERNSEC_CONFIG_AUTO
84511 + depends on PAX_NOEXEC && X86_32
84512 + help
84513 + This implementation is based on the segmentation feature of the
84514 + CPU and has a very small performance impact, however applications
84515 + will be limited to a 1.5 GB address space instead of the normal
84516 + 3 GB.
84517 +
84518 +config PAX_EMUTRAMP
84519 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
84520 + default y if PARISC
84521 + help
84522 + There are some programs and libraries that for one reason or
84523 + another attempt to execute special small code snippets from
84524 + non-executable memory pages. Most notable examples are the
84525 + signal handler return code generated by the kernel itself and
84526 + the GCC trampolines.
84527 +
84528 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
84529 + such programs will no longer work under your kernel.
84530 +
84531 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
84532 + utilities to enable trampoline emulation for the affected programs
84533 + yet still have the protection provided by the non-executable pages.
84534 +
84535 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
84536 + your system will not even boot.
84537 +
84538 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
84539 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
84540 + for the affected files.
84541 +
84542 + NOTE: enabling this feature *may* open up a loophole in the
84543 + protection provided by non-executable pages that an attacker
84544 + could abuse. Therefore the best solution is to not have any
84545 + files on your system that would require this option. This can
84546 + be achieved by not using libc5 (which relies on the kernel
84547 + signal handler return code) and not using or rewriting programs
84548 + that make use of the nested function implementation of GCC.
84549 + Skilled users can just fix GCC itself so that it implements
84550 + nested function calls in a way that does not interfere with PaX.
84551 +
84552 +config PAX_EMUSIGRT
84553 + bool "Automatically emulate sigreturn trampolines"
84554 + depends on PAX_EMUTRAMP && PARISC
84555 + default y
84556 + help
84557 + Enabling this option will have the kernel automatically detect
84558 + and emulate signal return trampolines executing on the stack
84559 + that would otherwise lead to task termination.
84560 +
84561 + This solution is intended as a temporary one for users with
84562 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
84563 + Modula-3 runtime, etc) or executables linked to such, basically
84564 + everything that does not specify its own SA_RESTORER function in
84565 + normal executable memory like glibc 2.1+ does.
84566 +
84567 + On parisc you MUST enable this option, otherwise your system will
84568 + not even boot.
84569 +
84570 + NOTE: this feature cannot be disabled on a per executable basis
84571 + and since it *does* open up a loophole in the protection provided
84572 + by non-executable pages, the best solution is to not have any
84573 + files on your system that would require this option.
84574 +
84575 +config PAX_MPROTECT
84576 + bool "Restrict mprotect()"
84577 + default y if GRKERNSEC_CONFIG_AUTO
84578 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
84579 + help
84580 + Enabling this option will prevent programs from
84581 + - changing the executable status of memory pages that were
84582 + not originally created as executable,
84583 + - making read-only executable pages writable again,
84584 + - creating executable pages from anonymous memory,
84585 + - making read-only-after-relocations (RELRO) data pages writable again.
84586 +
84587 + You should say Y here to complete the protection provided by
84588 + the enforcement of non-executable pages.
84589 +
84590 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
84591 + this feature on a per file basis.
84592 +
84593 +config PAX_MPROTECT_COMPAT
84594 + bool "Use legacy/compat protection demoting (read help)"
84595 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
84596 + depends on PAX_MPROTECT
84597 + help
84598 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
84599 + by sending the proper error code to the application. For some broken
84600 + userland, this can cause problems with Python or other applications. The
84601 + current implementation however allows for applications like clamav to
84602 + detect if JIT compilation/execution is allowed and to fall back gracefully
84603 + to an interpreter-based mode if it does not. While we encourage everyone
84604 + to use the current implementation as-is and push upstream to fix broken
84605 + userland (note that the RWX logging option can assist with this), in some
84606 + environments this may not be possible. Having to disable MPROTECT
84607 + completely on certain binaries reduces the security benefit of PaX,
84608 + so this option is provided for those environments to revert to the old
84609 + behavior.
84610 +
84611 +config PAX_ELFRELOCS
84612 + bool "Allow ELF text relocations (read help)"
84613 + depends on PAX_MPROTECT
84614 + default n
84615 + help
84616 + Non-executable pages and mprotect() restrictions are effective
84617 + in preventing the introduction of new executable code into an
84618 + attacked task's address space. There remain only two venues
84619 + for this kind of attack: if the attacker can execute already
84620 + existing code in the attacked task then he can either have it
84621 + create and mmap() a file containing his code or have it mmap()
84622 + an already existing ELF library that does not have position
84623 + independent code in it and use mprotect() on it to make it
84624 + writable and copy his code there. While protecting against
84625 + the former approach is beyond PaX, the latter can be prevented
84626 + by having only PIC ELF libraries on one's system (which do not
84627 + need to relocate their code). If you are sure this is your case,
84628 + as is the case with all modern Linux distributions, then leave
84629 + this option disabled. You should say 'n' here.
84630 +
84631 +config PAX_ETEXECRELOCS
84632 + bool "Allow ELF ET_EXEC text relocations"
84633 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
84634 + select PAX_ELFRELOCS
84635 + default y
84636 + help
84637 + On some architectures there are incorrectly created applications
84638 + that require text relocations and would not work without enabling
84639 + this option. If you are an alpha, ia64 or parisc user, you should
84640 + enable this option and disable it once you have made sure that
84641 + none of your applications need it.
84642 +
84643 +config PAX_EMUPLT
84644 + bool "Automatically emulate ELF PLT"
84645 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
84646 + default y
84647 + help
84648 + Enabling this option will have the kernel automatically detect
84649 + and emulate the Procedure Linkage Table entries in ELF files.
84650 + On some architectures such entries are in writable memory, and
84651 + become non-executable leading to task termination. Therefore
84652 + it is mandatory that you enable this option on alpha, parisc,
84653 + sparc and sparc64, otherwise your system would not even boot.
84654 +
84655 + NOTE: this feature *does* open up a loophole in the protection
84656 + provided by the non-executable pages, therefore the proper
84657 + solution is to modify the toolchain to produce a PLT that does
84658 + not need to be writable.
84659 +
84660 +config PAX_DLRESOLVE
84661 + bool 'Emulate old glibc resolver stub'
84662 + depends on PAX_EMUPLT && SPARC
84663 + default n
84664 + help
84665 + This option is needed if userland has an old glibc (before 2.4)
84666 + that puts a 'save' instruction into the runtime generated resolver
84667 + stub that needs special emulation.
84668 +
84669 +config PAX_KERNEXEC
84670 + bool "Enforce non-executable kernel pages"
84671 + default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
84672 + depends on ((X86 && (!X86_32 || X86_WP_WORKS_OK)) || (ARM && (CPU_V6 || CPU_V7) && !(ARM_LPAE && MODULES))) && !XEN
84673 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
84674 + select PAX_KERNEXEC_PLUGIN if X86_64
84675 + help
84676 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
84677 + that is, enabling this option will make it harder to inject
84678 + and execute 'foreign' code in kernel memory itself.
84679 +
84680 +choice
84681 + prompt "Return Address Instrumentation Method"
84682 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
84683 + depends on PAX_KERNEXEC_PLUGIN
84684 + help
84685 + Select the method used to instrument function pointer dereferences.
84686 + Note that binary modules cannot be instrumented by this approach.
84687 +
84688 + Note that the implementation requires a gcc with plugin support,
84689 + i.e., gcc 4.5 or newer. You may need to install the supporting
84690 + headers explicitly in addition to the normal gcc package.
84691 +
84692 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
84693 + bool "bts"
84694 + help
84695 + This method is compatible with binary only modules but has
84696 + a higher runtime overhead.
84697 +
84698 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
84699 + bool "or"
84700 + depends on !PARAVIRT
84701 + help
84702 + This method is incompatible with binary only modules but has
84703 + a lower runtime overhead.
84704 +endchoice
84705 +
84706 +config PAX_KERNEXEC_PLUGIN_METHOD
84707 + string
84708 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
84709 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
84710 + default ""
84711 +
84712 +config PAX_KERNEXEC_MODULE_TEXT
84713 + int "Minimum amount of memory reserved for module code"
84714 + default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
84715 + default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
84716 + depends on PAX_KERNEXEC && X86_32 && MODULES
84717 + help
84718 + Due to implementation details the kernel must reserve a fixed
84719 + amount of memory for module code at compile time that cannot be
84720 + changed at runtime. Here you can specify the minimum amount
84721 + in MB that will be reserved. Due to the same implementation
84722 + details this size will always be rounded up to the next 2/4 MB
84723 + boundary (depends on PAE) so the actually available memory for
84724 + module code will usually be more than this minimum.
84725 +
84726 + The default 4 MB should be enough for most users but if you have
84727 + an excessive number of modules (e.g., most distribution configs
84728 + compile many drivers as modules) or use huge modules such as
84729 + nvidia's kernel driver, you will need to adjust this amount.
84730 + A good rule of thumb is to look at your currently loaded kernel
84731 + modules and add up their sizes.
84732 +
84733 +endmenu
84734 +
84735 +menu "Address Space Layout Randomization"
84736 + depends on PAX
84737 +
84738 +config PAX_ASLR
84739 + bool "Address Space Layout Randomization"
84740 + default y if GRKERNSEC_CONFIG_AUTO
84741 + help
84742 + Many if not most exploit techniques rely on the knowledge of
84743 + certain addresses in the attacked program. The following options
84744 + will allow the kernel to apply a certain amount of randomization
84745 + to specific parts of the program thereby forcing an attacker to
84746 + guess them in most cases. Any failed guess will most likely crash
84747 + the attacked program which allows the kernel to detect such attempts
84748 + and react on them. PaX itself provides no reaction mechanisms,
84749 + instead it is strongly encouraged that you make use of Nergal's
84750 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
84751 + (http://www.grsecurity.net/) built-in crash detection features or
84752 + develop one yourself.
84753 +
84754 + By saying Y here you can choose to randomize the following areas:
84755 + - top of the task's kernel stack
84756 + - top of the task's userland stack
84757 + - base address for mmap() requests that do not specify one
84758 + (this includes all libraries)
84759 + - base address of the main executable
84760 +
84761 + It is strongly recommended to say Y here as address space layout
84762 + randomization has negligible impact on performance yet it provides
84763 + a very effective protection.
84764 +
84765 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
84766 + this feature on a per file basis.
84767 +
84768 +config PAX_RANDKSTACK
84769 + bool "Randomize kernel stack base"
84770 + default y if GRKERNSEC_CONFIG_AUTO
84771 + depends on X86_TSC && X86
84772 + help
84773 + By saying Y here the kernel will randomize every task's kernel
84774 + stack on every system call. This will not only force an attacker
84775 + to guess it but also prevent him from making use of possible
84776 + leaked information about it.
84777 +
84778 + Since the kernel stack is a rather scarce resource, randomization
84779 + may cause unexpected stack overflows, therefore you should very
84780 + carefully test your system. Note that once enabled in the kernel
84781 + configuration, this feature cannot be disabled on a per file basis.
84782 +
84783 +config PAX_RANDUSTACK
84784 + bool "Randomize user stack base"
84785 + default y if GRKERNSEC_CONFIG_AUTO
84786 + depends on PAX_ASLR
84787 + help
84788 + By saying Y here the kernel will randomize every task's userland
84789 + stack. The randomization is done in two steps where the second
84790 + one may apply a big amount of shift to the top of the stack and
84791 + cause problems for programs that want to use lots of memory (more
84792 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
84793 + For this reason the second step can be controlled by 'chpax' or
84794 + 'paxctl' on a per file basis.
84795 +
84796 +config PAX_RANDMMAP
84797 + bool "Randomize mmap() base"
84798 + default y if GRKERNSEC_CONFIG_AUTO
84799 + depends on PAX_ASLR
84800 + help
84801 + By saying Y here the kernel will use a randomized base address for
84802 + mmap() requests that do not specify one themselves. As a result
84803 + all dynamically loaded libraries will appear at random addresses
84804 + and therefore be harder to exploit by a technique where an attacker
84805 + attempts to execute library code for his purposes (e.g. spawn a
84806 + shell from an exploited program that is running at an elevated
84807 + privilege level).
84808 +
84809 + Furthermore, if a program is relinked as a dynamic ELF file, its
84810 + base address will be randomized as well, completing the full
84811 + randomization of the address space layout. Attacking such programs
84812 + becomes a guess game. You can find an example of doing this at
84813 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
84814 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
84815 +
84816 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
84817 + feature on a per file basis.
84818 +
84819 +endmenu
84820 +
84821 +menu "Miscellaneous hardening features"
84822 +
84823 +config PAX_MEMORY_SANITIZE
84824 + bool "Sanitize all freed memory"
84825 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
84826 + depends on !HIBERNATION
84827 + help
84828 + By saying Y here the kernel will erase memory pages as soon as they
84829 + are freed. This in turn reduces the lifetime of data stored in the
84830 + pages, making it less likely that sensitive information such as
84831 + passwords, cryptographic secrets, etc stay in memory for too long.
84832 +
84833 + This is especially useful for programs whose runtime is short, long
84834 + lived processes and the kernel itself benefit from this as long as
84835 + they operate on whole memory pages and ensure timely freeing of pages
84836 + that may hold sensitive information.
84837 +
84838 + The tradeoff is performance impact, on a single CPU system kernel
84839 + compilation sees a 3% slowdown, other systems and workloads may vary
84840 + and you are advised to test this feature on your expected workload
84841 + before deploying it.
84842 +
84843 + Note that this feature does not protect data stored in live pages,
84844 + e.g., process memory swapped to disk may stay there for a long time.
84845 +
84846 +config PAX_MEMORY_STACKLEAK
84847 + bool "Sanitize kernel stack"
84848 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
84849 + depends on X86
84850 + help
84851 + By saying Y here the kernel will erase the kernel stack before it
84852 + returns from a system call. This in turn reduces the information
84853 + that a kernel stack leak bug can reveal.
84854 +
84855 + Note that such a bug can still leak information that was put on
84856 + the stack by the current system call (the one eventually triggering
84857 + the bug) but traces of earlier system calls on the kernel stack
84858 + cannot leak anymore.
84859 +
84860 + The tradeoff is performance impact: on a single CPU system kernel
84861 + compilation sees a 1% slowdown, other systems and workloads may vary
84862 + and you are advised to test this feature on your expected workload
84863 + before deploying it.
84864 +
84865 + Note that the full feature requires a gcc with plugin support,
84866 + i.e., gcc 4.5 or newer. You may need to install the supporting
84867 + headers explicitly in addition to the normal gcc package. Using
84868 + older gcc versions means that functions with large enough stack
84869 + frames may leave uninitialized memory behind that may be exposed
84870 + to a later syscall leaking the stack.
84871 +
84872 +config PAX_MEMORY_UDEREF
84873 + bool "Prevent invalid userland pointer dereference"
84874 + default y if GRKERNSEC_CONFIG_AUTO && !(X86_64 && GRKERNSEC_CONFIG_PRIORITY_PERF) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
84875 + depends on (X86 || (ARM && (CPU_V6 || CPU_V7) && !ARM_LPAE)) && !UML_X86 && !XEN
84876 + select PAX_PER_CPU_PGD if X86_64
84877 + help
84878 + By saying Y here the kernel will be prevented from dereferencing
84879 + userland pointers in contexts where the kernel expects only kernel
84880 + pointers. This is both a useful runtime debugging feature and a
84881 + security measure that prevents exploiting a class of kernel bugs.
84882 +
84883 + The tradeoff is that some virtualization solutions may experience
84884 + a huge slowdown and therefore you should not enable this feature
84885 + for kernels meant to run in such environments. Whether a given VM
84886 + solution is affected or not is best determined by simply trying it
84887 + out, the performance impact will be obvious right on boot as this
84888 + mechanism engages from very early on. A good rule of thumb is that
84889 + VMs running on CPUs without hardware virtualization support (i.e.,
84890 + the majority of IA-32 CPUs) will likely experience the slowdown.
84891 +
84892 +config PAX_REFCOUNT
84893 + bool "Prevent various kernel object reference counter overflows"
84894 + default y if GRKERNSEC_CONFIG_AUTO
84895 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
84896 + help
84897 + By saying Y here the kernel will detect and prevent overflowing
84898 + various (but not all) kinds of object reference counters. Such
84899 + overflows can normally occur due to bugs only and are often, if
84900 + not always, exploitable.
84901 +
84902 + The tradeoff is that data structures protected by an overflowed
84903 + refcount will never be freed and therefore will leak memory. Note
84904 + that this leak also happens even without this protection but in
84905 + that case the overflow can eventually trigger the freeing of the
84906 + data structure while it is still being used elsewhere, resulting
84907 + in the exploitable situation that this feature prevents.
84908 +
84909 + Since this has a negligible performance impact, you should enable
84910 + this feature.
84911 +
84912 +config PAX_USERCOPY
84913 + bool "Harden heap object copies between kernel and userland"
84914 + default y if GRKERNSEC_CONFIG_AUTO
84915 + depends on ARM || IA64 || PPC || SPARC || X86
84916 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
84917 + select PAX_USERCOPY_SLABS
84918 + help
84919 + By saying Y here the kernel will enforce the size of heap objects
84920 + when they are copied in either direction between the kernel and
84921 + userland, even if only a part of the heap object is copied.
84922 +
84923 + Specifically, this checking prevents information leaking from the
84924 + kernel heap during kernel to userland copies (if the kernel heap
84925 + object is otherwise fully initialized) and prevents kernel heap
84926 + overflows during userland to kernel copies.
84927 +
84928 + Note that the current implementation provides the strictest bounds
84929 + checks for the SLUB allocator.
84930 +
84931 + Enabling this option also enables per-slab cache protection against
84932 + data in a given cache being copied into/out of via userland
84933 + accessors. Though the whitelist of regions will be reduced over
84934 + time, it notably protects important data structures like task structs.
84935 +
84936 + If frame pointers are enabled on x86, this option will also restrict
84937 + copies into and out of the kernel stack to local variables within a
84938 + single frame.
84939 +
84940 + Since this has a negligible performance impact, you should enable
84941 + this feature.
84942 +
84943 +
84944 +config PAX_USERCOPY_DEBUG
84945 + bool
84946 + depends on X86 && PAX_USERCOPY
84947 + default n
84948 +
84949 +config PAX_SIZE_OVERFLOW
84950 + bool "Prevent various integer overflows in function size parameters"
84951 + default y if GRKERNSEC_CONFIG_AUTO
84952 + depends on X86
84953 + help
84954 + By saying Y here the kernel recomputes expressions of function
84955 + arguments marked by a size_overflow attribute with double integer
84956 + precision (DImode/TImode for 32/64 bit integer types).
84957 +
84958 + The recomputed argument is checked against TYPE_MAX and an event
84959 + is logged on overflow and the triggering process is killed.
84960 +
84961 + Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
84962 +
84963 + Note that the implementation requires a gcc with plugin support,
84964 + i.e., gcc 4.5 or newer. You may need to install the supporting
84965 + headers explicitly in addition to the normal gcc package.
84966 +
84967 +config PAX_LATENT_ENTROPY
84968 + bool "Generate some entropy during boot"
84969 + default y if GRKERNSEC_CONFIG_AUTO
84970 + help
84971 + By saying Y here the kernel will instrument early boot code to
84972 + extract some entropy from both original and artificially created
84973 + program state. This will help especially embedded systems where
84974 + there is little 'natural' source of entropy normally. The cost
84975 + is some slowdown of the boot process.
84976 +
84977 + Note that the implementation requires a gcc with plugin support,
84978 + i.e., gcc 4.5 or newer. You may need to install the supporting
84979 + headers explicitly in addition to the normal gcc package.
84980 +
84981 + Note that entropy extracted this way is not cryptographically
84982 + secure!
84983 +
84984 +endmenu
84985 +
84986 +endmenu
84987 +
84988 +source grsecurity/Kconfig
84989 +
84990 +endmenu
84991 +
84992 +endmenu
84993 +
84994 source security/keys/Kconfig
84995
84996 config SECURITY_DMESG_RESTRICT
84997 @@ -103,7 +999,7 @@ config INTEL_TXT
84998 config LSM_MMAP_MIN_ADDR
84999 int "Low address space for LSM to protect from user allocation"
85000 depends on SECURITY && SECURITY_SELINUX
85001 - default 32768 if ARM
85002 + default 32768 if ALPHA || ARM || PARISC || SPARC32
85003 default 65536
85004 help
85005 This is the portion of low virtual memory which should be protected
85006 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
85007 index 8c2a7f6..b133ac9 100644
85008 --- a/security/apparmor/lsm.c
85009 +++ b/security/apparmor/lsm.c
85010 @@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
85011 return error;
85012 }
85013
85014 -static struct security_operations apparmor_ops = {
85015 +static struct security_operations apparmor_ops __read_only = {
85016 .name = "apparmor",
85017
85018 .ptrace_access_check = apparmor_ptrace_access_check,
85019 diff --git a/security/commoncap.c b/security/commoncap.c
85020 index 7ee08c7..8d1a9d6 100644
85021 --- a/security/commoncap.c
85022 +++ b/security/commoncap.c
85023 @@ -424,6 +424,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
85024 return 0;
85025 }
85026
85027 +/* returns:
85028 + 1 for suid privilege
85029 + 2 for sgid privilege
85030 + 3 for fscap privilege
85031 +*/
85032 +int is_privileged_binary(const struct dentry *dentry)
85033 +{
85034 + struct cpu_vfs_cap_data capdata;
85035 + struct inode *inode = dentry->d_inode;
85036 +
85037 + if (!inode || S_ISDIR(inode->i_mode))
85038 + return 0;
85039 +
85040 + if (inode->i_mode & S_ISUID)
85041 + return 1;
85042 + if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
85043 + return 2;
85044 +
85045 + if (!get_vfs_caps_from_disk(dentry, &capdata)) {
85046 + if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
85047 + return 3;
85048 + }
85049 +
85050 + return 0;
85051 +}
85052 +
85053 /*
85054 * Attempt to get the on-exec apply capability sets for an executable file from
85055 * its xattrs and, if present, apply them to the proposed credentials being
85056 @@ -592,6 +618,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
85057 const struct cred *cred = current_cred();
85058 kuid_t root_uid = make_kuid(cred->user_ns, 0);
85059
85060 + if (gr_acl_enable_at_secure())
85061 + return 1;
85062 +
85063 if (!uid_eq(cred->uid, root_uid)) {
85064 if (bprm->cap_effective)
85065 return 1;
85066 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
85067 index 079a85d..12e93f8 100644
85068 --- a/security/integrity/ima/ima.h
85069 +++ b/security/integrity/ima/ima.h
85070 @@ -96,8 +96,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
85071 extern spinlock_t ima_queue_lock;
85072
85073 struct ima_h_table {
85074 - atomic_long_t len; /* number of stored measurements in the list */
85075 - atomic_long_t violations;
85076 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
85077 + atomic_long_unchecked_t violations;
85078 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
85079 };
85080 extern struct ima_h_table ima_htable;
85081 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
85082 index 0cea3db..2f0ef77 100644
85083 --- a/security/integrity/ima/ima_api.c
85084 +++ b/security/integrity/ima/ima_api.c
85085 @@ -79,7 +79,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
85086 int result;
85087
85088 /* can overflow, only indicator */
85089 - atomic_long_inc(&ima_htable.violations);
85090 + atomic_long_inc_unchecked(&ima_htable.violations);
85091
85092 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
85093 if (!entry) {
85094 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
85095 index 38477c9..87a60c7 100644
85096 --- a/security/integrity/ima/ima_fs.c
85097 +++ b/security/integrity/ima/ima_fs.c
85098 @@ -28,12 +28,12 @@
85099 static int valid_policy = 1;
85100 #define TMPBUFLEN 12
85101 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
85102 - loff_t *ppos, atomic_long_t *val)
85103 + loff_t *ppos, atomic_long_unchecked_t *val)
85104 {
85105 char tmpbuf[TMPBUFLEN];
85106 ssize_t len;
85107
85108 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
85109 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
85110 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
85111 }
85112
85113 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
85114 index 55a6271..ad829c3 100644
85115 --- a/security/integrity/ima/ima_queue.c
85116 +++ b/security/integrity/ima/ima_queue.c
85117 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
85118 INIT_LIST_HEAD(&qe->later);
85119 list_add_tail_rcu(&qe->later, &ima_measurements);
85120
85121 - atomic_long_inc(&ima_htable.len);
85122 + atomic_long_inc_unchecked(&ima_htable.len);
85123 key = ima_hash_key(entry->digest);
85124 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
85125 return 0;
85126 diff --git a/security/keys/compat.c b/security/keys/compat.c
85127 index 1c26176..64a1ba2 100644
85128 --- a/security/keys/compat.c
85129 +++ b/security/keys/compat.c
85130 @@ -44,7 +44,7 @@ static long compat_keyctl_instantiate_key_iov(
85131 if (ret == 0)
85132 goto no_payload_free;
85133
85134 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
85135 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
85136
85137 if (iov != iovstack)
85138 kfree(iov);
85139 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
85140 index 4b5c948..2054dc1 100644
85141 --- a/security/keys/keyctl.c
85142 +++ b/security/keys/keyctl.c
85143 @@ -986,7 +986,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
85144 /*
85145 * Copy the iovec data from userspace
85146 */
85147 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
85148 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
85149 unsigned ioc)
85150 {
85151 for (; ioc > 0; ioc--) {
85152 @@ -1008,7 +1008,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
85153 * If successful, 0 will be returned.
85154 */
85155 long keyctl_instantiate_key_common(key_serial_t id,
85156 - const struct iovec *payload_iov,
85157 + const struct iovec __user *payload_iov,
85158 unsigned ioc,
85159 size_t plen,
85160 key_serial_t ringid)
85161 @@ -1103,7 +1103,7 @@ long keyctl_instantiate_key(key_serial_t id,
85162 [0].iov_len = plen
85163 };
85164
85165 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
85166 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
85167 }
85168
85169 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
85170 @@ -1136,7 +1136,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
85171 if (ret == 0)
85172 goto no_payload_free;
85173
85174 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
85175 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
85176 err:
85177 if (iov != iovstack)
85178 kfree(iov);
85179 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
85180 index 6ece7f2..ecdb55c 100644
85181 --- a/security/keys/keyring.c
85182 +++ b/security/keys/keyring.c
85183 @@ -227,16 +227,16 @@ static long keyring_read(const struct key *keyring,
85184 ret = -EFAULT;
85185
85186 for (loop = 0; loop < klist->nkeys; loop++) {
85187 + key_serial_t serial;
85188 key = rcu_deref_link_locked(klist, loop,
85189 keyring);
85190 + serial = key->serial;
85191
85192 tmp = sizeof(key_serial_t);
85193 if (tmp > buflen)
85194 tmp = buflen;
85195
85196 - if (copy_to_user(buffer,
85197 - &key->serial,
85198 - tmp) != 0)
85199 + if (copy_to_user(buffer, &serial, tmp))
85200 goto error;
85201
85202 buflen -= tmp;
85203 diff --git a/security/min_addr.c b/security/min_addr.c
85204 index f728728..6457a0c 100644
85205 --- a/security/min_addr.c
85206 +++ b/security/min_addr.c
85207 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
85208 */
85209 static void update_mmap_min_addr(void)
85210 {
85211 +#ifndef SPARC
85212 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
85213 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
85214 mmap_min_addr = dac_mmap_min_addr;
85215 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
85216 #else
85217 mmap_min_addr = dac_mmap_min_addr;
85218 #endif
85219 +#endif
85220 }
85221
85222 /*
85223 diff --git a/security/security.c b/security/security.c
85224 index 7b88c6a..1e3ea8f 100644
85225 --- a/security/security.c
85226 +++ b/security/security.c
85227 @@ -20,6 +20,7 @@
85228 #include <linux/ima.h>
85229 #include <linux/evm.h>
85230 #include <linux/fsnotify.h>
85231 +#include <linux/mm.h>
85232 #include <linux/mman.h>
85233 #include <linux/mount.h>
85234 #include <linux/personality.h>
85235 @@ -32,8 +33,8 @@
85236 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
85237 CONFIG_DEFAULT_SECURITY;
85238
85239 -static struct security_operations *security_ops;
85240 -static struct security_operations default_security_ops = {
85241 +static struct security_operations *security_ops __read_only;
85242 +static struct security_operations default_security_ops __read_only = {
85243 .name = "default",
85244 };
85245
85246 @@ -74,7 +75,9 @@ int __init security_init(void)
85247
85248 void reset_security_ops(void)
85249 {
85250 + pax_open_kernel();
85251 security_ops = &default_security_ops;
85252 + pax_close_kernel();
85253 }
85254
85255 /* Save user chosen LSM */
85256 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
85257 index ef26e96..642fb78 100644
85258 --- a/security/selinux/hooks.c
85259 +++ b/security/selinux/hooks.c
85260 @@ -95,8 +95,6 @@
85261
85262 #define NUM_SEL_MNT_OPTS 5
85263
85264 -extern struct security_operations *security_ops;
85265 -
85266 /* SECMARK reference count */
85267 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
85268
85269 @@ -5501,7 +5499,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
85270
85271 #endif
85272
85273 -static struct security_operations selinux_ops = {
85274 +static struct security_operations selinux_ops __read_only = {
85275 .name = "selinux",
85276
85277 .ptrace_access_check = selinux_ptrace_access_check,
85278 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
85279 index 65f67cb..3f141ef 100644
85280 --- a/security/selinux/include/xfrm.h
85281 +++ b/security/selinux/include/xfrm.h
85282 @@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
85283
85284 static inline void selinux_xfrm_notify_policyload(void)
85285 {
85286 - atomic_inc(&flow_cache_genid);
85287 + atomic_inc_unchecked(&flow_cache_genid);
85288 rt_genid_bump(&init_net);
85289 }
85290 #else
85291 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
85292 index 38be92c..21f49ee 100644
85293 --- a/security/smack/smack_lsm.c
85294 +++ b/security/smack/smack_lsm.c
85295 @@ -3398,7 +3398,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
85296 return 0;
85297 }
85298
85299 -struct security_operations smack_ops = {
85300 +struct security_operations smack_ops __read_only = {
85301 .name = "smack",
85302
85303 .ptrace_access_check = smack_ptrace_access_check,
85304 diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
85305 index 390c646..f2f8db3 100644
85306 --- a/security/tomoyo/mount.c
85307 +++ b/security/tomoyo/mount.c
85308 @@ -118,6 +118,10 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r,
85309 type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
85310 need_dev = -1; /* dev_name is a directory */
85311 } else {
85312 + if (!capable(CAP_SYS_ADMIN)) {
85313 + error = -EPERM;
85314 + goto out;
85315 + }
85316 fstype = get_fs_type(type);
85317 if (!fstype) {
85318 error = -ENODEV;
85319 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
85320 index a2ee362..5754f34 100644
85321 --- a/security/tomoyo/tomoyo.c
85322 +++ b/security/tomoyo/tomoyo.c
85323 @@ -503,7 +503,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
85324 * tomoyo_security_ops is a "struct security_operations" which is used for
85325 * registering TOMOYO.
85326 */
85327 -static struct security_operations tomoyo_security_ops = {
85328 +static struct security_operations tomoyo_security_ops __read_only = {
85329 .name = "tomoyo",
85330 .cred_alloc_blank = tomoyo_cred_alloc_blank,
85331 .cred_prepare = tomoyo_cred_prepare,
85332 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
85333 index 20ef514..4182bed 100644
85334 --- a/security/yama/Kconfig
85335 +++ b/security/yama/Kconfig
85336 @@ -1,6 +1,6 @@
85337 config SECURITY_YAMA
85338 bool "Yama support"
85339 - depends on SECURITY
85340 + depends on SECURITY && !GRKERNSEC
85341 select SECURITYFS
85342 select SECURITY_PATH
85343 default n
85344 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
85345 index 4cedc69..e59d8a3 100644
85346 --- a/sound/aoa/codecs/onyx.c
85347 +++ b/sound/aoa/codecs/onyx.c
85348 @@ -54,7 +54,7 @@ struct onyx {
85349 spdif_locked:1,
85350 analog_locked:1,
85351 original_mute:2;
85352 - int open_count;
85353 + local_t open_count;
85354 struct codec_info *codec_info;
85355
85356 /* mutex serializes concurrent access to the device
85357 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
85358 struct onyx *onyx = cii->codec_data;
85359
85360 mutex_lock(&onyx->mutex);
85361 - onyx->open_count++;
85362 + local_inc(&onyx->open_count);
85363 mutex_unlock(&onyx->mutex);
85364
85365 return 0;
85366 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
85367 struct onyx *onyx = cii->codec_data;
85368
85369 mutex_lock(&onyx->mutex);
85370 - onyx->open_count--;
85371 - if (!onyx->open_count)
85372 + if (local_dec_and_test(&onyx->open_count))
85373 onyx->spdif_locked = onyx->analog_locked = 0;
85374 mutex_unlock(&onyx->mutex);
85375
85376 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
85377 index ffd2025..df062c9 100644
85378 --- a/sound/aoa/codecs/onyx.h
85379 +++ b/sound/aoa/codecs/onyx.h
85380 @@ -11,6 +11,7 @@
85381 #include <linux/i2c.h>
85382 #include <asm/pmac_low_i2c.h>
85383 #include <asm/prom.h>
85384 +#include <asm/local.h>
85385
85386 /* PCM3052 register definitions */
85387
85388 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
85389 index 4c1cc51..16040040 100644
85390 --- a/sound/core/oss/pcm_oss.c
85391 +++ b/sound/core/oss/pcm_oss.c
85392 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
85393 if (in_kernel) {
85394 mm_segment_t fs;
85395 fs = snd_enter_user();
85396 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
85397 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
85398 snd_leave_user(fs);
85399 } else {
85400 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
85401 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
85402 }
85403 if (ret != -EPIPE && ret != -ESTRPIPE)
85404 break;
85405 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
85406 if (in_kernel) {
85407 mm_segment_t fs;
85408 fs = snd_enter_user();
85409 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
85410 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
85411 snd_leave_user(fs);
85412 } else {
85413 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
85414 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
85415 }
85416 if (ret == -EPIPE) {
85417 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
85418 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
85419 struct snd_pcm_plugin_channel *channels;
85420 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
85421 if (!in_kernel) {
85422 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
85423 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
85424 return -EFAULT;
85425 buf = runtime->oss.buffer;
85426 }
85427 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
85428 }
85429 } else {
85430 tmp = snd_pcm_oss_write2(substream,
85431 - (const char __force *)buf,
85432 + (const char __force_kernel *)buf,
85433 runtime->oss.period_bytes, 0);
85434 if (tmp <= 0)
85435 goto err;
85436 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
85437 struct snd_pcm_runtime *runtime = substream->runtime;
85438 snd_pcm_sframes_t frames, frames1;
85439 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
85440 - char __user *final_dst = (char __force __user *)buf;
85441 + char __user *final_dst = (char __force_user *)buf;
85442 if (runtime->oss.plugin_first) {
85443 struct snd_pcm_plugin_channel *channels;
85444 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
85445 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
85446 xfer += tmp;
85447 runtime->oss.buffer_used -= tmp;
85448 } else {
85449 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
85450 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
85451 runtime->oss.period_bytes, 0);
85452 if (tmp <= 0)
85453 goto err;
85454 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
85455 size1);
85456 size1 /= runtime->channels; /* frames */
85457 fs = snd_enter_user();
85458 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
85459 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
85460 snd_leave_user(fs);
85461 }
85462 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
85463 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
85464 index af49721..e85058e 100644
85465 --- a/sound/core/pcm_compat.c
85466 +++ b/sound/core/pcm_compat.c
85467 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
85468 int err;
85469
85470 fs = snd_enter_user();
85471 - err = snd_pcm_delay(substream, &delay);
85472 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
85473 snd_leave_user(fs);
85474 if (err < 0)
85475 return err;
85476 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
85477 index 09b4286..8620fac 100644
85478 --- a/sound/core/pcm_native.c
85479 +++ b/sound/core/pcm_native.c
85480 @@ -2806,11 +2806,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
85481 switch (substream->stream) {
85482 case SNDRV_PCM_STREAM_PLAYBACK:
85483 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
85484 - (void __user *)arg);
85485 + (void __force_user *)arg);
85486 break;
85487 case SNDRV_PCM_STREAM_CAPTURE:
85488 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
85489 - (void __user *)arg);
85490 + (void __force_user *)arg);
85491 break;
85492 default:
85493 result = -EINVAL;
85494 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
85495 index 040c60e..989a19a 100644
85496 --- a/sound/core/seq/seq_device.c
85497 +++ b/sound/core/seq/seq_device.c
85498 @@ -64,7 +64,7 @@ struct ops_list {
85499 int argsize; /* argument size */
85500
85501 /* operators */
85502 - struct snd_seq_dev_ops ops;
85503 + struct snd_seq_dev_ops *ops;
85504
85505 /* registered devices */
85506 struct list_head dev_list; /* list of devices */
85507 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
85508
85509 mutex_lock(&ops->reg_mutex);
85510 /* copy driver operators */
85511 - ops->ops = *entry;
85512 + ops->ops = entry;
85513 ops->driver |= DRIVER_LOADED;
85514 ops->argsize = argsize;
85515
85516 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
85517 dev->name, ops->id, ops->argsize, dev->argsize);
85518 return -EINVAL;
85519 }
85520 - if (ops->ops.init_device(dev) >= 0) {
85521 + if (ops->ops->init_device(dev) >= 0) {
85522 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
85523 ops->num_init_devices++;
85524 } else {
85525 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
85526 dev->name, ops->id, ops->argsize, dev->argsize);
85527 return -EINVAL;
85528 }
85529 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
85530 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
85531 dev->status = SNDRV_SEQ_DEVICE_FREE;
85532 dev->driver_data = NULL;
85533 ops->num_init_devices--;
85534 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
85535 index 4e0dd22..7a1f32c 100644
85536 --- a/sound/drivers/mts64.c
85537 +++ b/sound/drivers/mts64.c
85538 @@ -29,6 +29,7 @@
85539 #include <sound/initval.h>
85540 #include <sound/rawmidi.h>
85541 #include <sound/control.h>
85542 +#include <asm/local.h>
85543
85544 #define CARD_NAME "Miditerminal 4140"
85545 #define DRIVER_NAME "MTS64"
85546 @@ -67,7 +68,7 @@ struct mts64 {
85547 struct pardevice *pardev;
85548 int pardev_claimed;
85549
85550 - int open_count;
85551 + local_t open_count;
85552 int current_midi_output_port;
85553 int current_midi_input_port;
85554 u8 mode[MTS64_NUM_INPUT_PORTS];
85555 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
85556 {
85557 struct mts64 *mts = substream->rmidi->private_data;
85558
85559 - if (mts->open_count == 0) {
85560 + if (local_read(&mts->open_count) == 0) {
85561 /* We don't need a spinlock here, because this is just called
85562 if the device has not been opened before.
85563 So there aren't any IRQs from the device */
85564 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
85565
85566 msleep(50);
85567 }
85568 - ++(mts->open_count);
85569 + local_inc(&mts->open_count);
85570
85571 return 0;
85572 }
85573 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
85574 struct mts64 *mts = substream->rmidi->private_data;
85575 unsigned long flags;
85576
85577 - --(mts->open_count);
85578 - if (mts->open_count == 0) {
85579 + if (local_dec_return(&mts->open_count) == 0) {
85580 /* We need the spinlock_irqsave here because we can still
85581 have IRQs at this point */
85582 spin_lock_irqsave(&mts->lock, flags);
85583 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
85584
85585 msleep(500);
85586
85587 - } else if (mts->open_count < 0)
85588 - mts->open_count = 0;
85589 + } else if (local_read(&mts->open_count) < 0)
85590 + local_set(&mts->open_count, 0);
85591
85592 return 0;
85593 }
85594 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
85595 index b953fb4..1999c01 100644
85596 --- a/sound/drivers/opl4/opl4_lib.c
85597 +++ b/sound/drivers/opl4/opl4_lib.c
85598 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
85599 MODULE_DESCRIPTION("OPL4 driver");
85600 MODULE_LICENSE("GPL");
85601
85602 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
85603 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
85604 {
85605 int timeout = 10;
85606 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
85607 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
85608 index 991018d..8984740 100644
85609 --- a/sound/drivers/portman2x4.c
85610 +++ b/sound/drivers/portman2x4.c
85611 @@ -48,6 +48,7 @@
85612 #include <sound/initval.h>
85613 #include <sound/rawmidi.h>
85614 #include <sound/control.h>
85615 +#include <asm/local.h>
85616
85617 #define CARD_NAME "Portman 2x4"
85618 #define DRIVER_NAME "portman"
85619 @@ -85,7 +86,7 @@ struct portman {
85620 struct pardevice *pardev;
85621 int pardev_claimed;
85622
85623 - int open_count;
85624 + local_t open_count;
85625 int mode[PORTMAN_NUM_INPUT_PORTS];
85626 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
85627 };
85628 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
85629 index ea995af..f1bfa37 100644
85630 --- a/sound/firewire/amdtp.c
85631 +++ b/sound/firewire/amdtp.c
85632 @@ -389,7 +389,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
85633 ptr = s->pcm_buffer_pointer + data_blocks;
85634 if (ptr >= pcm->runtime->buffer_size)
85635 ptr -= pcm->runtime->buffer_size;
85636 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
85637 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
85638
85639 s->pcm_period_pointer += data_blocks;
85640 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
85641 @@ -557,7 +557,7 @@ EXPORT_SYMBOL(amdtp_out_stream_pcm_pointer);
85642 */
85643 void amdtp_out_stream_update(struct amdtp_out_stream *s)
85644 {
85645 - ACCESS_ONCE(s->source_node_id_field) =
85646 + ACCESS_ONCE_RW(s->source_node_id_field) =
85647 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
85648 }
85649 EXPORT_SYMBOL(amdtp_out_stream_update);
85650 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
85651 index b680c5e..061b7a0 100644
85652 --- a/sound/firewire/amdtp.h
85653 +++ b/sound/firewire/amdtp.h
85654 @@ -139,7 +139,7 @@ static inline bool amdtp_out_streaming_error(struct amdtp_out_stream *s)
85655 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
85656 struct snd_pcm_substream *pcm)
85657 {
85658 - ACCESS_ONCE(s->pcm) = pcm;
85659 + ACCESS_ONCE_RW(s->pcm) = pcm;
85660 }
85661
85662 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
85663 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
85664 index d428ffe..751ef78 100644
85665 --- a/sound/firewire/isight.c
85666 +++ b/sound/firewire/isight.c
85667 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
85668 ptr += count;
85669 if (ptr >= runtime->buffer_size)
85670 ptr -= runtime->buffer_size;
85671 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
85672 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
85673
85674 isight->period_counter += count;
85675 if (isight->period_counter >= runtime->period_size) {
85676 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
85677 if (err < 0)
85678 return err;
85679
85680 - ACCESS_ONCE(isight->pcm_active) = true;
85681 + ACCESS_ONCE_RW(isight->pcm_active) = true;
85682
85683 return 0;
85684 }
85685 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
85686 {
85687 struct isight *isight = substream->private_data;
85688
85689 - ACCESS_ONCE(isight->pcm_active) = false;
85690 + ACCESS_ONCE_RW(isight->pcm_active) = false;
85691
85692 mutex_lock(&isight->mutex);
85693 isight_stop_streaming(isight);
85694 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
85695
85696 switch (cmd) {
85697 case SNDRV_PCM_TRIGGER_START:
85698 - ACCESS_ONCE(isight->pcm_running) = true;
85699 + ACCESS_ONCE_RW(isight->pcm_running) = true;
85700 break;
85701 case SNDRV_PCM_TRIGGER_STOP:
85702 - ACCESS_ONCE(isight->pcm_running) = false;
85703 + ACCESS_ONCE_RW(isight->pcm_running) = false;
85704 break;
85705 default:
85706 return -EINVAL;
85707 diff --git a/sound/firewire/scs1x.c b/sound/firewire/scs1x.c
85708 index 844a555..985ab83 100644
85709 --- a/sound/firewire/scs1x.c
85710 +++ b/sound/firewire/scs1x.c
85711 @@ -74,7 +74,7 @@ static void scs_output_trigger(struct snd_rawmidi_substream *stream, int up)
85712 {
85713 struct scs *scs = stream->rmidi->private_data;
85714
85715 - ACCESS_ONCE(scs->output) = up ? stream : NULL;
85716 + ACCESS_ONCE_RW(scs->output) = up ? stream : NULL;
85717 if (up) {
85718 scs->output_idle = false;
85719 tasklet_schedule(&scs->tasklet);
85720 @@ -257,7 +257,7 @@ static void scs_input_trigger(struct snd_rawmidi_substream *stream, int up)
85721 {
85722 struct scs *scs = stream->rmidi->private_data;
85723
85724 - ACCESS_ONCE(scs->input) = up ? stream : NULL;
85725 + ACCESS_ONCE_RW(scs->input) = up ? stream : NULL;
85726 }
85727
85728 static void scs_input_escaped_byte(struct snd_rawmidi_substream *stream,
85729 @@ -457,8 +457,8 @@ static int scs_remove(struct device *dev)
85730
85731 snd_card_disconnect(scs->card);
85732
85733 - ACCESS_ONCE(scs->output) = NULL;
85734 - ACCESS_ONCE(scs->input) = NULL;
85735 + ACCESS_ONCE_RW(scs->output) = NULL;
85736 + ACCESS_ONCE_RW(scs->input) = NULL;
85737
85738 wait_event(scs->idle_wait, scs->output_idle);
85739
85740 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
85741 index 048439a..3be9f6f 100644
85742 --- a/sound/oss/sb_audio.c
85743 +++ b/sound/oss/sb_audio.c
85744 @@ -904,7 +904,7 @@ sb16_copy_from_user(int dev,
85745 buf16 = (signed short *)(localbuf + localoffs);
85746 while (c)
85747 {
85748 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
85749 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
85750 if (copy_from_user(lbuf8,
85751 userbuf+useroffs + p,
85752 locallen))
85753 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
85754 index 7d8803a..559f8d0 100644
85755 --- a/sound/oss/swarm_cs4297a.c
85756 +++ b/sound/oss/swarm_cs4297a.c
85757 @@ -2621,7 +2621,6 @@ static int __init cs4297a_init(void)
85758 {
85759 struct cs4297a_state *s;
85760 u32 pwr, id;
85761 - mm_segment_t fs;
85762 int rval;
85763 #ifndef CONFIG_BCM_CS4297A_CSWARM
85764 u64 cfg;
85765 @@ -2711,22 +2710,23 @@ static int __init cs4297a_init(void)
85766 if (!rval) {
85767 char *sb1250_duart_present;
85768
85769 +#if 0
85770 + mm_segment_t fs;
85771 fs = get_fs();
85772 set_fs(KERNEL_DS);
85773 -#if 0
85774 val = SOUND_MASK_LINE;
85775 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
85776 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
85777 val = initvol[i].vol;
85778 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
85779 }
85780 + set_fs(fs);
85781 // cs4297a_write_ac97(s, 0x18, 0x0808);
85782 #else
85783 // cs4297a_write_ac97(s, 0x5e, 0x180);
85784 cs4297a_write_ac97(s, 0x02, 0x0808);
85785 cs4297a_write_ac97(s, 0x18, 0x0808);
85786 #endif
85787 - set_fs(fs);
85788
85789 list_add(&s->list, &cs4297a_devs);
85790
85791 diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
85792 index 4631a23..001ae57 100644
85793 --- a/sound/pci/ymfpci/ymfpci.h
85794 +++ b/sound/pci/ymfpci/ymfpci.h
85795 @@ -358,7 +358,7 @@ struct snd_ymfpci {
85796 spinlock_t reg_lock;
85797 spinlock_t voice_lock;
85798 wait_queue_head_t interrupt_sleep;
85799 - atomic_t interrupt_sleep_count;
85800 + atomic_unchecked_t interrupt_sleep_count;
85801 struct snd_info_entry *proc_entry;
85802 const struct firmware *dsp_microcode;
85803 const struct firmware *controller_microcode;
85804 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
85805 index 22056c5..25d3244 100644
85806 --- a/sound/pci/ymfpci/ymfpci_main.c
85807 +++ b/sound/pci/ymfpci/ymfpci_main.c
85808 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
85809 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
85810 break;
85811 }
85812 - if (atomic_read(&chip->interrupt_sleep_count)) {
85813 - atomic_set(&chip->interrupt_sleep_count, 0);
85814 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
85815 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
85816 wake_up(&chip->interrupt_sleep);
85817 }
85818 __end:
85819 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
85820 continue;
85821 init_waitqueue_entry(&wait, current);
85822 add_wait_queue(&chip->interrupt_sleep, &wait);
85823 - atomic_inc(&chip->interrupt_sleep_count);
85824 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
85825 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
85826 remove_wait_queue(&chip->interrupt_sleep, &wait);
85827 }
85828 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
85829 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
85830 spin_unlock(&chip->reg_lock);
85831
85832 - if (atomic_read(&chip->interrupt_sleep_count)) {
85833 - atomic_set(&chip->interrupt_sleep_count, 0);
85834 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
85835 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
85836 wake_up(&chip->interrupt_sleep);
85837 }
85838 }
85839 @@ -2421,7 +2421,7 @@ int snd_ymfpci_create(struct snd_card *card,
85840 spin_lock_init(&chip->reg_lock);
85841 spin_lock_init(&chip->voice_lock);
85842 init_waitqueue_head(&chip->interrupt_sleep);
85843 - atomic_set(&chip->interrupt_sleep_count, 0);
85844 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
85845 chip->card = card;
85846 chip->pci = pci;
85847 chip->irq = -1;
85848 diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
85849 new file mode 100644
85850 index 0000000..50f2f2f
85851 --- /dev/null
85852 +++ b/tools/gcc/.gitignore
85853 @@ -0,0 +1 @@
85854 +size_overflow_hash.h
85855 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
85856 new file mode 100644
85857 index 0000000..1d09b7e
85858 --- /dev/null
85859 +++ b/tools/gcc/Makefile
85860 @@ -0,0 +1,43 @@
85861 +#CC := gcc
85862 +#PLUGIN_SOURCE_FILES := pax_plugin.c
85863 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
85864 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
85865 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
85866 +
85867 +ifeq ($(PLUGINCC),$(HOSTCC))
85868 +HOSTLIBS := hostlibs
85869 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
85870 +else
85871 +HOSTLIBS := hostcxxlibs
85872 +HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
85873 +endif
85874 +
85875 +$(HOSTLIBS)-y := constify_plugin.so
85876 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
85877 +$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
85878 +$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
85879 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
85880 +$(HOSTLIBS)-y += colorize_plugin.so
85881 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
85882 +$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
85883 +
85884 +always := $($(HOSTLIBS)-y)
85885 +
85886 +constify_plugin-objs := constify_plugin.o
85887 +stackleak_plugin-objs := stackleak_plugin.o
85888 +kallocstat_plugin-objs := kallocstat_plugin.o
85889 +kernexec_plugin-objs := kernexec_plugin.o
85890 +checker_plugin-objs := checker_plugin.o
85891 +colorize_plugin-objs := colorize_plugin.o
85892 +size_overflow_plugin-objs := size_overflow_plugin.o
85893 +latent_entropy_plugin-objs := latent_entropy_plugin.o
85894 +
85895 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
85896 +
85897 +quiet_cmd_build_size_overflow_hash = GENHASH $@
85898 + cmd_build_size_overflow_hash = \
85899 + $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
85900 +$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
85901 + $(call if_changed,build_size_overflow_hash)
85902 +
85903 +targets += size_overflow_hash.h
85904 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
85905 new file mode 100644
85906 index 0000000..d41b5af
85907 --- /dev/null
85908 +++ b/tools/gcc/checker_plugin.c
85909 @@ -0,0 +1,171 @@
85910 +/*
85911 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
85912 + * Licensed under the GPL v2
85913 + *
85914 + * Note: the choice of the license means that the compilation process is
85915 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
85916 + * but for the kernel it doesn't matter since it doesn't link against
85917 + * any of the gcc libraries
85918 + *
85919 + * gcc plugin to implement various sparse (source code checker) features
85920 + *
85921 + * TODO:
85922 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
85923 + *
85924 + * BUGS:
85925 + * - none known
85926 + */
85927 +#include "gcc-plugin.h"
85928 +#include "config.h"
85929 +#include "system.h"
85930 +#include "coretypes.h"
85931 +#include "tree.h"
85932 +#include "tree-pass.h"
85933 +#include "flags.h"
85934 +#include "intl.h"
85935 +#include "toplev.h"
85936 +#include "plugin.h"
85937 +//#include "expr.h" where are you...
85938 +#include "diagnostic.h"
85939 +#include "plugin-version.h"
85940 +#include "tm.h"
85941 +#include "function.h"
85942 +#include "basic-block.h"
85943 +#include "gimple.h"
85944 +#include "rtl.h"
85945 +#include "emit-rtl.h"
85946 +#include "tree-flow.h"
85947 +#include "target.h"
85948 +
85949 +extern void c_register_addr_space (const char *str, addr_space_t as);
85950 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
85951 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
85952 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
85953 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
85954 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
85955 +
85956 +extern void print_gimple_stmt(FILE *, gimple, int, int);
85957 +extern rtx emit_move_insn(rtx x, rtx y);
85958 +
85959 +int plugin_is_GPL_compatible;
85960 +
85961 +static struct plugin_info checker_plugin_info = {
85962 + .version = "201111150100",
85963 +};
85964 +
85965 +#define ADDR_SPACE_KERNEL 0
85966 +#define ADDR_SPACE_FORCE_KERNEL 1
85967 +#define ADDR_SPACE_USER 2
85968 +#define ADDR_SPACE_FORCE_USER 3
85969 +#define ADDR_SPACE_IOMEM 0
85970 +#define ADDR_SPACE_FORCE_IOMEM 0
85971 +#define ADDR_SPACE_PERCPU 0
85972 +#define ADDR_SPACE_FORCE_PERCPU 0
85973 +#define ADDR_SPACE_RCU 0
85974 +#define ADDR_SPACE_FORCE_RCU 0
85975 +
85976 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
85977 +{
85978 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
85979 +}
85980 +
85981 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
85982 +{
85983 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
85984 +}
85985 +
85986 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
85987 +{
85988 + return default_addr_space_valid_pointer_mode(mode, as);
85989 +}
85990 +
85991 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
85992 +{
85993 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
85994 +}
85995 +
85996 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
85997 +{
85998 + return default_addr_space_legitimize_address(x, oldx, mode, as);
85999 +}
86000 +
86001 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
86002 +{
86003 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
86004 + return true;
86005 +
86006 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
86007 + return true;
86008 +
86009 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
86010 + return true;
86011 +
86012 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
86013 + return true;
86014 +
86015 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
86016 + return true;
86017 +
86018 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
86019 + return true;
86020 +
86021 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
86022 + return true;
86023 +
86024 + return subset == superset;
86025 +}
86026 +
86027 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
86028 +{
86029 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
86030 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
86031 +
86032 + return op;
86033 +}
86034 +
86035 +static void register_checker_address_spaces(void *event_data, void *data)
86036 +{
86037 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
86038 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
86039 + c_register_addr_space("__user", ADDR_SPACE_USER);
86040 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
86041 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
86042 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
86043 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
86044 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
86045 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
86046 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
86047 +
86048 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
86049 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
86050 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
86051 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
86052 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
86053 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
86054 + targetm.addr_space.convert = checker_addr_space_convert;
86055 +}
86056 +
86057 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86058 +{
86059 + const char * const plugin_name = plugin_info->base_name;
86060 + const int argc = plugin_info->argc;
86061 + const struct plugin_argument * const argv = plugin_info->argv;
86062 + int i;
86063 +
86064 + if (!plugin_default_version_check(version, &gcc_version)) {
86065 + error(G_("incompatible gcc/plugin versions"));
86066 + return 1;
86067 + }
86068 +
86069 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
86070 +
86071 + for (i = 0; i < argc; ++i)
86072 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86073 +
86074 + if (TARGET_64BIT == 0)
86075 + return 0;
86076 +
86077 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
86078 +
86079 + return 0;
86080 +}
86081 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
86082 new file mode 100644
86083 index 0000000..414fe5e
86084 --- /dev/null
86085 +++ b/tools/gcc/colorize_plugin.c
86086 @@ -0,0 +1,151 @@
86087 +/*
86088 + * Copyright 2012-2013 by PaX Team <pageexec@freemail.hu>
86089 + * Licensed under the GPL v2
86090 + *
86091 + * Note: the choice of the license means that the compilation process is
86092 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86093 + * but for the kernel it doesn't matter since it doesn't link against
86094 + * any of the gcc libraries
86095 + *
86096 + * gcc plugin to colorize diagnostic output
86097 + *
86098 + */
86099 +
86100 +#include "gcc-plugin.h"
86101 +#include "config.h"
86102 +#include "system.h"
86103 +#include "coretypes.h"
86104 +#include "tree.h"
86105 +#include "tree-pass.h"
86106 +#include "flags.h"
86107 +#include "intl.h"
86108 +#include "toplev.h"
86109 +#include "plugin.h"
86110 +#include "diagnostic.h"
86111 +#include "plugin-version.h"
86112 +#include "tm.h"
86113 +
86114 +int plugin_is_GPL_compatible;
86115 +
86116 +static struct plugin_info colorize_plugin_info = {
86117 + .version = "201302112000",
86118 + .help = NULL,
86119 +};
86120 +
86121 +#define GREEN "\033[32m\033[2m"
86122 +#define LIGHTGREEN "\033[32m\033[1m"
86123 +#define YELLOW "\033[33m\033[2m"
86124 +#define LIGHTYELLOW "\033[33m\033[1m"
86125 +#define RED "\033[31m\033[2m"
86126 +#define LIGHTRED "\033[31m\033[1m"
86127 +#define BLUE "\033[34m\033[2m"
86128 +#define LIGHTBLUE "\033[34m\033[1m"
86129 +#define BRIGHT "\033[m\033[1m"
86130 +#define NORMAL "\033[m"
86131 +
86132 +static diagnostic_starter_fn old_starter;
86133 +static diagnostic_finalizer_fn old_finalizer;
86134 +
86135 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
86136 +{
86137 + const char *color;
86138 + char *newprefix;
86139 +
86140 + switch (diagnostic->kind) {
86141 + case DK_NOTE:
86142 + color = LIGHTBLUE;
86143 + break;
86144 +
86145 + case DK_PEDWARN:
86146 + case DK_WARNING:
86147 + color = LIGHTYELLOW;
86148 + break;
86149 +
86150 + case DK_ERROR:
86151 + case DK_FATAL:
86152 + case DK_ICE:
86153 + case DK_PERMERROR:
86154 + case DK_SORRY:
86155 + color = LIGHTRED;
86156 + break;
86157 +
86158 + default:
86159 + color = NORMAL;
86160 + }
86161 +
86162 + old_starter(context, diagnostic);
86163 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
86164 + return;
86165 + pp_destroy_prefix(context->printer);
86166 + pp_set_prefix(context->printer, newprefix);
86167 +}
86168 +
86169 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
86170 +{
86171 + old_finalizer(context, diagnostic);
86172 +}
86173 +
86174 +static void colorize_arm(void)
86175 +{
86176 + old_starter = diagnostic_starter(global_dc);
86177 + old_finalizer = diagnostic_finalizer(global_dc);
86178 +
86179 + diagnostic_starter(global_dc) = start_colorize;
86180 + diagnostic_finalizer(global_dc) = finalize_colorize;
86181 +}
86182 +
86183 +static unsigned int execute_colorize_rearm(void)
86184 +{
86185 + if (diagnostic_starter(global_dc) == start_colorize)
86186 + return 0;
86187 +
86188 + colorize_arm();
86189 + return 0;
86190 +}
86191 +
86192 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
86193 + .pass = {
86194 + .type = SIMPLE_IPA_PASS,
86195 + .name = "colorize_rearm",
86196 +#if BUILDING_GCC_VERSION >= 4008
86197 + .optinfo_flags = OPTGROUP_NONE,
86198 +#endif
86199 + .gate = NULL,
86200 + .execute = execute_colorize_rearm,
86201 + .sub = NULL,
86202 + .next = NULL,
86203 + .static_pass_number = 0,
86204 + .tv_id = TV_NONE,
86205 + .properties_required = 0,
86206 + .properties_provided = 0,
86207 + .properties_destroyed = 0,
86208 + .todo_flags_start = 0,
86209 + .todo_flags_finish = 0
86210 + }
86211 +};
86212 +
86213 +static void colorize_start_unit(void *gcc_data, void *user_data)
86214 +{
86215 + colorize_arm();
86216 +}
86217 +
86218 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86219 +{
86220 + const char * const plugin_name = plugin_info->base_name;
86221 + struct register_pass_info colorize_rearm_pass_info = {
86222 + .pass = &pass_ipa_colorize_rearm.pass,
86223 + .reference_pass_name = "*free_lang_data",
86224 + .ref_pass_instance_number = 1,
86225 + .pos_op = PASS_POS_INSERT_AFTER
86226 + };
86227 +
86228 + if (!plugin_default_version_check(version, &gcc_version)) {
86229 + error(G_("incompatible gcc/plugin versions"));
86230 + return 1;
86231 + }
86232 +
86233 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
86234 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
86235 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
86236 + return 0;
86237 +}
86238 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
86239 new file mode 100644
86240 index 0000000..8bd6f995
86241 --- /dev/null
86242 +++ b/tools/gcc/constify_plugin.c
86243 @@ -0,0 +1,359 @@
86244 +/*
86245 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
86246 + * Copyright 2011-2013 by PaX Team <pageexec@freemail.hu>
86247 + * Licensed under the GPL v2, or (at your option) v3
86248 + *
86249 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
86250 + *
86251 + * Homepage:
86252 + * http://www.grsecurity.net/~ephox/const_plugin/
86253 + *
86254 + * Usage:
86255 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
86256 + * $ gcc -fplugin=constify_plugin.so test.c -O2
86257 + */
86258 +
86259 +#include "gcc-plugin.h"
86260 +#include "config.h"
86261 +#include "system.h"
86262 +#include "coretypes.h"
86263 +#include "tree.h"
86264 +#include "tree-pass.h"
86265 +#include "flags.h"
86266 +#include "intl.h"
86267 +#include "toplev.h"
86268 +#include "plugin.h"
86269 +#include "diagnostic.h"
86270 +#include "plugin-version.h"
86271 +#include "tm.h"
86272 +#include "function.h"
86273 +#include "basic-block.h"
86274 +#include "gimple.h"
86275 +#include "rtl.h"
86276 +#include "emit-rtl.h"
86277 +#include "tree-flow.h"
86278 +
86279 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
86280 +
86281 +int plugin_is_GPL_compatible;
86282 +
86283 +static struct plugin_info const_plugin_info = {
86284 + .version = "201302112000",
86285 + .help = "no-constify\tturn off constification\n",
86286 +};
86287 +
86288 +static tree get_field_type(tree field)
86289 +{
86290 + return strip_array_types(TREE_TYPE(field));
86291 +}
86292 +
86293 +static bool walk_struct(tree node, bool all);
86294 +static void deconstify_tree(tree node);
86295 +
86296 +static void deconstify_type(tree type)
86297 +{
86298 + tree field;
86299 +
86300 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
86301 + tree fieldtype = get_field_type(field);
86302 +
86303 + if (TREE_CODE(fieldtype) != RECORD_TYPE && TREE_CODE(fieldtype) != UNION_TYPE)
86304 + continue;
86305 + if (!TYPE_READONLY(fieldtype))
86306 + continue;
86307 + if (!walk_struct(fieldtype, true))
86308 + continue;
86309 +
86310 + deconstify_tree(field);
86311 + TREE_READONLY(field) = 0;
86312 + }
86313 + TYPE_READONLY(type) = 0;
86314 + C_TYPE_FIELDS_READONLY(type) = 0;
86315 +}
86316 +
86317 +static void deconstify_tree(tree node)
86318 +{
86319 + tree old_type, new_type, field;
86320 +
86321 +// TREE_READONLY(node) = 0;
86322 + old_type = TREE_TYPE(node);
86323 + while (TREE_CODE(old_type) == ARRAY_TYPE && TREE_CODE(TREE_TYPE(old_type)) != ARRAY_TYPE) {
86324 + node = old_type;
86325 + old_type = TREE_TYPE(old_type);
86326 + }
86327 +
86328 + gcc_assert(TREE_CODE(old_type) == RECORD_TYPE || TREE_CODE(old_type) == UNION_TYPE);
86329 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
86330 +
86331 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
86332 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
86333 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
86334 + DECL_FIELD_CONTEXT(field) = new_type;
86335 +
86336 + deconstify_type(new_type);
86337 +
86338 + TREE_TYPE(node) = new_type;
86339 +}
86340 +
86341 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
86342 +{
86343 + tree type;
86344 +
86345 + *no_add_attrs = true;
86346 + if (TREE_CODE(*node) == FUNCTION_DECL) {
86347 + error("%qE attribute does not apply to functions", name);
86348 + return NULL_TREE;
86349 + }
86350 +
86351 + if (TREE_CODE(*node) == VAR_DECL) {
86352 + error("%qE attribute does not apply to variables", name);
86353 + return NULL_TREE;
86354 + }
86355 +
86356 + if (TYPE_P(*node)) {
86357 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
86358 + *no_add_attrs = false;
86359 + else
86360 + error("%qE attribute applies to struct and union types only", name);
86361 + return NULL_TREE;
86362 + }
86363 +
86364 + type = TREE_TYPE(*node);
86365 +
86366 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
86367 + error("%qE attribute applies to struct and union types only", name);
86368 + return NULL_TREE;
86369 + }
86370 +
86371 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
86372 + error("%qE attribute is already applied to the type", name);
86373 + return NULL_TREE;
86374 + }
86375 +
86376 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
86377 + error("%qE attribute used on type that is not constified", name);
86378 + return NULL_TREE;
86379 + }
86380 +
86381 + if (TREE_CODE(*node) == TYPE_DECL) {
86382 + deconstify_tree(*node);
86383 + return NULL_TREE;
86384 + }
86385 +
86386 + return NULL_TREE;
86387 +}
86388 +
86389 +static void constify_type(tree type)
86390 +{
86391 + TYPE_READONLY(type) = 1;
86392 + C_TYPE_FIELDS_READONLY(type) = 1;
86393 +}
86394 +
86395 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
86396 +{
86397 + *no_add_attrs = true;
86398 + if (!TYPE_P(*node)) {
86399 + error("%qE attribute applies to types only", name);
86400 + return NULL_TREE;
86401 + }
86402 +
86403 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
86404 + error("%qE attribute applies to struct and union types only", name);
86405 + return NULL_TREE;
86406 + }
86407 +
86408 + *no_add_attrs = false;
86409 + constify_type(*node);
86410 + return NULL_TREE;
86411 +}
86412 +
86413 +static struct attribute_spec no_const_attr = {
86414 + .name = "no_const",
86415 + .min_length = 0,
86416 + .max_length = 0,
86417 + .decl_required = false,
86418 + .type_required = false,
86419 + .function_type_required = false,
86420 + .handler = handle_no_const_attribute,
86421 +#if BUILDING_GCC_VERSION >= 4007
86422 + .affects_type_identity = true
86423 +#endif
86424 +};
86425 +
86426 +static struct attribute_spec do_const_attr = {
86427 + .name = "do_const",
86428 + .min_length = 0,
86429 + .max_length = 0,
86430 + .decl_required = false,
86431 + .type_required = false,
86432 + .function_type_required = false,
86433 + .handler = handle_do_const_attribute,
86434 +#if BUILDING_GCC_VERSION >= 4007
86435 + .affects_type_identity = true
86436 +#endif
86437 +};
86438 +
86439 +static void register_attributes(void *event_data, void *data)
86440 +{
86441 + register_attribute(&no_const_attr);
86442 + register_attribute(&do_const_attr);
86443 +}
86444 +
86445 +static bool is_fptr(tree field)
86446 +{
86447 + tree ptr = get_field_type(field);
86448 +
86449 + if (TREE_CODE(ptr) != POINTER_TYPE)
86450 + return false;
86451 +
86452 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
86453 +}
86454 +
86455 +static bool walk_struct(tree node, bool all)
86456 +{
86457 + tree field;
86458 +
86459 + if (TYPE_FIELDS(node) == NULL_TREE)
86460 + return false;
86461 +
86462 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(node)))
86463 + return true;
86464 +
86465 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
86466 + gcc_assert(!TYPE_READONLY(node));
86467 + deconstify_type(node);
86468 + return false;
86469 + }
86470 +
86471 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
86472 + tree type = get_field_type(field);
86473 + enum tree_code code = TREE_CODE(type);
86474 +
86475 + if (node == type)
86476 + return false;
86477 + if (code == RECORD_TYPE || code == UNION_TYPE) {
86478 + if (!(walk_struct(type, all)))
86479 + return false;
86480 + } else if (!is_fptr(field) && (!all || !TREE_READONLY(field)))
86481 + return false;
86482 + }
86483 + return true;
86484 +}
86485 +
86486 +static void finish_type(void *event_data, void *data)
86487 +{
86488 + tree type = (tree)event_data;
86489 +
86490 + if (type == NULL_TREE || type == error_mark_node)
86491 + return;
86492 +
86493 + if (TYPE_READONLY(type))
86494 + return;
86495 +
86496 + if (walk_struct(type, true))
86497 + constify_type(type);
86498 + else
86499 + deconstify_type(type);
86500 +}
86501 +
86502 +static unsigned int check_local_variables(void)
86503 +{
86504 + unsigned int ret = 0;
86505 + tree var;
86506 +
86507 +#if BUILDING_GCC_VERSION == 4005
86508 + tree vars;
86509 +#else
86510 + unsigned int i;
86511 +#endif
86512 +
86513 +#if BUILDING_GCC_VERSION == 4005
86514 + for (vars = cfun->local_decls; vars; vars = TREE_CHAIN(vars)) {
86515 + var = TREE_VALUE(vars);
86516 +#else
86517 + FOR_EACH_LOCAL_DECL(cfun, i, var) {
86518 +#endif
86519 + tree type = TREE_TYPE(var);
86520 +
86521 + gcc_assert(DECL_P(var));
86522 + if (is_global_var(var))
86523 + continue;
86524 +
86525 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
86526 + continue;
86527 +
86528 + if (!TYPE_READONLY(type))
86529 + continue;
86530 +
86531 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
86532 +// continue;
86533 +
86534 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
86535 + continue;
86536 +
86537 + if (walk_struct(type, false)) {
86538 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
86539 + ret = 1;
86540 + }
86541 + }
86542 + return ret;
86543 +}
86544 +
86545 +struct gimple_opt_pass pass_local_variable = {
86546 + {
86547 + .type = GIMPLE_PASS,
86548 + .name = "check_local_variables",
86549 +#if BUILDING_GCC_VERSION >= 4008
86550 + .optinfo_flags = OPTGROUP_NONE,
86551 +#endif
86552 + .gate = NULL,
86553 + .execute = check_local_variables,
86554 + .sub = NULL,
86555 + .next = NULL,
86556 + .static_pass_number = 0,
86557 + .tv_id = TV_NONE,
86558 + .properties_required = 0,
86559 + .properties_provided = 0,
86560 + .properties_destroyed = 0,
86561 + .todo_flags_start = 0,
86562 + .todo_flags_finish = 0
86563 + }
86564 +};
86565 +
86566 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86567 +{
86568 + const char * const plugin_name = plugin_info->base_name;
86569 + const int argc = plugin_info->argc;
86570 + const struct plugin_argument * const argv = plugin_info->argv;
86571 + int i;
86572 + bool constify = true;
86573 +
86574 + struct register_pass_info local_variable_pass_info = {
86575 + .pass = &pass_local_variable.pass,
86576 + .reference_pass_name = "ssa",
86577 + .ref_pass_instance_number = 1,
86578 + .pos_op = PASS_POS_INSERT_BEFORE
86579 + };
86580 +
86581 + if (!plugin_default_version_check(version, &gcc_version)) {
86582 + error(G_("incompatible gcc/plugin versions"));
86583 + return 1;
86584 + }
86585 +
86586 + for (i = 0; i < argc; ++i) {
86587 + if (!(strcmp(argv[i].key, "no-constify"))) {
86588 + constify = false;
86589 + continue;
86590 + }
86591 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86592 + }
86593 +
86594 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
86595 + if (constify) {
86596 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
86597 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
86598 + }
86599 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
86600 +
86601 + return 0;
86602 +}
86603 diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
86604 new file mode 100644
86605 index 0000000..e518932
86606 --- /dev/null
86607 +++ b/tools/gcc/generate_size_overflow_hash.sh
86608 @@ -0,0 +1,94 @@
86609 +#!/bin/bash
86610 +
86611 +# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
86612 +
86613 +header1="size_overflow_hash.h"
86614 +database="size_overflow_hash.data"
86615 +n=65536
86616 +
86617 +usage() {
86618 +cat <<EOF
86619 +usage: $0 options
86620 +OPTIONS:
86621 + -h|--help help
86622 + -o header file
86623 + -d database file
86624 + -n hash array size
86625 +EOF
86626 + return 0
86627 +}
86628 +
86629 +while true
86630 +do
86631 + case "$1" in
86632 + -h|--help) usage && exit 0;;
86633 + -n) n=$2; shift 2;;
86634 + -o) header1="$2"; shift 2;;
86635 + -d) database="$2"; shift 2;;
86636 + --) shift 1; break ;;
86637 + *) break ;;
86638 + esac
86639 +done
86640 +
86641 +create_defines() {
86642 + for i in `seq 0 31`
86643 + do
86644 + echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
86645 + done
86646 + echo >> "$header1"
86647 +}
86648 +
86649 +create_structs() {
86650 + rm -f "$header1"
86651 +
86652 + create_defines
86653 +
86654 + cat "$database" | while read data
86655 + do
86656 + data_array=($data)
86657 + struct_hash_name="${data_array[0]}"
86658 + funcn="${data_array[1]}"
86659 + params="${data_array[2]}"
86660 + next="${data_array[4]}"
86661 +
86662 + echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
86663 +
86664 + echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
86665 + echo -en "\t.param\t= " >> "$header1"
86666 + line=
86667 + for param_num in ${params//-/ };
86668 + do
86669 + line="${line}PARAM"$param_num"|"
86670 + done
86671 +
86672 + echo -e "${line%?},\n};\n" >> "$header1"
86673 + done
86674 +}
86675 +
86676 +create_headers() {
86677 + echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1"
86678 +}
86679 +
86680 +create_array_elements() {
86681 + index=0
86682 + grep -v "nohasharray" $database | sort -n -k 4 | while read data
86683 + do
86684 + data_array=($data)
86685 + i="${data_array[3]}"
86686 + hash="${data_array[0]}"
86687 + while [[ $index -lt $i ]]
86688 + do
86689 + echo -e "\t["$index"]\t= NULL," >> "$header1"
86690 + index=$(($index + 1))
86691 + done
86692 + index=$(($index + 1))
86693 + echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
86694 + done
86695 + echo '};' >> $header1
86696 +}
86697 +
86698 +create_structs
86699 +create_headers
86700 +create_array_elements
86701 +
86702 +exit 0
86703 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
86704 new file mode 100644
86705 index 0000000..568b360
86706 --- /dev/null
86707 +++ b/tools/gcc/kallocstat_plugin.c
86708 @@ -0,0 +1,170 @@
86709 +/*
86710 + * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
86711 + * Licensed under the GPL v2
86712 + *
86713 + * Note: the choice of the license means that the compilation process is
86714 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86715 + * but for the kernel it doesn't matter since it doesn't link against
86716 + * any of the gcc libraries
86717 + *
86718 + * gcc plugin to find the distribution of k*alloc sizes
86719 + *
86720 + * TODO:
86721 + *
86722 + * BUGS:
86723 + * - none known
86724 + */
86725 +#include "gcc-plugin.h"
86726 +#include "config.h"
86727 +#include "system.h"
86728 +#include "coretypes.h"
86729 +#include "tree.h"
86730 +#include "tree-pass.h"
86731 +#include "flags.h"
86732 +#include "intl.h"
86733 +#include "toplev.h"
86734 +#include "plugin.h"
86735 +//#include "expr.h" where are you...
86736 +#include "diagnostic.h"
86737 +#include "plugin-version.h"
86738 +#include "tm.h"
86739 +#include "function.h"
86740 +#include "basic-block.h"
86741 +#include "gimple.h"
86742 +#include "rtl.h"
86743 +#include "emit-rtl.h"
86744 +
86745 +extern void print_gimple_stmt(FILE *, gimple, int, int);
86746 +
86747 +int plugin_is_GPL_compatible;
86748 +
86749 +static const char * const kalloc_functions[] = {
86750 + "__kmalloc",
86751 + "kmalloc",
86752 + "kmalloc_large",
86753 + "kmalloc_node",
86754 + "kmalloc_order",
86755 + "kmalloc_order_trace",
86756 + "kmalloc_slab",
86757 + "kzalloc",
86758 + "kzalloc_node",
86759 +};
86760 +
86761 +static struct plugin_info kallocstat_plugin_info = {
86762 + .version = "201302112000",
86763 +};
86764 +
86765 +static unsigned int execute_kallocstat(void);
86766 +
86767 +static struct gimple_opt_pass kallocstat_pass = {
86768 + .pass = {
86769 + .type = GIMPLE_PASS,
86770 + .name = "kallocstat",
86771 +#if BUILDING_GCC_VERSION >= 4008
86772 + .optinfo_flags = OPTGROUP_NONE,
86773 +#endif
86774 + .gate = NULL,
86775 + .execute = execute_kallocstat,
86776 + .sub = NULL,
86777 + .next = NULL,
86778 + .static_pass_number = 0,
86779 + .tv_id = TV_NONE,
86780 + .properties_required = 0,
86781 + .properties_provided = 0,
86782 + .properties_destroyed = 0,
86783 + .todo_flags_start = 0,
86784 + .todo_flags_finish = 0
86785 + }
86786 +};
86787 +
86788 +static bool is_kalloc(const char *fnname)
86789 +{
86790 + size_t i;
86791 +
86792 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
86793 + if (!strcmp(fnname, kalloc_functions[i]))
86794 + return true;
86795 + return false;
86796 +}
86797 +
86798 +static unsigned int execute_kallocstat(void)
86799 +{
86800 + basic_block bb;
86801 +
86802 + // 1. loop through BBs and GIMPLE statements
86803 + FOR_EACH_BB(bb) {
86804 + gimple_stmt_iterator gsi;
86805 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
86806 + // gimple match:
86807 + tree fndecl, size;
86808 + gimple call_stmt;
86809 + const char *fnname;
86810 +
86811 + // is it a call
86812 + call_stmt = gsi_stmt(gsi);
86813 + if (!is_gimple_call(call_stmt))
86814 + continue;
86815 + fndecl = gimple_call_fndecl(call_stmt);
86816 + if (fndecl == NULL_TREE)
86817 + continue;
86818 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
86819 + continue;
86820 +
86821 + // is it a call to k*alloc
86822 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
86823 + if (!is_kalloc(fnname))
86824 + continue;
86825 +
86826 + // is the size arg the result of a simple const assignment
86827 + size = gimple_call_arg(call_stmt, 0);
86828 + while (true) {
86829 + gimple def_stmt;
86830 + expanded_location xloc;
86831 + size_t size_val;
86832 +
86833 + if (TREE_CODE(size) != SSA_NAME)
86834 + break;
86835 + def_stmt = SSA_NAME_DEF_STMT(size);
86836 + if (!def_stmt || !is_gimple_assign(def_stmt))
86837 + break;
86838 + if (gimple_num_ops(def_stmt) != 2)
86839 + break;
86840 + size = gimple_assign_rhs1(def_stmt);
86841 + if (!TREE_CONSTANT(size))
86842 + continue;
86843 + xloc = expand_location(gimple_location(def_stmt));
86844 + if (!xloc.file)
86845 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
86846 + size_val = TREE_INT_CST_LOW(size);
86847 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
86848 + break;
86849 + }
86850 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
86851 +//debug_tree(gimple_call_fn(call_stmt));
86852 +//print_node(stderr, "pax", fndecl, 4);
86853 + }
86854 + }
86855 +
86856 + return 0;
86857 +}
86858 +
86859 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86860 +{
86861 + const char * const plugin_name = plugin_info->base_name;
86862 + struct register_pass_info kallocstat_pass_info = {
86863 + .pass = &kallocstat_pass.pass,
86864 + .reference_pass_name = "ssa",
86865 + .ref_pass_instance_number = 1,
86866 + .pos_op = PASS_POS_INSERT_AFTER
86867 + };
86868 +
86869 + if (!plugin_default_version_check(version, &gcc_version)) {
86870 + error(G_("incompatible gcc/plugin versions"));
86871 + return 1;
86872 + }
86873 +
86874 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
86875 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
86876 +
86877 + return 0;
86878 +}
86879 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
86880 new file mode 100644
86881 index 0000000..0408e06
86882 --- /dev/null
86883 +++ b/tools/gcc/kernexec_plugin.c
86884 @@ -0,0 +1,465 @@
86885 +/*
86886 + * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
86887 + * Licensed under the GPL v2
86888 + *
86889 + * Note: the choice of the license means that the compilation process is
86890 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86891 + * but for the kernel it doesn't matter since it doesn't link against
86892 + * any of the gcc libraries
86893 + *
86894 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
86895 + *
86896 + * TODO:
86897 + *
86898 + * BUGS:
86899 + * - none known
86900 + */
86901 +#include "gcc-plugin.h"
86902 +#include "config.h"
86903 +#include "system.h"
86904 +#include "coretypes.h"
86905 +#include "tree.h"
86906 +#include "tree-pass.h"
86907 +#include "flags.h"
86908 +#include "intl.h"
86909 +#include "toplev.h"
86910 +#include "plugin.h"
86911 +//#include "expr.h" where are you...
86912 +#include "diagnostic.h"
86913 +#include "plugin-version.h"
86914 +#include "tm.h"
86915 +#include "function.h"
86916 +#include "basic-block.h"
86917 +#include "gimple.h"
86918 +#include "rtl.h"
86919 +#include "emit-rtl.h"
86920 +#include "tree-flow.h"
86921 +
86922 +extern void print_gimple_stmt(FILE *, gimple, int, int);
86923 +extern rtx emit_move_insn(rtx x, rtx y);
86924 +
86925 +#if BUILDING_GCC_VERSION <= 4006
86926 +#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN)
86927 +#endif
86928 +
86929 +#if BUILDING_GCC_VERSION >= 4008
86930 +#define TODO_dump_func 0
86931 +#endif
86932 +
86933 +int plugin_is_GPL_compatible;
86934 +
86935 +static struct plugin_info kernexec_plugin_info = {
86936 + .version = "201302112000",
86937 + .help = "method=[bts|or]\tinstrumentation method\n"
86938 +};
86939 +
86940 +static unsigned int execute_kernexec_reload(void);
86941 +static unsigned int execute_kernexec_fptr(void);
86942 +static unsigned int execute_kernexec_retaddr(void);
86943 +static bool kernexec_cmodel_check(void);
86944 +
86945 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
86946 +static void (*kernexec_instrument_retaddr)(rtx);
86947 +
86948 +static struct gimple_opt_pass kernexec_reload_pass = {
86949 + .pass = {
86950 + .type = GIMPLE_PASS,
86951 + .name = "kernexec_reload",
86952 +#if BUILDING_GCC_VERSION >= 4008
86953 + .optinfo_flags = OPTGROUP_NONE,
86954 +#endif
86955 + .gate = kernexec_cmodel_check,
86956 + .execute = execute_kernexec_reload,
86957 + .sub = NULL,
86958 + .next = NULL,
86959 + .static_pass_number = 0,
86960 + .tv_id = TV_NONE,
86961 + .properties_required = 0,
86962 + .properties_provided = 0,
86963 + .properties_destroyed = 0,
86964 + .todo_flags_start = 0,
86965 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
86966 + }
86967 +};
86968 +
86969 +static struct gimple_opt_pass kernexec_fptr_pass = {
86970 + .pass = {
86971 + .type = GIMPLE_PASS,
86972 + .name = "kernexec_fptr",
86973 +#if BUILDING_GCC_VERSION >= 4008
86974 + .optinfo_flags = OPTGROUP_NONE,
86975 +#endif
86976 + .gate = kernexec_cmodel_check,
86977 + .execute = execute_kernexec_fptr,
86978 + .sub = NULL,
86979 + .next = NULL,
86980 + .static_pass_number = 0,
86981 + .tv_id = TV_NONE,
86982 + .properties_required = 0,
86983 + .properties_provided = 0,
86984 + .properties_destroyed = 0,
86985 + .todo_flags_start = 0,
86986 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
86987 + }
86988 +};
86989 +
86990 +static struct rtl_opt_pass kernexec_retaddr_pass = {
86991 + .pass = {
86992 + .type = RTL_PASS,
86993 + .name = "kernexec_retaddr",
86994 +#if BUILDING_GCC_VERSION >= 4008
86995 + .optinfo_flags = OPTGROUP_NONE,
86996 +#endif
86997 + .gate = kernexec_cmodel_check,
86998 + .execute = execute_kernexec_retaddr,
86999 + .sub = NULL,
87000 + .next = NULL,
87001 + .static_pass_number = 0,
87002 + .tv_id = TV_NONE,
87003 + .properties_required = 0,
87004 + .properties_provided = 0,
87005 + .properties_destroyed = 0,
87006 + .todo_flags_start = 0,
87007 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
87008 + }
87009 +};
87010 +
87011 +static bool kernexec_cmodel_check(void)
87012 +{
87013 + tree section;
87014 +
87015 + if (ix86_cmodel != CM_KERNEL)
87016 + return false;
87017 +
87018 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
87019 + if (!section || !TREE_VALUE(section))
87020 + return true;
87021 +
87022 + section = TREE_VALUE(TREE_VALUE(section));
87023 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
87024 + return true;
87025 +
87026 + return false;
87027 +}
87028 +
87029 +/*
87030 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
87031 + */
87032 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
87033 +{
87034 + gimple asm_movabs_stmt;
87035 +
87036 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
87037 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
87038 + gimple_asm_set_volatile(asm_movabs_stmt, true);
87039 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
87040 + update_stmt(asm_movabs_stmt);
87041 +}
87042 +
87043 +/*
87044 + * find all asm() stmts that clobber r10 and add a reload of r10
87045 + */
87046 +static unsigned int execute_kernexec_reload(void)
87047 +{
87048 + basic_block bb;
87049 +
87050 + // 1. loop through BBs and GIMPLE statements
87051 + FOR_EACH_BB(bb) {
87052 + gimple_stmt_iterator gsi;
87053 +
87054 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87055 + // gimple match: __asm__ ("" : : : "r10");
87056 + gimple asm_stmt;
87057 + size_t nclobbers;
87058 +
87059 + // is it an asm ...
87060 + asm_stmt = gsi_stmt(gsi);
87061 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
87062 + continue;
87063 +
87064 + // ... clobbering r10
87065 + nclobbers = gimple_asm_nclobbers(asm_stmt);
87066 + while (nclobbers--) {
87067 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
87068 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
87069 + continue;
87070 + kernexec_reload_fptr_mask(&gsi);
87071 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
87072 + break;
87073 + }
87074 + }
87075 + }
87076 +
87077 + return 0;
87078 +}
87079 +
87080 +/*
87081 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
87082 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
87083 + */
87084 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
87085 +{
87086 + gimple assign_intptr, assign_new_fptr, call_stmt;
87087 + tree intptr, old_fptr, new_fptr, kernexec_mask;
87088 +
87089 + call_stmt = gsi_stmt(*gsi);
87090 + old_fptr = gimple_call_fn(call_stmt);
87091 +
87092 + // create temporary unsigned long variable used for bitops and cast fptr to it
87093 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
87094 +#if BUILDING_GCC_VERSION <= 4007
87095 + add_referenced_var(intptr);
87096 + mark_sym_for_renaming(intptr);
87097 +#endif
87098 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
87099 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
87100 + update_stmt(assign_intptr);
87101 +
87102 + // apply logical or to temporary unsigned long and bitmask
87103 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
87104 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
87105 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
87106 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
87107 + update_stmt(assign_intptr);
87108 +
87109 + // cast temporary unsigned long back to a temporary fptr variable
87110 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
87111 +#if BUILDING_GCC_VERSION <= 4007
87112 + add_referenced_var(new_fptr);
87113 + mark_sym_for_renaming(new_fptr);
87114 +#endif
87115 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
87116 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
87117 + update_stmt(assign_new_fptr);
87118 +
87119 + // replace call stmt fn with the new fptr
87120 + gimple_call_set_fn(call_stmt, new_fptr);
87121 + update_stmt(call_stmt);
87122 +}
87123 +
87124 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
87125 +{
87126 + gimple asm_or_stmt, call_stmt;
87127 + tree old_fptr, new_fptr, input, output;
87128 +#if BUILDING_GCC_VERSION <= 4007
87129 + VEC(tree, gc) *inputs = NULL;
87130 + VEC(tree, gc) *outputs = NULL;
87131 +#else
87132 + vec<tree, va_gc> *inputs = NULL;
87133 + vec<tree, va_gc> *outputs = NULL;
87134 +#endif
87135 +
87136 + call_stmt = gsi_stmt(*gsi);
87137 + old_fptr = gimple_call_fn(call_stmt);
87138 +
87139 + // create temporary fptr variable
87140 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
87141 +#if BUILDING_GCC_VERSION <= 4007
87142 + add_referenced_var(new_fptr);
87143 + mark_sym_for_renaming(new_fptr);
87144 +#endif
87145 +
87146 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
87147 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
87148 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
87149 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
87150 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
87151 +#if BUILDING_GCC_VERSION <= 4007
87152 + VEC_safe_push(tree, gc, inputs, input);
87153 + VEC_safe_push(tree, gc, outputs, output);
87154 +#else
87155 + vec_safe_push(inputs, input);
87156 + vec_safe_push(outputs, output);
87157 +#endif
87158 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
87159 + gimple_asm_set_volatile(asm_or_stmt, true);
87160 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
87161 + update_stmt(asm_or_stmt);
87162 +
87163 + // replace call stmt fn with the new fptr
87164 + gimple_call_set_fn(call_stmt, new_fptr);
87165 + update_stmt(call_stmt);
87166 +}
87167 +
87168 +/*
87169 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
87170 + */
87171 +static unsigned int execute_kernexec_fptr(void)
87172 +{
87173 + basic_block bb;
87174 +
87175 + // 1. loop through BBs and GIMPLE statements
87176 + FOR_EACH_BB(bb) {
87177 + gimple_stmt_iterator gsi;
87178 +
87179 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87180 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
87181 + tree fn;
87182 + gimple call_stmt;
87183 +
87184 + // is it a call ...
87185 + call_stmt = gsi_stmt(gsi);
87186 + if (!is_gimple_call(call_stmt))
87187 + continue;
87188 + fn = gimple_call_fn(call_stmt);
87189 + if (TREE_CODE(fn) == ADDR_EXPR)
87190 + continue;
87191 + if (TREE_CODE(fn) != SSA_NAME)
87192 + gcc_unreachable();
87193 +
87194 + // ... through a function pointer
87195 + if (SSA_NAME_VAR(fn) != NULL_TREE) {
87196 + fn = SSA_NAME_VAR(fn);
87197 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) {
87198 + debug_tree(fn);
87199 + gcc_unreachable();
87200 + }
87201 + }
87202 + fn = TREE_TYPE(fn);
87203 + if (TREE_CODE(fn) != POINTER_TYPE)
87204 + continue;
87205 + fn = TREE_TYPE(fn);
87206 + if (TREE_CODE(fn) != FUNCTION_TYPE)
87207 + continue;
87208 +
87209 + kernexec_instrument_fptr(&gsi);
87210 +
87211 +//debug_tree(gimple_call_fn(call_stmt));
87212 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
87213 + }
87214 + }
87215 +
87216 + return 0;
87217 +}
87218 +
87219 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
87220 +static void kernexec_instrument_retaddr_bts(rtx insn)
87221 +{
87222 + rtx btsq;
87223 + rtvec argvec, constraintvec, labelvec;
87224 + int line;
87225 +
87226 + // create asm volatile("btsq $63,(%%rsp)":::)
87227 + argvec = rtvec_alloc(0);
87228 + constraintvec = rtvec_alloc(0);
87229 + labelvec = rtvec_alloc(0);
87230 + line = expand_location(RTL_LOCATION(insn)).line;
87231 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
87232 + MEM_VOLATILE_P(btsq) = 1;
87233 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
87234 + emit_insn_before(btsq, insn);
87235 +}
87236 +
87237 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
87238 +static void kernexec_instrument_retaddr_or(rtx insn)
87239 +{
87240 + rtx orq;
87241 + rtvec argvec, constraintvec, labelvec;
87242 + int line;
87243 +
87244 + // create asm volatile("orq %%r10,(%%rsp)":::)
87245 + argvec = rtvec_alloc(0);
87246 + constraintvec = rtvec_alloc(0);
87247 + labelvec = rtvec_alloc(0);
87248 + line = expand_location(RTL_LOCATION(insn)).line;
87249 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
87250 + MEM_VOLATILE_P(orq) = 1;
87251 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
87252 + emit_insn_before(orq, insn);
87253 +}
87254 +
87255 +/*
87256 + * find all asm level function returns and forcibly set the highest bit of the return address
87257 + */
87258 +static unsigned int execute_kernexec_retaddr(void)
87259 +{
87260 + rtx insn;
87261 +
87262 + // 1. find function returns
87263 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
87264 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
87265 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
87266 + // (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
87267 + rtx body;
87268 +
87269 + // is it a retn
87270 + if (!JUMP_P(insn))
87271 + continue;
87272 + body = PATTERN(insn);
87273 + if (GET_CODE(body) == PARALLEL)
87274 + body = XVECEXP(body, 0, 0);
87275 + if (!ANY_RETURN_P(body))
87276 + continue;
87277 + kernexec_instrument_retaddr(insn);
87278 + }
87279 +
87280 +// print_simple_rtl(stderr, get_insns());
87281 +// print_rtl(stderr, get_insns());
87282 +
87283 + return 0;
87284 +}
87285 +
87286 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87287 +{
87288 + const char * const plugin_name = plugin_info->base_name;
87289 + const int argc = plugin_info->argc;
87290 + const struct plugin_argument * const argv = plugin_info->argv;
87291 + int i;
87292 + struct register_pass_info kernexec_reload_pass_info = {
87293 + .pass = &kernexec_reload_pass.pass,
87294 + .reference_pass_name = "ssa",
87295 + .ref_pass_instance_number = 1,
87296 + .pos_op = PASS_POS_INSERT_AFTER
87297 + };
87298 + struct register_pass_info kernexec_fptr_pass_info = {
87299 + .pass = &kernexec_fptr_pass.pass,
87300 + .reference_pass_name = "ssa",
87301 + .ref_pass_instance_number = 1,
87302 + .pos_op = PASS_POS_INSERT_AFTER
87303 + };
87304 + struct register_pass_info kernexec_retaddr_pass_info = {
87305 + .pass = &kernexec_retaddr_pass.pass,
87306 + .reference_pass_name = "pro_and_epilogue",
87307 + .ref_pass_instance_number = 1,
87308 + .pos_op = PASS_POS_INSERT_AFTER
87309 + };
87310 +
87311 + if (!plugin_default_version_check(version, &gcc_version)) {
87312 + error(G_("incompatible gcc/plugin versions"));
87313 + return 1;
87314 + }
87315 +
87316 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
87317 +
87318 + if (TARGET_64BIT == 0)
87319 + return 0;
87320 +
87321 + for (i = 0; i < argc; ++i) {
87322 + if (!strcmp(argv[i].key, "method")) {
87323 + if (!argv[i].value) {
87324 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87325 + continue;
87326 + }
87327 + if (!strcmp(argv[i].value, "bts")) {
87328 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
87329 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
87330 + } else if (!strcmp(argv[i].value, "or")) {
87331 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
87332 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
87333 + fix_register("r10", 1, 1);
87334 + } else
87335 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
87336 + continue;
87337 + }
87338 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87339 + }
87340 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
87341 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
87342 +
87343 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
87344 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
87345 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
87346 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
87347 +
87348 + return 0;
87349 +}
87350 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
87351 new file mode 100644
87352 index 0000000..1276616
87353 --- /dev/null
87354 +++ b/tools/gcc/latent_entropy_plugin.c
87355 @@ -0,0 +1,321 @@
87356 +/*
87357 + * Copyright 2012-2013 by the PaX Team <pageexec@freemail.hu>
87358 + * Licensed under the GPL v2
87359 + *
87360 + * Note: the choice of the license means that the compilation process is
87361 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87362 + * but for the kernel it doesn't matter since it doesn't link against
87363 + * any of the gcc libraries
87364 + *
87365 + * gcc plugin to help generate a little bit of entropy from program state,
87366 + * used during boot in the kernel
87367 + *
87368 + * TODO:
87369 + * - add ipa pass to identify not explicitly marked candidate functions
87370 + * - mix in more program state (function arguments/return values, loop variables, etc)
87371 + * - more instrumentation control via attribute parameters
87372 + *
87373 + * BUGS:
87374 + * - LTO needs -flto-partition=none for now
87375 + */
87376 +#include "gcc-plugin.h"
87377 +#include "config.h"
87378 +#include "system.h"
87379 +#include "coretypes.h"
87380 +#include "tree.h"
87381 +#include "tree-pass.h"
87382 +#include "flags.h"
87383 +#include "intl.h"
87384 +#include "toplev.h"
87385 +#include "plugin.h"
87386 +//#include "expr.h" where are you...
87387 +#include "diagnostic.h"
87388 +#include "plugin-version.h"
87389 +#include "tm.h"
87390 +#include "function.h"
87391 +#include "basic-block.h"
87392 +#include "gimple.h"
87393 +#include "rtl.h"
87394 +#include "emit-rtl.h"
87395 +#include "tree-flow.h"
87396 +
87397 +#if BUILDING_GCC_VERSION >= 4008
87398 +#define TODO_dump_func 0
87399 +#endif
87400 +
87401 +int plugin_is_GPL_compatible;
87402 +
87403 +static tree latent_entropy_decl;
87404 +
87405 +static struct plugin_info latent_entropy_plugin_info = {
87406 + .version = "201302112000",
87407 + .help = NULL
87408 +};
87409 +
87410 +static unsigned int execute_latent_entropy(void);
87411 +static bool gate_latent_entropy(void);
87412 +
87413 +static struct gimple_opt_pass latent_entropy_pass = {
87414 + .pass = {
87415 + .type = GIMPLE_PASS,
87416 + .name = "latent_entropy",
87417 +#if BUILDING_GCC_VERSION >= 4008
87418 + .optinfo_flags = OPTGROUP_NONE,
87419 +#endif
87420 + .gate = gate_latent_entropy,
87421 + .execute = execute_latent_entropy,
87422 + .sub = NULL,
87423 + .next = NULL,
87424 + .static_pass_number = 0,
87425 + .tv_id = TV_NONE,
87426 + .properties_required = PROP_gimple_leh | PROP_cfg,
87427 + .properties_provided = 0,
87428 + .properties_destroyed = 0,
87429 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
87430 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
87431 + }
87432 +};
87433 +
87434 +static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
87435 +{
87436 + if (TREE_CODE(*node) != FUNCTION_DECL) {
87437 + *no_add_attrs = true;
87438 + error("%qE attribute only applies to functions", name);
87439 + }
87440 + return NULL_TREE;
87441 +}
87442 +
87443 +static struct attribute_spec latent_entropy_attr = {
87444 + .name = "latent_entropy",
87445 + .min_length = 0,
87446 + .max_length = 0,
87447 + .decl_required = true,
87448 + .type_required = false,
87449 + .function_type_required = false,
87450 + .handler = handle_latent_entropy_attribute,
87451 +#if BUILDING_GCC_VERSION >= 4007
87452 + .affects_type_identity = false
87453 +#endif
87454 +};
87455 +
87456 +static void register_attributes(void *event_data, void *data)
87457 +{
87458 + register_attribute(&latent_entropy_attr);
87459 +}
87460 +
87461 +static bool gate_latent_entropy(void)
87462 +{
87463 + tree latent_entropy_attr;
87464 +
87465 + latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
87466 + return latent_entropy_attr != NULL_TREE;
87467 +}
87468 +
87469 +static unsigned HOST_WIDE_INT seed;
87470 +static unsigned HOST_WIDE_INT get_random_const(void)
87471 +{
87472 + seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
87473 + return seed;
87474 +}
87475 +
87476 +static enum tree_code get_op(tree *rhs)
87477 +{
87478 + static enum tree_code op;
87479 + unsigned HOST_WIDE_INT random_const;
87480 +
87481 + random_const = get_random_const();
87482 +
87483 + switch (op) {
87484 + case BIT_XOR_EXPR:
87485 + op = PLUS_EXPR;
87486 + break;
87487 +
87488 + case PLUS_EXPR:
87489 + if (rhs) {
87490 + op = LROTATE_EXPR;
87491 + random_const &= HOST_BITS_PER_WIDE_INT - 1;
87492 + break;
87493 + }
87494 +
87495 + case LROTATE_EXPR:
87496 + default:
87497 + op = BIT_XOR_EXPR;
87498 + break;
87499 + }
87500 + if (rhs)
87501 + *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
87502 + return op;
87503 +}
87504 +
87505 +static void perturb_local_entropy(basic_block bb, tree local_entropy)
87506 +{
87507 + gimple_stmt_iterator gsi;
87508 + gimple assign;
87509 + tree addxorrol, rhs;
87510 + enum tree_code op;
87511 +
87512 + op = get_op(&rhs);
87513 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
87514 + assign = gimple_build_assign(local_entropy, addxorrol);
87515 +#if BUILDING_GCC_VERSION <= 4007
87516 + find_referenced_vars_in(assign);
87517 +#endif
87518 +//debug_bb(bb);
87519 + gsi = gsi_after_labels(bb);
87520 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
87521 + update_stmt(assign);
87522 +}
87523 +
87524 +static void perturb_latent_entropy(basic_block bb, tree rhs)
87525 +{
87526 + gimple_stmt_iterator gsi;
87527 + gimple assign;
87528 + tree addxorrol, temp;
87529 +
87530 + // 1. create temporary copy of latent_entropy
87531 + temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
87532 +#if BUILDING_GCC_VERSION <= 4007
87533 + add_referenced_var(temp);
87534 + mark_sym_for_renaming(temp);
87535 +#endif
87536 +
87537 + // 2. read...
87538 + assign = gimple_build_assign(temp, latent_entropy_decl);
87539 +#if BUILDING_GCC_VERSION <= 4007
87540 + find_referenced_vars_in(assign);
87541 +#endif
87542 + gsi = gsi_after_labels(bb);
87543 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
87544 + update_stmt(assign);
87545 +
87546 + // 3. ...modify...
87547 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
87548 + assign = gimple_build_assign(temp, addxorrol);
87549 +#if BUILDING_GCC_VERSION <= 4007
87550 + find_referenced_vars_in(assign);
87551 +#endif
87552 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
87553 + update_stmt(assign);
87554 +
87555 + // 4. ...write latent_entropy
87556 + assign = gimple_build_assign(latent_entropy_decl, temp);
87557 +#if BUILDING_GCC_VERSION <= 4007
87558 + find_referenced_vars_in(assign);
87559 +#endif
87560 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
87561 + update_stmt(assign);
87562 +}
87563 +
87564 +static unsigned int execute_latent_entropy(void)
87565 +{
87566 + basic_block bb;
87567 + gimple assign;
87568 + gimple_stmt_iterator gsi;
87569 + tree local_entropy;
87570 +
87571 + if (!latent_entropy_decl) {
87572 + struct varpool_node *node;
87573 +
87574 +#if BUILDING_GCC_VERSION <= 4007
87575 + for (node = varpool_nodes; node; node = node->next) {
87576 + tree var = node->decl;
87577 +#else
87578 + FOR_EACH_VARIABLE(node) {
87579 + tree var = node->symbol.decl;
87580 +#endif
87581 + if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
87582 + continue;
87583 + latent_entropy_decl = var;
87584 +// debug_tree(var);
87585 + break;
87586 + }
87587 + if (!latent_entropy_decl) {
87588 +// debug_tree(current_function_decl);
87589 + return 0;
87590 + }
87591 + }
87592 +
87593 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
87594 +
87595 + // 1. create local entropy variable
87596 + local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
87597 +#if BUILDING_GCC_VERSION <= 4007
87598 + add_referenced_var(local_entropy);
87599 + mark_sym_for_renaming(local_entropy);
87600 +#endif
87601 +
87602 + // 2. initialize local entropy variable
87603 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
87604 + if (dom_info_available_p(CDI_DOMINATORS))
87605 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
87606 + gsi = gsi_start_bb(bb);
87607 +
87608 + assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
87609 +// gimple_set_location(assign, loc);
87610 +#if BUILDING_GCC_VERSION <= 4007
87611 + find_referenced_vars_in(assign);
87612 +#endif
87613 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
87614 + update_stmt(assign);
87615 + bb = bb->next_bb;
87616 +
87617 + // 3. instrument each BB with an operation on the local entropy variable
87618 + while (bb != EXIT_BLOCK_PTR) {
87619 + perturb_local_entropy(bb, local_entropy);
87620 + bb = bb->next_bb;
87621 + };
87622 +
87623 + // 4. mix local entropy into the global entropy variable
87624 + perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
87625 + return 0;
87626 +}
87627 +
87628 +static void start_unit_callback(void *gcc_data, void *user_data)
87629 +{
87630 +#if BUILDING_GCC_VERSION >= 4007
87631 + seed = get_random_seed(false);
87632 +#else
87633 + sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
87634 + seed *= seed;
87635 +#endif
87636 +
87637 + if (in_lto_p)
87638 + return;
87639 +
87640 + // extern u64 latent_entropy
87641 + latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
87642 +
87643 + TREE_STATIC(latent_entropy_decl) = 1;
87644 + TREE_PUBLIC(latent_entropy_decl) = 1;
87645 + TREE_USED(latent_entropy_decl) = 1;
87646 + TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
87647 + DECL_EXTERNAL(latent_entropy_decl) = 1;
87648 + DECL_ARTIFICIAL(latent_entropy_decl) = 0;
87649 + DECL_INITIAL(latent_entropy_decl) = NULL;
87650 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
87651 +// varpool_finalize_decl(latent_entropy_decl);
87652 +// varpool_mark_needed_node(latent_entropy_decl);
87653 +}
87654 +
87655 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87656 +{
87657 + const char * const plugin_name = plugin_info->base_name;
87658 + struct register_pass_info latent_entropy_pass_info = {
87659 + .pass = &latent_entropy_pass.pass,
87660 + .reference_pass_name = "optimized",
87661 + .ref_pass_instance_number = 1,
87662 + .pos_op = PASS_POS_INSERT_BEFORE
87663 + };
87664 +
87665 + if (!plugin_default_version_check(version, &gcc_version)) {
87666 + error(G_("incompatible gcc/plugin versions"));
87667 + return 1;
87668 + }
87669 +
87670 + register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
87671 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
87672 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
87673 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
87674 +
87675 + return 0;
87676 +}
87677 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
87678 new file mode 100644
87679 index 0000000..5921fd7
87680 --- /dev/null
87681 +++ b/tools/gcc/size_overflow_hash.data
87682 @@ -0,0 +1,3713 @@
87683 +intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
87684 +ipwireless_tty_received_49154 ipwireless_tty_received 3 49154 NULL
87685 +batadv_orig_node_del_if_4 batadv_orig_node_del_if 2 4 NULL
87686 +ipw_queue_tx_init_49161 ipw_queue_tx_init 3 49161 NULL
87687 +xfs_efd_init_5463 xfs_efd_init 3 5463 NULL
87688 +recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL
87689 +sctp_tsnmap_grow_32784 sctp_tsnmap_grow 2 32784 NULL
87690 +rt2x00debug_write_bbp_8212 rt2x00debug_write_bbp 3 8212 NULL
87691 +diva_os_malloc_16406 diva_os_malloc 2 16406 NULL
87692 +compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
87693 +xfs_dir2_leaf_getdents_23841 xfs_dir2_leaf_getdents 3 23841 NULL
87694 +ad7879_spi_multi_read_8218 ad7879_spi_multi_read 3 8218 NULL
87695 +carl9170_alloc_27 carl9170_alloc 1 27 NULL
87696 +dvb_dvr_ioctl_49182 dvb_dvr_ioctl 2 49182 NULL
87697 +create_log_8225 create_log 2 8225 NULL
87698 +ieee80211_if_read_tsf_16420 ieee80211_if_read_tsf 3 16420 NULL
87699 +rproc_name_read_32805 rproc_name_read 3 32805 NULL
87700 +rxrpc_server_keyring_16431 rxrpc_server_keyring 3 16431 NULL
87701 +mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
87702 +il4965_rs_sta_dbgfs_stats_table_read_49206 il4965_rs_sta_dbgfs_stats_table_read 3 49206 NULL
87703 +sel_read_policyvers_55 sel_read_policyvers 3 55 NULL nohasharray
87704 +padzero_55 padzero 1 55 &sel_read_policyvers_55
87705 +cfg80211_disconnected_57 cfg80211_disconnected 4 57 NULL
87706 +alloc_wr_24635 alloc_wr 1-2 24635 NULL
87707 +read_file_blob_57406 read_file_blob 3 57406 NULL
87708 +add_rx_skb_8257 add_rx_skb 3 8257 NULL
87709 +enclosure_register_57412 enclosure_register 3 57412 NULL
87710 +t3_init_l2t_8261 t3_init_l2t 1 8261 NULL
87711 +_req_append_segment_41031 _req_append_segment 2 41031 NULL
87712 +gre_manip_pkt_57416 gre_manip_pkt 4 57416 NULL
87713 +netlink_change_ngroups_16457 netlink_change_ngroups 2 16457 NULL
87714 +mISDN_sock_sendmsg_41035 mISDN_sock_sendmsg 4 41035 NULL
87715 +DepcaSignature_80 DepcaSignature 2 80 NULL nohasharray
87716 +crypto_authenc_setkey_80 crypto_authenc_setkey 3 80 &DepcaSignature_80
87717 +init_cdev_8274 init_cdev 1 8274 NULL
87718 +shmem_pread_fast_34147 shmem_pread_fast 3 34147 NULL
87719 +compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL nohasharray
87720 +alloc_ftrace_hash_57431 alloc_ftrace_hash 1 57431 &compat_keyctl_instantiate_key_iov_57431
87721 +copy_to_user_fromio_57432 copy_to_user_fromio 3 57432 NULL
87722 +rproc_recovery_write_8281 rproc_recovery_write 3 8281 NULL
87723 +snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL
87724 +load_msg_95 load_msg 2 95 NULL
87725 +rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
87726 +new_tape_buffer_32866 new_tape_buffer 2 32866 NULL
87727 +acpi_tb_check_xsdt_21862 acpi_tb_check_xsdt 1 21862 NULL
87728 +sys_pselect6_57449 sys_pselect6 1 57449 NULL
87729 +biovec_create_pools_9575 biovec_create_pools 2 9575 NULL
87730 +ieee80211_if_read_rssi_threshold_49260 ieee80211_if_read_rssi_threshold 3 49260 NULL
87731 +tracing_readme_read_16493 tracing_readme_read 3 16493 NULL
87732 +ath6kl_usb_submit_ctrl_in_32880 ath6kl_usb_submit_ctrl_in 6 32880 NULL nohasharray
87733 +cifs_writedata_alloc_32880 cifs_writedata_alloc 1 32880 &ath6kl_usb_submit_ctrl_in_32880
87734 +cfg80211_inform_bss_frame_41078 cfg80211_inform_bss_frame 4 41078 NULL
87735 +send_midi_async_57463 send_midi_async 3 57463 NULL
87736 +sisusb_clear_vram_57466 sisusb_clear_vram 3-2 57466 NULL
87737 +ath6kl_usb_post_recv_transfers_32892 ath6kl_usb_post_recv_transfers 2 32892 NULL
87738 +ieee80211_if_read_flags_57470 ieee80211_if_read_flags 3 57470 NULL nohasharray
87739 +sep_lock_user_pages_57470 sep_lock_user_pages 2-3 57470 &ieee80211_if_read_flags_57470
87740 +rx_filter_beacon_filter_read_49279 rx_filter_beacon_filter_read 3 49279 NULL
87741 +construct_key_and_link_8321 construct_key_and_link 4 8321 NULL
87742 +ath6kl_set_ap_probe_resp_ies_50539 ath6kl_set_ap_probe_resp_ies 3 50539 NULL
87743 +init_q_132 init_q 4 132 NULL
87744 +roccat_read_41093 roccat_read 3 41093 NULL nohasharray
87745 +nvme_map_user_pages_41093 nvme_map_user_pages 3-4 41093 &roccat_read_41093
87746 +ipwireless_send_packet_8328 ipwireless_send_packet 4 8328 NULL
87747 +unifi_net_data_malloc_24716 unifi_net_data_malloc 3 24716 NULL
87748 +memstick_alloc_host_142 memstick_alloc_host 1 142 NULL
87749 +il_dbgfs_tx_stats_read_32913 il_dbgfs_tx_stats_read 3 32913 NULL
87750 +uio_read_49300 uio_read 3 49300 NULL
87751 +f_audio_buffer_alloc_41110 f_audio_buffer_alloc 1 41110 NULL
87752 +tracing_trace_options_write_153 tracing_trace_options_write 3 153 NULL
87753 +bnad_debugfs_write_regwr_57500 bnad_debugfs_write_regwr 3 57500 NULL
87754 +firmwareUpload_32794 firmwareUpload 3 32794 NULL
87755 +simple_attr_read_24738 simple_attr_read 3 24738 NULL
87756 +play_iframe_8219 play_iframe 3 8219 NULL
87757 +qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
87758 +ping_getfrag_8360 ping_getfrag 4-3 8360 NULL
87759 +ath_rxbuf_alloc_24745 ath_rxbuf_alloc 2 24745 NULL
87760 +nvme_create_queue_170 nvme_create_queue 3 170 NULL
87761 +init_tag_map_57515 init_tag_map 3 57515 NULL
87762 +il_dbgfs_force_reset_read_57517 il_dbgfs_force_reset_read 3 57517 NULL
87763 +srpt_alloc_ioctx_ring_49330 srpt_alloc_ioctx_ring 2-3-4 49330 NULL
87764 +kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
87765 +lpfc_debugfs_read_16566 lpfc_debugfs_read 3 16566 NULL
87766 +xfs_buf_item_get_format_189 xfs_buf_item_get_format 2 189 NULL
87767 +DoC_Probe_57534 DoC_Probe 1 57534 NULL
87768 +cfpkt_setlen_49343 cfpkt_setlen 2 49343 NULL
87769 +agp_allocate_memory_wrap_16576 agp_allocate_memory_wrap 1 16576 NULL
87770 +mI_alloc_skb_24770 mI_alloc_skb 1 24770 NULL
87771 +iscsi_session_setup_196 iscsi_session_setup 4-5 196 NULL
87772 +virtblk_add_req_197 virtblk_add_req 2-3 197 NULL
87773 +il_dbgfs_fh_reg_read_40993 il_dbgfs_fh_reg_read 3 40993 NULL
87774 +rds_tcp_data_recv_53476 rds_tcp_data_recv 3 53476 NULL
87775 +xdi_copy_from_user_8395 xdi_copy_from_user 4 8395 NULL
87776 +skb_make_writable_24783 skb_make_writable 2 24783 NULL
87777 +datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
87778 +dvb_ca_write_41171 dvb_ca_write 3 41171 NULL
87779 +cache_read_24790 cache_read 3 24790 NULL
87780 +px_raw_event_49371 px_raw_event 4 49371 NULL
87781 +tty_register_device_attr_57381 tty_register_device_attr 2 57381 NULL
87782 +compat_filldir_32999 compat_filldir 3 32999 NULL
87783 +hci_si_event_1404 hci_si_event 3 1404 NULL
87784 +compat_sys_process_vm_writev_41194 compat_sys_process_vm_writev 3-5 41194 NULL
87785 +dfs_file_write_41196 dfs_file_write 3 41196 NULL
87786 +afs_cell_create_27346 afs_cell_create 2 27346 NULL
87787 +iscsi_alloc_session_49390 iscsi_alloc_session 3 49390 NULL
87788 +applesmc_create_nodes_49392 applesmc_create_nodes 2 49392 NULL
87789 +snd_usb_ctl_msg_8436 snd_usb_ctl_msg 8 8436 NULL
87790 +comedi_buf_alloc_24822 comedi_buf_alloc 3 24822 NULL
87791 +rx_streaming_always_read_49401 rx_streaming_always_read 3 49401 NULL
87792 +tnode_alloc_49407 tnode_alloc 1 49407 NULL
87793 +tun_alloc_skb_41216 tun_alloc_skb 2-4-3 41216 NULL
87794 +proc_scsi_write_proc_267 proc_scsi_write_proc 3 267 NULL
87795 +__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 NULL
87796 +sk_stream_alloc_skb_57622 sk_stream_alloc_skb 2 57622 NULL
87797 +tx_tx_retry_template_read_57623 tx_tx_retry_template_read 3 57623 NULL
87798 +iwl_dbgfs_ucode_general_stats_read_49199 iwl_dbgfs_ucode_general_stats_read 3 49199 NULL
87799 +mfd_add_devices_16668 mfd_add_devices 4 16668 NULL
87800 +packet_recv_error_16669 packet_recv_error 3 16669 NULL
87801 +osdmap_set_max_osd_57630 osdmap_set_max_osd 2 57630 NULL nohasharray
87802 +sisusbcon_putcs_57630 sisusbcon_putcs 3 57630 &osdmap_set_max_osd_57630
87803 +mem_read_57631 mem_read 3 57631 NULL
87804 +afs_cell_lookup_8482 afs_cell_lookup 2 8482 NULL
87805 +ieee80211_if_read_uapsd_max_sp_len_15067 ieee80211_if_read_uapsd_max_sp_len 3 15067 NULL
87806 +read_file_war_stats_292 read_file_war_stats 3 292 NULL
87807 +pnp_alloc_24869 pnp_alloc 1 24869 NULL nohasharray
87808 +l2cap_create_basic_pdu_24869 l2cap_create_basic_pdu 3 24869 &pnp_alloc_24869
87809 +hiddev_compat_ioctl_41255 hiddev_compat_ioctl 2 41255 NULL
87810 +stats_dot11RTSSuccessCount_read_33065 stats_dot11RTSSuccessCount_read 3 33065 NULL
87811 +sel_read_checkreqprot_33068 sel_read_checkreqprot 3 33068 NULL
87812 +queues_read_24877 queues_read 3 24877 NULL
87813 +__fprog_create_41263 __fprog_create 2 41263 NULL
87814 +syslog_print_307 syslog_print 2 307 NULL
87815 +platform_device_add_data_310 platform_device_add_data 3 310 NULL
87816 +agp_3_5_isochronous_node_enable_49465 agp_3_5_isochronous_node_enable 3 49465 NULL
87817 +dn_setsockopt_314 dn_setsockopt 5 314 NULL
87818 +sys_mq_timedsend_57661 sys_mq_timedsend 3 57661 NULL
87819 +r3964_write_57662 r3964_write 4 57662 NULL
87820 +xfs_iformat_local_49472 xfs_iformat_local 4 49472 NULL
87821 +savu_sysfs_read_49473 savu_sysfs_read 6 49473 NULL
87822 +dn_nsp_do_disc_49474 dn_nsp_do_disc 6-2 49474 NULL
87823 +alloc_context_41283 alloc_context 1 41283 NULL
87824 +__lgwrite_57669 __lgwrite 4 57669 NULL
87825 +ath9k_wmi_cmd_327 ath9k_wmi_cmd 4 327 NULL
87826 +codec_list_read_file_24910 codec_list_read_file 3 24910 NULL
87827 +isr_decrypt_done_read_49490 isr_decrypt_done_read 3 49490 NULL
87828 +alloc_pg_vec_8533 alloc_pg_vec 2 8533 NULL
87829 +pwr_missing_bcns_cnt_read_45113 pwr_missing_bcns_cnt_read 3 45113 NULL
87830 +ieee80211_fragment_33112 ieee80211_fragment 4 33112 NULL
87831 +arch_gnttab_map_shared_41306 arch_gnttab_map_shared 3 41306 NULL
87832 +v4l2_ctrl_new_24927 v4l2_ctrl_new 7 24927 NULL
87833 +write_node_33121 write_node 4 33121 NULL
87834 +vring_new_virtqueue_54673 vring_new_virtqueue 2 54673 NULL
87835 +i2400m_rx_stats_read_57706 i2400m_rx_stats_read 3 57706 NULL
87836 +profile_remove_8556 profile_remove 3 8556 NULL
87837 +rx_filter_data_filter_read_30098 rx_filter_data_filter_read 3 30098 NULL
87838 +iscsi_recv_pdu_16755 iscsi_recv_pdu 4 16755 NULL
87839 +arcmsr_adjust_disk_queue_depth_16756 arcmsr_adjust_disk_queue_depth 2 16756 NULL
87840 +cmtp_send_interopmsg_376 cmtp_send_interopmsg 7 376 NULL
87841 +ieee80211_if_read_dot11MeshHWMPconfirmationInterval_57722 ieee80211_if_read_dot11MeshHWMPconfirmationInterval 3 57722 NULL
87842 +mga_ioremap_8571 mga_ioremap 1-2 8571 NULL
87843 +isr_dma0_done_read_8574 isr_dma0_done_read 3 8574 NULL
87844 +sys_rt_sigpending_24961 sys_rt_sigpending 2 24961 NULL
87845 +tower_write_8580 tower_write 3 8580 NULL
87846 +cnic_init_id_tbl_41354 cnic_init_id_tbl 2 41354 NULL
87847 +compat_sys_set_mempolicy_57742 compat_sys_set_mempolicy 3 57742 NULL nohasharray
87848 +pppol2tp_recvmsg_57742 pppol2tp_recvmsg 4 57742 &compat_sys_set_mempolicy_57742
87849 +jbd2_alloc_41359 jbd2_alloc 1 41359 NULL
87850 +debug_debug6_read_33168 debug_debug6_read 3 33168 NULL
87851 +nf_nat_sdp_port_24977 nf_nat_sdp_port 7 24977 NULL
87852 +smk_write_access_49561 smk_write_access 3 49561 NULL
87853 +llc_ui_sendmsg_24987 llc_ui_sendmsg 4 24987 NULL
87854 +kmp_init_41373 kmp_init 2 41373 NULL
87855 +context_alloc_24645 context_alloc 3 24645 NULL
87856 +lbs_rdmac_read_418 lbs_rdmac_read 3 418 NULL
87857 +get_server_iovec_16804 get_server_iovec 2 16804 NULL
87858 +alloc_chunk_49575 alloc_chunk 1 49575 NULL
87859 +tipc_send2name_16809 tipc_send2name 6 16809 NULL
87860 +sctp_setsockopt_default_send_param_49578 sctp_setsockopt_default_send_param 3 49578 NULL
87861 +key_conf_hw_key_idx_read_25003 key_conf_hw_key_idx_read 3 25003 NULL
87862 +shash_setkey_unaligned_8620 shash_setkey_unaligned 3 8620 NULL
87863 +il_dbgfs_channels_read_25005 il_dbgfs_channels_read 3 25005 NULL
87864 +dm_vcalloc_16814 dm_vcalloc 1-2 16814 NULL
87865 +it821x_firmware_command_8628 it821x_firmware_command 3 8628 NULL
87866 +isr_commands_read_41398 isr_commands_read 3 41398 NULL
87867 +pp_read_33210 pp_read 3 33210 NULL
87868 +sys_flistxattr_41407 sys_flistxattr 3 41407 NULL
87869 +scsi_mode_sense_16835 scsi_mode_sense 5 16835 NULL
87870 +ivtv_read_57796 ivtv_read 3 57796 NULL
87871 +isr_wakeups_read_49607 isr_wakeups_read 3 49607 NULL
87872 +nfs_dns_resolve_name_25036 nfs_dns_resolve_name 3 25036 NULL
87873 +xfs_iext_add_41422 xfs_iext_add 3 41422 NULL
87874 +heap_init_49617 heap_init 2 49617 NULL
87875 +xfs_file_aio_write_33234 xfs_file_aio_write 4 33234 NULL
87876 +ieee80211_send_probe_req_38307 ieee80211_send_probe_req 6-4 38307 NULL
87877 +isdn_ppp_fill_rq_41428 isdn_ppp_fill_rq 2 41428 NULL
87878 +smk_write_doi_49621 smk_write_doi 3 49621 NULL
87879 +_alloc_get_attr_desc_470 _alloc_get_attr_desc 2 470 NULL
87880 +lbs_rdrf_read_41431 lbs_rdrf_read 3 41431 NULL
87881 +btrfsic_cmp_log_and_dev_bytenr_49628 btrfsic_cmp_log_and_dev_bytenr 2 49628 NULL
87882 +iio_device_alloc_41440 iio_device_alloc 1 41440 NULL
87883 +ntfs_file_buffered_write_41442 ntfs_file_buffered_write 4-6 41442 NULL
87884 +pcpu_build_alloc_info_41443 pcpu_build_alloc_info 1-2-3 41443 NULL
87885 +dio_bio_alloc_8677 dio_bio_alloc 5 8677 NULL
87886 +bfad_debugfs_read_regrd_57830 bfad_debugfs_read_regrd 3 57830 NULL
87887 +st_write_16874 st_write 3 16874 NULL
87888 +copy_to_user_57835 copy_to_user 3 57835 NULL
87889 +rproc_trace_read_8686 rproc_trace_read 3 8686 NULL
87890 +pidlist_resize_496 pidlist_resize 2 496 NULL
87891 +flash_read_57843 flash_read 3 57843 NULL
87892 +read_vbt_r0_503 read_vbt_r0 1 503 NULL
87893 +rx_rx_defrag_end_read_505 rx_rx_defrag_end_read 3 505 NULL
87894 +cachefiles_cook_key_33274 cachefiles_cook_key 2 33274 NULL
87895 +rds_message_map_pages_31487 rds_message_map_pages 2 31487 NULL
87896 +arcfb_write_8702 arcfb_write 3 8702 NULL
87897 +gsm_control_reply_53333 gsm_control_reply 4 53333 NULL
87898 +smp_send_cmd_512 smp_send_cmd 3 512 NULL
87899 +rt2x00debug_write_rfcsr_41473 rt2x00debug_write_rfcsr 3 41473 NULL
87900 +rfcomm_sock_sendmsg_37661 rfcomm_sock_sendmsg 4 37661 NULL nohasharray
87901 +vmw_framebuffer_dmabuf_dirty_37661 vmw_framebuffer_dmabuf_dirty 6 37661 &rfcomm_sock_sendmsg_37661
87902 +HDLC_irq_8709 HDLC_irq 2 8709 NULL
87903 +ctrl_out_8712 ctrl_out 3-5 8712 NULL
87904 +cxio_hal_init_rhdl_resource_25104 cxio_hal_init_rhdl_resource 1 25104 NULL
87905 +sock_wmalloc_16472 sock_wmalloc 2 16472 NULL
87906 +snd_rawmidi_kernel_write_25106 snd_rawmidi_kernel_write 3 25106 NULL
87907 +aa_simple_write_to_buffer_49683 aa_simple_write_to_buffer 3-4 49683 NULL
87908 +wep_interrupt_read_41492 wep_interrupt_read 3 41492 NULL
87909 +hpfs_translate_name_41497 hpfs_translate_name 3 41497 NULL
87910 +mcs7830_get_reg_33308 mcs7830_get_reg 3 33308 NULL
87911 +psb_unlocked_ioctl_16926 psb_unlocked_ioctl 2 16926 NULL nohasharray
87912 +snd_gf1_mem_proc_dump_16926 snd_gf1_mem_proc_dump 5 16926 &psb_unlocked_ioctl_16926
87913 +iwl_dbgfs_reply_tx_error_read_19205 iwl_dbgfs_reply_tx_error_read 3 19205 NULL
87914 +sys_gethostname_49698 sys_gethostname 2 49698 NULL
87915 +cx2341x_ctrl_new_menu_49700 cx2341x_ctrl_new_menu 3 49700 NULL
87916 +ieee80211_rx_mgmt_probe_resp_6918 ieee80211_rx_mgmt_probe_resp 3 6918 NULL
87917 +devres_alloc_551 devres_alloc 2 551 NULL
87918 +ldisc_receive_41516 ldisc_receive 4 41516 NULL
87919 +tx_tx_exch_expiry_read_8749 tx_tx_exch_expiry_read 3 8749 NULL
87920 +ip_append_data_16942 ip_append_data 5-6 16942 NULL
87921 +xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL
87922 +_sp2d_alloc_16944 _sp2d_alloc 1-2-3 16944 NULL
87923 +squashfs_read_table_16945 squashfs_read_table 3 16945 NULL
87924 +emi26_writememory_57908 emi26_writememory 4 57908 NULL
87925 +start_isoc_chain_565 start_isoc_chain 2 565 NULL
87926 +iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 NULL
87927 +gsm_mux_rx_netchar_33336 gsm_mux_rx_netchar 3 33336 NULL
87928 +brcmu_pkt_buf_get_skb_5556 brcmu_pkt_buf_get_skb 1 5556 NULL
87929 +joydev_compat_ioctl_8765 joydev_compat_ioctl 2 8765 NULL
87930 +sys_prctl_8766 sys_prctl 4 8766 NULL
87931 +joydev_ioctl_33343 joydev_ioctl 2 33343 NULL
87932 +sep_create_dcb_dmatables_context_kernel_49728 sep_create_dcb_dmatables_context_kernel 6 49728 NULL
87933 +compat_sys_preadv_583 compat_sys_preadv 3 583 NULL
87934 +sys_fsetxattr_49736 sys_fsetxattr 4 49736 NULL
87935 +keyctl_instantiate_key_iov_16969 keyctl_instantiate_key_iov 3 16969 NULL
87936 +create_xattr_datum_33356 create_xattr_datum 5 33356 NULL nohasharray
87937 +irq_pkt_threshold_read_33356 irq_pkt_threshold_read 3 33356 &create_xattr_datum_33356
87938 +sel_read_handle_unknown_57933 sel_read_handle_unknown 3 57933 NULL
87939 +sys_fgetxattr_25166 sys_fgetxattr 4 25166 NULL
87940 +ipath_init_qp_table_25167 ipath_init_qp_table 2 25167 NULL
87941 +zd_usb_iowrite16v_49744 zd_usb_iowrite16v 3 49744 NULL
87942 +tx_tx_frame_checksum_read_41553 tx_tx_frame_checksum_read 3 41553 NULL
87943 +ath6kl_endpoint_stats_read_41554 ath6kl_endpoint_stats_read 3 41554 NULL
87944 +ath6kl_buf_alloc_57304 ath6kl_buf_alloc 1 57304 NULL
87945 +cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
87946 +gserial_setup_41558 gserial_setup 2 41558 NULL
87947 +rx_rx_checksum_result_read_50617 rx_rx_checksum_result_read 3 50617 NULL
87948 +rx_57944 rx 4 57944 NULL
87949 +sctp_getsockopt_local_addrs_25178 sctp_getsockopt_local_addrs 2 25178 NULL
87950 +nci_skb_alloc_49757 nci_skb_alloc 2 49757 NULL
87951 +key_conf_keylen_read_49758 key_conf_keylen_read 3 49758 NULL
87952 +cpuset_common_file_read_8800 cpuset_common_file_read 5 8800 NULL
87953 +sctp_ulpevent_new_33377 sctp_ulpevent_new 1 33377 NULL
87954 +fuse_conn_waiting_read_49762 fuse_conn_waiting_read 3 49762 NULL
87955 +mempool_create_slab_pool_62907 mempool_create_slab_pool 1 62907 NULL
87956 +isku_sysfs_write_49767 isku_sysfs_write 6 49767 NULL
87957 +i915_cache_sharing_write_57961 i915_cache_sharing_write 3 57961 NULL
87958 +batadv_receive_client_update_packet_41578 batadv_receive_client_update_packet 3 41578 NULL
87959 +ceph_read_dir_17005 ceph_read_dir 3 17005 NULL
87960 +handle_response_icmp_39574 handle_response_icmp 7 39574 NULL
87961 +wbcir_tx_19219 wbcir_tx 3 19219 NULL
87962 +hfc_empty_fifo_57972 hfc_empty_fifo 2 57972 NULL
87963 +ceph_dns_resolve_name_62488 ceph_dns_resolve_name 2 62488 NULL
87964 +metronomefb_write_8823 metronomefb_write 3 8823 NULL
87965 +icmpv6_manip_pkt_8833 icmpv6_manip_pkt 4 8833 NULL
87966 +copy_counters_to_user_17027 copy_counters_to_user 5 17027 NULL
87967 +read_file_rx_chainmask_41605 read_file_rx_chainmask 3 41605 NULL
87968 +vmw_du_crtc_cursor_set_28479 vmw_du_crtc_cursor_set 4-5 28479 NULL
87969 +_osd_req_list_objects_4204 _osd_req_list_objects 6 4204 NULL
87970 +nfs4_acl_new_49806 nfs4_acl_new 1 49806 NULL
87971 +a2mp_send_41615 a2mp_send 4 41615 NULL
87972 +ceph_copy_user_to_page_vector_656 ceph_copy_user_to_page_vector 4-3 656 NULL
87973 +rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
87974 +arch_gnttab_map_status_49812 arch_gnttab_map_status 3 49812 NULL
87975 +mon_stat_read_25238 mon_stat_read 3 25238 NULL
87976 +jffs2_trusted_setxattr_17048 jffs2_trusted_setxattr 4 17048 NULL
87977 +tcf_csum_ipv6_udp_25241 tcf_csum_ipv6_udp 4 25241 NULL
87978 +wa_nep_queue_8858 wa_nep_queue 2 8858 NULL
87979 +macvtap_alloc_skb_50629 macvtap_alloc_skb 2-4-3 50629 NULL
87980 +mem_rx_free_mem_blks_read_675 mem_rx_free_mem_blks_read 3 675 NULL
87981 +ntfs_copy_from_user_iovec_49829 ntfs_copy_from_user_iovec 3-6 49829 NULL
87982 +add_uuid_49831 add_uuid 4 49831 NULL
87983 +send_pages_8872 send_pages 3 8872 NULL
87984 +ath6kl_fwlog_block_read_49836 ath6kl_fwlog_block_read 3 49836 NULL
87985 +__btrfs_map_block_49839 __btrfs_map_block 3 49839 NULL
87986 +dvb_dvr_read_17073 dvb_dvr_read 3 17073 NULL
87987 +mempool_create_kmalloc_pool_41650 mempool_create_kmalloc_pool 1 41650 NULL
87988 +simple_transaction_read_17076 simple_transaction_read 3 17076 NULL
87989 +rx_rx_pre_complt_read_41653 rx_rx_pre_complt_read 3 41653 NULL
87990 +__kmalloc_reserve_17080 __kmalloc_reserve 1 17080 NULL
87991 +timeradd_entry_49850 timeradd_entry 3 49850 NULL
87992 +crypto_alloc_instance2_25277 crypto_alloc_instance2 3 25277 NULL
87993 +vfs_writev_25278 vfs_writev 3 25278 NULL
87994 +rtl8169_try_rx_copy_705 rtl8169_try_rx_copy 3 705 NULL
87995 +alloc_async_14208 alloc_async 1 14208 NULL
87996 +ovs_vport_alloc_33475 ovs_vport_alloc 1 33475 NULL
87997 +persistent_ram_vmap_709 persistent_ram_vmap 2-1 709 NULL
87998 +l2tp_session_create_25286 l2tp_session_create 1 25286 NULL
87999 +create_entry_33479 create_entry 2 33479 NULL
88000 +mce_async_out_58056 mce_async_out 3 58056 NULL
88001 +alloc_sja1000dev_17868 alloc_sja1000dev 1 17868 NULL
88002 +sys_preadv_17100 sys_preadv 3 17100 NULL
88003 +sctp_setsockopt_bindx_49870 sctp_setsockopt_bindx 3 49870 NULL
88004 +ip_setsockopt_33487 ip_setsockopt 5 33487 NULL
88005 +netxen_nic_hw_write_wx_128M_33488 netxen_nic_hw_write_wx_128M 2 33488 NULL
88006 +aac_src_ioremap_41688 aac_src_ioremap 2 41688 NULL
88007 +dt3155_alloc_coherent_58073 dt3155_alloc_coherent 2 58073 NULL
88008 +res_counter_read_33499 res_counter_read 4 33499 NULL
88009 +sctp_setsockopt_peer_addr_params_734 sctp_setsockopt_peer_addr_params 3 734 NULL
88010 +cm4040_write_58079 cm4040_write 3 58079 NULL
88011 +fb_read_33506 fb_read 3 33506 NULL
88012 +help_25316 help 5 25316 NULL nohasharray
88013 +ath9k_debugfs_read_buf_25316 ath9k_debugfs_read_buf 3 25316 &help_25316
88014 +rfcomm_wmalloc_58090 rfcomm_wmalloc 2 58090 NULL
88015 +mwifiex_get_common_rates_17131 mwifiex_get_common_rates 3 17131 NULL
88016 +musb_test_mode_write_33518 musb_test_mode_write 3 33518 NULL
88017 +ddp_set_map_751 ddp_set_map 4 751 NULL
88018 +driver_stats_read_8944 driver_stats_read 3 8944 NULL
88019 +ahash_setkey_unaligned_33521 ahash_setkey_unaligned 3 33521 NULL
88020 +dvb_video_write_754 dvb_video_write 3 754 NULL
88021 +nes_alloc_fast_reg_page_list_33523 nes_alloc_fast_reg_page_list 2 33523 NULL
88022 +osd_req_add_get_attr_list_49278 osd_req_add_get_attr_list 3 49278 NULL
88023 +aggr_size_rx_size_read_33526 aggr_size_rx_size_read 3 33526 NULL
88024 +bdx_tx_db_init_41719 bdx_tx_db_init 2 41719 NULL
88025 +nouveau_instobj_create__17144 nouveau_instobj_create_ 4 17144 NULL
88026 +udi_log_event_58105 udi_log_event 3 58105 NULL
88027 +sys_pwritev_41722 sys_pwritev 3 41722 NULL
88028 +l2cap_sock_alloc_skb_cb_33532 l2cap_sock_alloc_skb_cb 2 33532 NULL
88029 +ib_send_cm_mra_60202 ib_send_cm_mra 4 60202 NULL nohasharray
88030 +qib_reg_phys_mr_60202 qib_reg_phys_mr 3 60202 &ib_send_cm_mra_60202
88031 +read_file_tgt_stats_8959 read_file_tgt_stats 3 8959 NULL
88032 +__copy_from_user_inatomic_nocache_49921 __copy_from_user_inatomic_nocache 3 49921 NULL
88033 +tomoyo_read_self_33539 tomoyo_read_self 3 33539 NULL
88034 +usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL
88035 +tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
88036 +venus_mkdir_8967 venus_mkdir 4 8967 NULL
88037 +vol_cdev_read_8968 vol_cdev_read 3 8968 NULL nohasharray
88038 +seq_open_net_8968 seq_open_net 4 8968 &vol_cdev_read_8968
88039 +sep_read_17161 sep_read 3 17161 NULL
88040 +befs_nls2utf_17163 befs_nls2utf 3 17163 NULL
88041 +tx_tx_start_templates_read_17164 tx_tx_start_templates_read 3 17164 NULL
88042 +dup_array_33551 dup_array 3 33551 NULL
88043 +vxge_device_register_7752 vxge_device_register 4 7752 NULL
88044 +solo_enc_read_33553 solo_enc_read 3 33553 NULL
88045 +fillonedir_41746 fillonedir 3 41746 NULL
88046 +init_bch_64130 init_bch 1-2 64130 NULL
88047 +ipv6_flowlabel_opt_58135 ipv6_flowlabel_opt 3 58135 NULL nohasharray
88048 +slhc_init_58135 slhc_init 1-2 58135 &ipv6_flowlabel_opt_58135
88049 +sel_read_mls_25369 sel_read_mls 3 25369 NULL
88050 +btrfs_alloc_free_block_8986 btrfs_alloc_free_block 3 8986 NULL
88051 +savemem_58129 savemem 3 58129 NULL
88052 +batadv_tt_realloc_packet_buff_49960 batadv_tt_realloc_packet_buff 4 49960 NULL
88053 +rx_defrag_tkip_called_read_21031 rx_defrag_tkip_called_read 3 21031 NULL
88054 +driver_state_read_17194 driver_state_read 3 17194 NULL nohasharray
88055 +iscsit_find_cmd_from_itt_or_dump_17194 iscsit_find_cmd_from_itt_or_dump 3 17194 &driver_state_read_17194
88056 +dispatch_ioctl_32357 dispatch_ioctl 2 32357 NULL
88057 +if_writecmd_815 if_writecmd 2 815 NULL
88058 +aac_change_queue_depth_825 aac_change_queue_depth 2 825 NULL
88059 +read_fifo_826 read_fifo 3 826 NULL
88060 +keyctl_describe_key_36853 keyctl_describe_key 3 36853 NULL
88061 +scsi_execute_33596 scsi_execute 5 33596 NULL
88062 +dn_recvmsg_17213 dn_recvmsg 4 17213 NULL
88063 +ms_rw_17220 ms_rw 3-4 17220 NULL
88064 +read_tree_block_841 read_tree_block 3 841 NULL
88065 +hsi_alloc_controller_41802 hsi_alloc_controller 1 41802 NULL
88066 +l2cap_chan_send_49995 l2cap_chan_send 3 49995 NULL
88067 +dai_list_read_file_25421 dai_list_read_file 3 25421 NULL
88068 +__pskb_copy_9038 __pskb_copy 2 9038 NULL
88069 +garmin_write_bulk_58191 garmin_write_bulk 3 58191 NULL
88070 +asix_write_cmd_58192 asix_write_cmd 5 58192 NULL
88071 +um_idi_read_850 um_idi_read 3 850 NULL
88072 +__module_alloc_50004 __module_alloc 1 50004 NULL
88073 +sco_send_frame_41815 sco_send_frame 3 41815 NULL
88074 +ts_read_44687 ts_read 3 44687 NULL
88075 +nci_send_cmd_58206 nci_send_cmd 3 58206 NULL
88076 +snd_emu10k1_synth_copy_from_user_9061 snd_emu10k1_synth_copy_from_user 3-5 9061 NULL
88077 +snd_gus_dram_peek_9062 snd_gus_dram_peek 4 9062 NULL
88078 +provide_user_output_41105 provide_user_output 3 41105 NULL
88079 +error_error_frame_cts_nul_flid_read_17262 error_error_frame_cts_nul_flid_read 3 17262 NULL
88080 +o2net_send_message_vec_879 o2net_send_message_vec 4 879 NULL nohasharray
88081 +iwl_dbgfs_fh_reg_read_879 iwl_dbgfs_fh_reg_read 3 879 &o2net_send_message_vec_879
88082 +fib_info_hash_alloc_9075 fib_info_hash_alloc 1 9075 NULL
88083 +alloc_ep_17269 alloc_ep 1 17269 NULL
88084 +ath6kl_wmi_beginscan_cmd_25462 ath6kl_wmi_beginscan_cmd 8 25462 NULL
88085 +ieee80211_if_read_aid_9705 ieee80211_if_read_aid 3 9705 NULL
88086 +generic_file_buffered_write_25464 generic_file_buffered_write 4 25464 NULL
88087 +do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
88088 +raw_recvmsg_17277 raw_recvmsg 4 17277 NULL
88089 +alloc_ebda_hpc_50046 alloc_ebda_hpc 1-2 50046 NULL
88090 +keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
88091 +create_queues_9088 create_queues 2-3 9088 NULL
88092 +irq_blk_threshold_read_33666 irq_blk_threshold_read 3 33666 NULL
88093 +neigh_hash_grow_17283 neigh_hash_grow 2 17283 NULL
88094 +btrfsic_create_link_to_next_block_58246 btrfsic_create_link_to_next_block 4 58246 NULL
88095 +minstrel_stats_read_17290 minstrel_stats_read 3 17290 NULL
88096 +rbd_alloc_coll_33678 rbd_alloc_coll 1 33678 NULL
88097 +read_file_debug_58256 read_file_debug 3 58256 NULL
88098 +skb_pad_17302 skb_pad 2 17302 NULL
88099 +tracing_entries_read_8345 tracing_entries_read 3 8345 NULL
88100 +btmrvl_hsstate_read_920 btmrvl_hsstate_read 3 920 NULL
88101 +profile_load_58267 profile_load 3 58267 NULL
88102 +pmcraid_change_queue_depth_9116 pmcraid_change_queue_depth 2 9116 NULL
88103 +ivtv_buf_copy_from_user_25502 ivtv_buf_copy_from_user 4 25502 NULL
88104 +acpi_ds_build_internal_package_obj_58271 acpi_ds_build_internal_package_obj 3 58271 NULL
88105 +snd_pcm_plugin_build_25505 snd_pcm_plugin_build 5 25505 NULL
88106 +dev_set_alias_50084 dev_set_alias 3 50084 NULL
88107 +pcpu_get_vm_areas_50085 pcpu_get_vm_areas 3 50085 NULL
88108 +sock_setsockopt_50088 sock_setsockopt 5 50088 NULL
88109 +altera_swap_dr_50090 altera_swap_dr 2 50090 NULL
88110 +sys_keyctl_33708 sys_keyctl 4 33708 NULL nohasharray
88111 +netlink_sendmsg_33708 netlink_sendmsg 4 33708 &sys_keyctl_33708
88112 +ata_host_alloc_pinfo_17325 ata_host_alloc_pinfo 3 17325 NULL
88113 +iscsi_decode_text_input_58292 iscsi_decode_text_input 4 58292 NULL
88114 +carl9170_cmd_buf_950 carl9170_cmd_buf 3 950 NULL
88115 +pvr2_stream_buffer_count_33719 pvr2_stream_buffer_count 2 33719 NULL
88116 +get_packet_41914 get_packet 3 41914 NULL
88117 +get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
88118 +ceph_get_direct_page_vector_41917 ceph_get_direct_page_vector 2 41917 NULL
88119 +read_file_slot_50111 read_file_slot 3 50111 NULL
88120 +netlink_recvmsg_61600 netlink_recvmsg 4 61600 NULL
88121 +ieee80211_if_read_dot11MeshHWMPperrMinInterval_17346 ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 NULL
88122 +ath6kl_wmi_send_mgmt_cmd_17347 ath6kl_wmi_send_mgmt_cmd 7 17347 NULL
88123 +serverworks_create_gatt_pages_46582 serverworks_create_gatt_pages 1 46582 NULL
88124 +ieee80211_if_read_path_refresh_time_25545 ieee80211_if_read_path_refresh_time 3 25545 NULL
88125 +tx_tx_start_int_templates_read_58324 tx_tx_start_int_templates_read 3 58324 NULL
88126 +copy_items_50140 copy_items 6 50140 NULL
88127 +omfs_readpages_42490 omfs_readpages 4 42490 NULL
88128 +pcim_iomap_58334 pcim_iomap 3 58334 NULL
88129 +diva_init_dma_map_58336 diva_init_dma_map 3 58336 NULL
88130 +map_addr_56144 map_addr 7 56144 NULL
88131 +vifs_state_read_33762 vifs_state_read 3 33762 NULL
88132 +btmrvl_psstate_read_50683 btmrvl_psstate_read 3 50683 NULL
88133 +hdlcdev_rx_997 hdlcdev_rx 3 997 NULL
88134 +portnames_read_41958 portnames_read 3 41958 NULL
88135 +ubi_self_check_all_ff_41959 ubi_self_check_all_ff 4 41959 NULL
88136 +hashtab_create_33769 hashtab_create 3 33769 NULL
88137 +alloc_group_attrs_9194 alloc_group_attrs 2 9194 NULL nohasharray
88138 +altera_swap_ir_9194 altera_swap_ir 2 9194 &alloc_group_attrs_9194
88139 +vmalloc_to_sg_58354 vmalloc_to_sg 2 58354 NULL
88140 +aac_nark_ioremap_50163 aac_nark_ioremap 2 50163 NULL nohasharray
88141 +kmalloc_node_50163 kmalloc_node 1 50163 &aac_nark_ioremap_50163
88142 +cx24116_writeregN_41975 cx24116_writeregN 4 41975 NULL
88143 +odev_update_50169 odev_update 2 50169 NULL
88144 +ubi_resize_volume_50172 ubi_resize_volume 2 50172 NULL
88145 +smk_write_cipso2_1021 smk_write_cipso2 3 1021 NULL
88146 +__devres_alloc_25598 __devres_alloc 2 25598 NULL
88147 +snd_compr_write_data_46592 snd_compr_write_data 3 46592 NULL
88148 +netpoll_send_udp_58955 netpoll_send_udp 3 58955 NULL
88149 +tx_tx_prepared_descs_read_9221 tx_tx_prepared_descs_read 3 9221 NULL
88150 +ib_send_cm_drep_50186 ib_send_cm_drep 3 50186 NULL
88151 +do_write_orph_node_64343 do_write_orph_node 2 64343 NULL
88152 +qla4xxx_post_aen_work_46953 qla4xxx_post_aen_work 3 46953 NULL
88153 +lpfc_debugfs_dif_err_write_17424 lpfc_debugfs_dif_err_write 3 17424 NULL
88154 +il_dbgfs_status_read_58388 il_dbgfs_status_read 3 58388 NULL
88155 +cfg80211_roamed_bss_50198 cfg80211_roamed_bss 4-6 50198 NULL
88156 +lguest_map_42008 lguest_map 1-2 42008 NULL
88157 +proc_coredump_filter_write_25625 proc_coredump_filter_write 3 25625 NULL
88158 +sta_connected_time_read_17435 sta_connected_time_read 3 17435 NULL
88159 +pool_allocate_42012 pool_allocate 3 42012 NULL
88160 +l2cap_sock_setsockopt_50207 l2cap_sock_setsockopt 5 50207 NULL
88161 +sctp_make_init_58401 sctp_make_init 4 58401 NULL
88162 +ieee80211_skb_resize_50211 ieee80211_skb_resize 3 50211 NULL
88163 +gigaset_initdriver_1060 gigaset_initdriver 2 1060 NULL
88164 +sep_create_msgarea_context_33829 sep_create_msgarea_context 4 33829 NULL
88165 +skb_do_copy_data_nocache_12465 skb_do_copy_data_nocache 5 12465 NULL
88166 +udplite_manip_pkt_33832 udplite_manip_pkt 4 33832 NULL
88167 +tcf_csum_ipv4_icmp_9258 tcf_csum_ipv4_icmp 3 9258 NULL
88168 +acpi_ut_create_buffer_object_42030 acpi_ut_create_buffer_object 1 42030 NULL
88169 +mce_request_packet_1073 mce_request_packet 3 1073 NULL
88170 +agp_create_memory_1075 agp_create_memory 1 1075 NULL
88171 +sparse_early_usemaps_alloc_node_9269 sparse_early_usemaps_alloc_node 4 9269 NULL
88172 +__hwahc_op_set_gtk_42038 __hwahc_op_set_gtk 4 42038 NULL
88173 +iscsi_offload_mesg_58425 iscsi_offload_mesg 5 58425 NULL
88174 +mon_bin_compat_ioctl_50234 mon_bin_compat_ioctl 3 50234 NULL
88175 +_scsih_adjust_queue_depth_1083 _scsih_adjust_queue_depth 2 1083 NULL
88176 +oz_cdev_write_33852 oz_cdev_write 3 33852 NULL
88177 +nfs_pgarray_set_1085 nfs_pgarray_set 2 1085 NULL
88178 +irda_sendmsg_ultra_42047 irda_sendmsg_ultra 4 42047 NULL
88179 +sg_kmalloc_50240 sg_kmalloc 1 50240 NULL
88180 +ps_poll_upsd_max_ap_turn_read_42050 ps_poll_upsd_max_ap_turn_read 3 42050 NULL
88181 +llcp_sock_sendmsg_1092 llcp_sock_sendmsg 4 1092 NULL
88182 +probe_kernel_write_17481 probe_kernel_write 3 17481 NULL
88183 +InterfaceTransmitPacket_42058 InterfaceTransmitPacket 3 42058 NULL
88184 +__alloc_session_17485 __alloc_session 2-1 17485 NULL
88185 +TSS_rawhmac_17486 TSS_rawhmac 3 17486 NULL
88186 +pm860x_bulk_write_43875 pm860x_bulk_write 3 43875 NULL
88187 +afs_extract_data_50261 afs_extract_data 5 50261 NULL
88188 +config_proc_write_33878 config_proc_write 3 33878 NULL
88189 +capabilities_read_58457 capabilities_read 3 58457 NULL
88190 +sta_inactive_ms_read_25690 sta_inactive_ms_read 3 25690 NULL
88191 +iwl_dbgfs_stations_read_9309 iwl_dbgfs_stations_read 3 9309 NULL
88192 +lpfc_idiag_baracc_read_58466 lpfc_idiag_baracc_read 3 58466 NULL nohasharray
88193 +compat_do_ipt_set_ctl_58466 compat_do_ipt_set_ctl 4 58466 &lpfc_idiag_baracc_read_58466
88194 +scsi_execute_req_42088 scsi_execute_req 5 42088 NULL
88195 +hcd_buffer_alloc_27495 hcd_buffer_alloc 2 27495 NULL
88196 +rxrpc_setsockopt_50286 rxrpc_setsockopt 5 50286 NULL
88197 +sk_chk_filter_42095 sk_chk_filter 2 42095 NULL
88198 +rx_filter_mc_filter_read_25712 rx_filter_mc_filter_read 3 25712 NULL
88199 +ibmasm_new_command_25714 ibmasm_new_command 2 25714 NULL
88200 +snd_rme96_capture_copy_58484 snd_rme96_capture_copy 5 58484 NULL
88201 +event_tx_stuck_read_19305 event_tx_stuck_read 3 19305 NULL
88202 +batadv_bla_is_backbone_gw_58488 batadv_bla_is_backbone_gw 3 58488 NULL
88203 +v4l2_ctrl_new_int_menu_41151 v4l2_ctrl_new_int_menu 4 41151 NULL
88204 +submit_inquiry_42108 submit_inquiry 3 42108 NULL
88205 +sel_write_context_25726 sel_write_context 3 25726 NULL nohasharray
88206 +__alloc_bootmem_low_node_25726 __alloc_bootmem_low_node 2 25726 &sel_write_context_25726
88207 +sysfs_read_file_42113 sysfs_read_file 3 42113 NULL
88208 +mcs_unwrap_fir_25733 mcs_unwrap_fir 3 25733 NULL
88209 +vme_user_write_15587 vme_user_write 3 15587 NULL
88210 +xlog_do_log_recovery_17550 xlog_do_log_recovery 3 17550 NULL
88211 +__copy_to_user_17551 __copy_to_user 3 17551 NULL
88212 +cxgbi_device_portmap_create_25747 cxgbi_device_portmap_create 3 25747 NULL
88213 +sel_read_avc_cache_threshold_33942 sel_read_avc_cache_threshold 3 33942 NULL
88214 +lpfc_idiag_ctlacc_read_33943 lpfc_idiag_ctlacc_read 3 33943 NULL
88215 +read_file_tgt_rx_stats_33944 read_file_tgt_rx_stats 3 33944 NULL
88216 +nouveau_engine_create__50331 nouveau_engine_create_ 7 50331 NULL
88217 +lpfc_idiag_pcicfg_read_50334 lpfc_idiag_pcicfg_read 3 50334 NULL
88218 +v9fs_alloc_rdir_buf_42150 v9fs_alloc_rdir_buf 2 42150 NULL
88219 +roccat_common2_send_with_status_50343 roccat_common2_send_with_status 4 50343 NULL
88220 +ipc_alloc_1192 ipc_alloc 1 1192 NULL
88221 +mwifiex_info_read_53447 mwifiex_info_read 3 53447 NULL
88222 +ib_create_send_mad_1196 ib_create_send_mad 5 1196 NULL
88223 +rndis_add_response_58544 rndis_add_response 2 58544 NULL
88224 +isdn_ppp_read_50356 isdn_ppp_read 4 50356 NULL
88225 +read_9397 read 3 9397 NULL
88226 +i2cdev_read_1206 i2cdev_read 3 1206 NULL
88227 +read_file_base_eeprom_42168 read_file_base_eeprom 3 42168 NULL
88228 +printer_write_60276 printer_write 3 60276 NULL
88229 +acpi_ut_create_package_object_17594 acpi_ut_create_package_object 1 17594 NULL
88230 +neigh_hash_alloc_17595 neigh_hash_alloc 1 17595 NULL
88231 +rts51x_write_mem_17598 rts51x_write_mem 4 17598 NULL
88232 +vga_switcheroo_debugfs_write_33984 vga_switcheroo_debugfs_write 3 33984 NULL
88233 +roccat_common2_receive_50369 roccat_common2_receive 4 50369 NULL
88234 +oprofilefs_str_to_user_42182 oprofilefs_str_to_user 3 42182 NULL
88235 +osst_execute_17607 osst_execute 7-6 17607 NULL
88236 +nf_nat_sip_expect_9418 nf_nat_sip_expect 8 9418 NULL
88237 +sl_alloc_bufs_50380 sl_alloc_bufs 2 50380 NULL
88238 +ipw_packet_received_skb_1230 ipw_packet_received_skb 2 1230 NULL
88239 +ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout_17618 ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout 3 17618 NULL
88240 +rx_rx_frame_checksum_read_40140 rx_rx_frame_checksum_read 3 40140 NULL
88241 +sys32_rt_sigpending_25814 sys32_rt_sigpending 2 25814 NULL
88242 +bm_realloc_pages_9431 bm_realloc_pages 2 9431 NULL
88243 +acpi_battery_write_alarm_1240 acpi_battery_write_alarm 3 1240 NULL
88244 +joydev_ioctl_common_49359 joydev_ioctl_common 2 49359 NULL
88245 +ffs_ep0_write_9438 ffs_ep0_write 3 9438 NULL
88246 +xip_file_read_58592 xip_file_read 3 58592 NULL
88247 +iwl_dbgfs_ucode_tx_stats_read_31611 iwl_dbgfs_ucode_tx_stats_read 3 31611 NULL
88248 +kmalloc_array_9444 kmalloc_array 1-2 9444 NULL
88249 +__ntfs_malloc_34022 __ntfs_malloc 1 34022 NULL
88250 +l2tp_ip_sendmsg_50411 l2tp_ip_sendmsg 4 50411 NULL
88251 +mcs_unwrap_mir_9455 mcs_unwrap_mir 3 9455 NULL
88252 +ppp_write_34034 ppp_write 3 34034 NULL
88253 +qla4xxx_change_queue_depth_1268 qla4xxx_change_queue_depth 2 1268 NULL
88254 +iscsi_create_conn_50425 iscsi_create_conn 2 50425 NULL
88255 +tty_insert_flip_string_34042 tty_insert_flip_string 3 34042 NULL
88256 +packet_setsockopt_17662 packet_setsockopt 5 17662 NULL
88257 +batadv_tt_prepare_packet_buff_1280 batadv_tt_prepare_packet_buff 4 1280 NULL
88258 +do_arpt_set_ctl_51053 do_arpt_set_ctl 4 51053 NULL
88259 +module_alloc_update_bounds_rx_58634 module_alloc_update_bounds_rx 1 58634 NULL nohasharray
88260 +efi_ioremap_58634 efi_ioremap 1-2 58634 &module_alloc_update_bounds_rx_58634
88261 +btmrvl_hsmode_write_42252 btmrvl_hsmode_write 3 42252 NULL
88262 +rx_defrag_need_decrypt_read_42253 rx_defrag_need_decrypt_read 3 42253 NULL
88263 +dsp_tone_hw_message_17678 dsp_tone_hw_message 3 17678 NULL
88264 +netxen_nic_map_indirect_address_128M_42257 netxen_nic_map_indirect_address_128M 2 42257 NULL
88265 +ulog_alloc_skb_23427 ulog_alloc_skb 1 23427 NULL
88266 +__alloc_preds_9492 __alloc_preds 2 9492 NULL
88267 +pgctrl_write_50453 pgctrl_write 3 50453 NULL
88268 +pwr_enable_ps_read_17686 pwr_enable_ps_read 3 17686 NULL
88269 +read_file_ant_diversity_34071 read_file_ant_diversity 3 34071 NULL
88270 +tx_tx_start_fw_gen_read_58648 tx_tx_start_fw_gen_read 3 58648 NULL
88271 +ath6kl_regread_read_25884 ath6kl_regread_read 3 25884 NULL
88272 +tm6000_i2c_recv_regs16_2949 tm6000_i2c_recv_regs16 5 2949 NULL
88273 +savu_sysfs_write_42273 savu_sysfs_write 6 42273 NULL
88274 +uvc_v4l2_ioctl_8411 uvc_v4l2_ioctl 2 8411 NULL
88275 +lp_write_9511 lp_write 3 9511 NULL
88276 +__einj_error_trigger_17707 __einj_error_trigger 1 17707 NULL nohasharray
88277 +venus_rename_17707 venus_rename 5-4 17707 &__einj_error_trigger_17707
88278 +cdrom_read_cdda_50478 cdrom_read_cdda 4 50478 NULL
88279 +nfs_readdir_make_qstr_12509 nfs_readdir_make_qstr 3 12509 NULL
88280 +lpfc_change_queue_depth_25905 lpfc_change_queue_depth 2 25905 NULL
88281 +scsi_tgt_kspace_exec_9522 scsi_tgt_kspace_exec 8 9522 NULL
88282 +do_jffs2_setxattr_25910 do_jffs2_setxattr 5 25910 NULL
88283 +do_xip_mapping_read_60297 do_xip_mapping_read 5 60297 NULL
88284 +read_file_dma_9530 read_file_dma 3 9530 NULL
88285 +sel_read_perm_42302 sel_read_perm 3 42302 NULL
88286 +rcname_read_25919 rcname_read 3 25919 NULL
88287 +sctp_setsockopt_del_key_42304 sctp_setsockopt_del_key 3 42304 NULL nohasharray
88288 +ulong_read_file_42304 ulong_read_file 3 42304 &sctp_setsockopt_del_key_42304
88289 +tps6586x_writes_58689 tps6586x_writes 3 58689 NULL
88290 +il_dbgfs_rx_stats_read_15243 il_dbgfs_rx_stats_read 3 15243 NULL
88291 +xfs_vm_readpages_42308 xfs_vm_readpages 4 42308 NULL
88292 +exofs_read_lookup_dev_table_17733 exofs_read_lookup_dev_table 3 17733 NULL
88293 +pwr_rcvd_awake_beacons_read_50505 pwr_rcvd_awake_beacons_read 3 50505 NULL
88294 +snd_es1938_capture_copy_25930 snd_es1938_capture_copy 5 25930 NULL
88295 +key_flags_read_25931 key_flags_read 3 25931 NULL
88296 +audit_log_n_untrustedstring_9548 audit_log_n_untrustedstring 3 9548 NULL
88297 +sctpprobe_read_17741 sctpprobe_read 3 17741 NULL
88298 +ipx_sendmsg_1362 ipx_sendmsg 4 1362 NULL
88299 +hysdn_conf_read_42324 hysdn_conf_read 3 42324 NULL
88300 +islpci_mgt_transmit_34133 islpci_mgt_transmit 5 34133 NULL
88301 +pipeline_dec_packet_in_fifo_full_read_33052 pipeline_dec_packet_in_fifo_full_read 3 33052 NULL
88302 +fw_node_create_9559 fw_node_create 2 9559 NULL
88303 +fw_stats_raw_read_1369 fw_stats_raw_read 3 1369 NULL
88304 +ixgbe_dbg_netdev_ops_write_34141 ixgbe_dbg_netdev_ops_write 3 34141 NULL
88305 +kobj_map_9566 kobj_map 2-3 9566 NULL
88306 +snd_pcm_plug_alloc_42339 snd_pcm_plug_alloc 2 42339 NULL
88307 +acpi_map_58725 acpi_map 1-2 58725 NULL
88308 +brcmf_usb_attach_17766 brcmf_usb_attach 2-3 17766 NULL
88309 +sctp_setsockopt_initmsg_1383 sctp_setsockopt_initmsg 3 1383 NULL
88310 +fwnet_receive_packet_50537 fwnet_receive_packet 9 50537 NULL
88311 +do_msgsnd_1387 do_msgsnd 4 1387 NULL
88312 +ieee80211_tdls_mgmt_9581 ieee80211_tdls_mgmt 8 9581 NULL
88313 +snd_rawmidi_kernel_read_4328 snd_rawmidi_kernel_read 3 4328 NULL
88314 +ide_raw_taskfile_42355 ide_raw_taskfile 4 42355 NULL
88315 +udp_recvmsg_42558 udp_recvmsg 4 42558 NULL
88316 +file_read_actor_1401 file_read_actor 4 1401 NULL
88317 +av7110_ipack_init_46655 av7110_ipack_init 2 46655 NULL
88318 +ieee80211_if_write_tkip_mic_test_58748 ieee80211_if_write_tkip_mic_test 3 58748 NULL
88319 +ubifs_leb_change_17789 ubifs_leb_change 4 17789 NULL
88320 +udp_setsockopt_25985 udp_setsockopt 5 25985 NULL
88321 +do_sync_9604 do_sync 1 9604 NULL
88322 +snd_emu10k1_fx8010_read_9605 snd_emu10k1_fx8010_read 5 9605 NULL
88323 +scsi_host_alloc_63041 scsi_host_alloc 2 63041 NULL
88324 +agp_allocate_memory_58761 agp_allocate_memory 2 58761 NULL
88325 +__do_config_autodelink_58763 __do_config_autodelink 3 58763 NULL
88326 +sctp_sf_abort_violation_1420 sctp_sf_abort_violation 7 1420 NULL
88327 +afs_cell_alloc_24052 afs_cell_alloc 2 24052 NULL
88328 +il_dbgfs_disable_ht40_read_42386 il_dbgfs_disable_ht40_read 3 42386 NULL
88329 +lpfc_sli_probe_sriov_nr_virtfn_26004 lpfc_sli_probe_sriov_nr_virtfn 2 26004 NULL
88330 +qib_alloc_fast_reg_mr_12526 qib_alloc_fast_reg_mr 2 12526 NULL
88331 +fat_readpages_50582 fat_readpages 4 50582 NULL nohasharray
88332 +pep_reply_50582 pep_reply 5 50582 &fat_readpages_50582
88333 +iwl_dbgfs_missed_beacon_read_50584 iwl_dbgfs_missed_beacon_read 3 50584 NULL
88334 +saa7164_buffer_alloc_user_9627 saa7164_buffer_alloc_user 2 9627 NULL
88335 +_snd_pcm_lib_alloc_vmalloc_buffer_17820 _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 NULL
88336 +xfs_readdir_41200 xfs_readdir 3 41200 NULL
88337 +sge_rx_50594 sge_rx 3 50594 NULL
88338 +stack_max_size_read_1445 stack_max_size_read 3 1445 NULL
88339 +compat_sys_keyctl_9639 compat_sys_keyctl 4 9639 NULL
88340 +skb_padto_50759 skb_padto 2 50759 NULL
88341 +raw_send_hdrinc_58803 raw_send_hdrinc 4 58803 NULL
88342 +mptscsih_change_queue_depth_26036 mptscsih_change_queue_depth 2 26036 NULL
88343 +selinux_inode_post_setxattr_26037 selinux_inode_post_setxattr 4 26037 NULL
88344 +isku_sysfs_read_58806 isku_sysfs_read 6 58806 NULL
88345 +tx_queue_len_read_1463 tx_queue_len_read 3 1463 NULL
88346 +uvc_alloc_buffers_9656 uvc_alloc_buffers 2-3 9656 NULL
88347 +queue_received_packet_9657 queue_received_packet 5 9657 NULL
88348 +ep_read_58813 ep_read 3 58813 NULL
88349 +xprt_alloc_1475 xprt_alloc 2 1475 NULL
88350 +gsm_data_alloc_42437 gsm_data_alloc 3 42437 NULL
88351 +snd_opl4_mem_proc_write_9670 snd_opl4_mem_proc_write 5 9670 NULL
88352 +pci_enable_sriov_35745 pci_enable_sriov 2 35745 NULL
88353 +sisusb_send_bulk_msg_17864 sisusb_send_bulk_msg 3 17864 NULL
88354 +simple_transaction_get_50633 simple_transaction_get 3 50633 NULL
88355 +key_conf_keyidx_read_42443 key_conf_keyidx_read 3 42443 NULL
88356 +dns_query_9676 dns_query 3 9676 NULL
88357 +keyctl_update_key_26061 keyctl_update_key 3 26061 NULL
88358 +sta_num_ps_buf_frames_read_1488 sta_num_ps_buf_frames_read 3 1488 NULL
88359 +ray_cs_essid_proc_write_17875 ray_cs_essid_proc_write 3 17875 NULL
88360 +orinoco_set_key_17878 orinoco_set_key 5-7 17878 NULL
88361 +bl_pipe_downcall_34264 bl_pipe_downcall 3 34264 NULL
88362 +command_write_58841 command_write 3 58841 NULL
88363 +short_retry_limit_read_4687 short_retry_limit_read 3 4687 NULL
88364 +dev_config_8506 dev_config 3 8506 NULL
88365 +compat_sys_pwritev_17886 compat_sys_pwritev 3 17886 NULL
88366 +sys_readv_50664 sys_readv 3 50664 NULL
88367 +bnad_debugfs_read_50665 bnad_debugfs_read 3 50665 NULL
88368 +__simple_xattr_set_42474 __simple_xattr_set 4 42474 NULL
88369 +ath6kl_wmi_send_action_cmd_58860 ath6kl_wmi_send_action_cmd 7 58860 NULL
88370 +rx_rx_wa_density_dropped_frame_read_26095 rx_rx_wa_density_dropped_frame_read 3 26095 NULL
88371 +recover_head_17904 recover_head 3 17904 NULL
88372 +dccp_feat_register_sp_17914 dccp_feat_register_sp 5 17914 NULL
88373 +xfs_buf_associate_memory_17915 xfs_buf_associate_memory 3 17915 NULL
88374 +brcmf_sdbrcm_bus_txctl_42492 brcmf_sdbrcm_bus_txctl 3 42492 NULL
88375 +srp_iu_pool_alloc_17920 srp_iu_pool_alloc 2 17920 NULL
88376 +gs_alloc_req_58883 gs_alloc_req 2 58883 NULL
88377 +pvr2_v4l2_read_18006 pvr2_v4l2_read 3 18006 NULL
88378 +cs553x_init_one_58886 cs553x_init_one 3 58886 NULL
88379 +ddb_input_read_9743 ddb_input_read 3 9743 NULL
88380 +skb_cow_26138 skb_cow 2 26138 NULL
88381 +smk_write_netlbladdr_42525 smk_write_netlbladdr 3 42525 NULL
88382 +snd_emux_create_port_42533 snd_emux_create_port 3 42533 NULL
88383 +do_sigpending_9766 do_sigpending 2 9766 NULL
88384 +hysdn_conf_write_52145 hysdn_conf_write 3 52145 NULL
88385 +pipeline_cs_rx_packet_out_read_58926 pipeline_cs_rx_packet_out_read 3 58926 NULL
88386 +blk_check_plugged_50736 blk_check_plugged 3 50736 NULL
88387 +__blk_queue_init_tags_9778 __blk_queue_init_tags 2 9778 NULL
88388 +copy_oldmem_page_26164 copy_oldmem_page 3-1 26164 NULL
88389 +i915_ring_stop_read_42549 i915_ring_stop_read 3 42549 NULL nohasharray
88390 +ath6kl_wmi_proc_events_vif_42549 ath6kl_wmi_proc_events_vif 5 42549 &i915_ring_stop_read_42549
88391 +ath6kl_roam_table_read_26166 ath6kl_roam_table_read 3 26166 NULL
88392 +snd_mem_proc_write_9786 snd_mem_proc_write 3 9786 NULL
88393 +fc_frame_alloc_1596 fc_frame_alloc 2 1596 NULL
88394 +rngapi_reset_34366 rngapi_reset 3 34366 NULL nohasharray
88395 +p54_alloc_skb_34366 p54_alloc_skb 3 34366 &rngapi_reset_34366
88396 +iwl_calib_set_34400 iwl_calib_set 3 34400 NULL nohasharray
88397 +ivtv_read_pos_34400 ivtv_read_pos 3 34400 &iwl_calib_set_34400
88398 +solo_v4l2_read_59247 solo_v4l2_read 3 59247 NULL
88399 +smk_write_cipso_17989 smk_write_cipso 3 17989 NULL
88400 +packet_buffer_init_1607 packet_buffer_init 2 1607 NULL
88401 +reiserfs_resize_34377 reiserfs_resize 2 34377 NULL
88402 +get_registers_26187 get_registers 3 26187 NULL
88403 +cgroup_setxattr_26188 cgroup_setxattr 4 26188 NULL
88404 +ttm_bo_fbdev_io_9805 ttm_bo_fbdev_io 4 9805 NULL
88405 +btmrvl_hscmd_read_1614 btmrvl_hscmd_read 3 1614 NULL
88406 +av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
88407 +usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
88408 +snd_pcm_oss_write2_27332 snd_pcm_oss_write2 3 27332 NULL
88409 +udpv6_recvmsg_9813 udpv6_recvmsg 4 9813 NULL nohasharray
88410 +ieee80211_if_read_state_9813 ieee80211_if_read_state 3 9813 &udpv6_recvmsg_9813
88411 +tm6000_read_write_usb_50774 tm6000_read_write_usb 7 50774 NULL nohasharray
88412 +pipe_handler_request_50774 pipe_handler_request 5 50774 &tm6000_read_write_usb_50774
88413 +xfs_idata_realloc_26199 xfs_idata_realloc 2 26199 NULL
88414 +mce_write_26201 mce_write 3 26201 NULL
88415 +iwch_alloc_fastreg_pbl_40153 iwch_alloc_fastreg_pbl 2 40153 NULL
88416 +bio_alloc_map_data_50782 bio_alloc_map_data 1-2 50782 NULL
88417 +alloc_rx_desc_ring_18016 alloc_rx_desc_ring 2 18016 NULL
88418 +oom_score_adj_write_42594 oom_score_adj_write 3 42594 NULL
88419 +smk_write_load2_52155 smk_write_load2 3 52155 NULL
88420 +__pskb_pull_42602 __pskb_pull 2 42602 NULL
88421 +sctp_make_heartbeat_ack_34411 sctp_make_heartbeat_ack 4 34411 NULL
88422 +tpm_write_50798 tpm_write 3 50798 NULL
88423 +btmrvl_hsmode_read_1647 btmrvl_hsmode_read 3 1647 NULL
88424 +tun_do_read_50800 tun_do_read 4 50800 NULL
88425 +handle_rx_packet_58993 handle_rx_packet 3 58993 NULL
88426 +write_flush_50803 write_flush 3 50803 NULL
88427 +_scsih_change_queue_depth_26230 _scsih_change_queue_depth 2 26230 NULL
88428 +rxrpc_recvmsg_26233 rxrpc_recvmsg 4 26233 NULL
88429 +ikconfig_read_current_1658 ikconfig_read_current 3 1658 NULL
88430 +dvb_play_50814 dvb_play 3 50814 NULL
88431 +cryptd_alloc_instance_18048 cryptd_alloc_instance 2-3 18048 NULL
88432 +sys_move_pages_42626 sys_move_pages 2 42626 NULL
88433 +ddebug_proc_write_18055 ddebug_proc_write 3 18055 NULL
88434 +pmcraid_alloc_sglist_9864 pmcraid_alloc_sglist 1 9864 NULL
88435 +btrfs_free_reserved_extent_9867 btrfs_free_reserved_extent 2 9867 NULL
88436 +usbtest_alloc_urb_34446 usbtest_alloc_urb 3-5 34446 NULL
88437 +dma_attach_50831 dma_attach 6-7 50831 NULL
88438 +scsi_activate_tcq_42640 scsi_activate_tcq 2 42640 NULL
88439 +br_mdb_rehash_42643 br_mdb_rehash 2 42643 NULL
88440 +vhci_put_user_12604 vhci_put_user 4 12604 NULL
88441 +packet_came_18072 packet_came 3 18072 NULL
88442 +init_pci_cap_msi_perm_59033 init_pci_cap_msi_perm 2 59033 NULL
88443 +kvm_read_guest_page_18074 kvm_read_guest_page 5 18074 NULL
88444 +sctp_make_abort_34459 sctp_make_abort 3 34459 NULL
88445 +_regmap_raw_write_42652 _regmap_raw_write 4 42652 NULL
88446 +selinux_transaction_write_59038 selinux_transaction_write 3 59038 NULL
88447 +get_vm_area_18080 get_vm_area 1 18080 NULL
88448 +dvb_dvr_set_buffer_size_9840 dvb_dvr_set_buffer_size 2 9840 NULL
88449 +bm_register_write_9893 bm_register_write 3 9893 NULL nohasharray
88450 +snd_midi_event_new_9893 snd_midi_event_new 1 9893 &bm_register_write_9893
88451 +self_check_write_50856 self_check_write 5 50856 NULL
88452 +line6_dumpreq_init_34473 line6_dumpreq_init 3 34473 NULL
88453 +i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
88454 +mpi_alloc_18094 mpi_alloc 1 18094 NULL
88455 +coda_psdev_write_1711 coda_psdev_write 3 1711 NULL
88456 +l2tp_xmit_skb_42672 l2tp_xmit_skb 3 42672 NULL
88457 +bitmap_resize_33054 bitmap_resize 2 33054 NULL
88458 +mga_compat_ioctl_52170 mga_compat_ioctl 2 52170 NULL
88459 +mgmt_pending_add_46976 mgmt_pending_add 5 46976 NULL
88460 +sis190_try_rx_copy_57069 sis190_try_rx_copy 3 57069 NULL
88461 +dfs_file_read_18116 dfs_file_read 3 18116 NULL
88462 +request_key_and_link_42693 request_key_and_link 4 42693 NULL
88463 +vb2_read_42703 vb2_read 3 42703 NULL
88464 +pwr_wake_on_host_read_26321 pwr_wake_on_host_read 3 26321 NULL
88465 +hvc_alloc_12579 hvc_alloc 4 12579 NULL
88466 +tx_frag_called_read_1748 tx_frag_called_read 3 1748 NULL
88467 +snd_pcm_plugin_alloc_12580 snd_pcm_plugin_alloc 2 12580 NULL
88468 +pstore_ftrace_knob_read_9947 pstore_ftrace_knob_read 3 9947 NULL
88469 +read_file_misc_9948 read_file_misc 3 9948 NULL
88470 +xfs_iext_remove_50909 xfs_iext_remove 3 50909 NULL
88471 +set_rxd_buffer_pointer_9950 set_rxd_buffer_pointer 8 9950 NULL
88472 +selinux_inode_setsecurity_18148 selinux_inode_setsecurity 4 18148 NULL
88473 +csum_partial_copy_fromiovecend_9957 csum_partial_copy_fromiovecend 3-4 9957 NULL
88474 +tracing_stats_read_34537 tracing_stats_read 3 34537 NULL
88475 +hash_recvmsg_50924 hash_recvmsg 4 50924 NULL
88476 +dvb_demux_ioctl_42733 dvb_demux_ioctl 2 42733 NULL
88477 +cosa_write_1774 cosa_write 3 1774 NULL
88478 +set_aoe_iflist_42737 set_aoe_iflist 2 42737 NULL
88479 +hugetlbfs_read_actor_34547 hugetlbfs_read_actor 2-5-4 34547 NULL
88480 +ax25_setsockopt_42740 ax25_setsockopt 5 42740 NULL
88481 +btrfs_add_link_9973 btrfs_add_link 5 9973 NULL
88482 +bnx2fc_cmd_mgr_alloc_24873 bnx2fc_cmd_mgr_alloc 3-2 24873 NULL
88483 +cifs_readdata_alloc_26360 cifs_readdata_alloc 1 26360 NULL
88484 +ath6kl_usb_submit_ctrl_out_9978 ath6kl_usb_submit_ctrl_out 6 9978 NULL
88485 +dup_to_netobj_26363 dup_to_netobj 3 26363 NULL
88486 +sock_bindtodevice_50942 sock_bindtodevice 3 50942 NULL
88487 +pccard_store_cis_18176 pccard_store_cis 6 18176 NULL
88488 +fcoe_ctlr_device_add_1793 fcoe_ctlr_device_add 3 1793 NULL
88489 +alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL
88490 +mld_newpack_50950 mld_newpack 2 50950 NULL
88491 +framebuffer_alloc_59145 framebuffer_alloc 1 59145 NULL
88492 +i915_ring_stop_write_59010 i915_ring_stop_write 3 59010 NULL
88493 +radeon_compat_ioctl_59150 radeon_compat_ioctl 2 59150 NULL
88494 +cfpkt_create_18197 cfpkt_create 1 18197 NULL
88495 +velocity_rx_copy_34583 velocity_rx_copy 2 34583 NULL
88496 +x25_recvmsg_42777 x25_recvmsg 4 42777 NULL
88497 +init_send_hfcd_34586 init_send_hfcd 1 34586 NULL
88498 +tty_insert_flip_string_fixed_flag_37428 tty_insert_flip_string_fixed_flag 4 37428 NULL
88499 +xfrm_dst_alloc_copy_3034 xfrm_dst_alloc_copy 3 3034 NULL
88500 +orinoco_add_extscan_result_18207 orinoco_add_extscan_result 3 18207 NULL
88501 +gsm_control_message_18209 gsm_control_message 4 18209 NULL
88502 +do_ipv6_setsockopt_18215 do_ipv6_setsockopt 5 18215 NULL
88503 +handle_request_10024 handle_request 9 10024 NULL
88504 +__tty_alloc_driver_53799 __tty_alloc_driver 1 53799 NULL
88505 +setup_window_59178 setup_window 4-2-5-7 59178 NULL
88506 +timeout_write_50991 timeout_write 3 50991 NULL
88507 +batadv_orig_hash_add_if_10033 batadv_orig_hash_add_if 2 10033 NULL
88508 +fw_device_op_compat_ioctl_42804 fw_device_op_compat_ioctl 2 42804 NULL
88509 +ip_vs_create_timeout_table_64478 ip_vs_create_timeout_table 2 64478 NULL
88510 +proc_write_51003 proc_write 3 51003 NULL
88511 +drm_ioctl_42813 drm_ioctl 2 42813 NULL
88512 +gnttab_alloc_grant_references_18240 gnttab_alloc_grant_references 1 18240 NULL
88513 +iwl_dbgfs_ucode_bt_stats_read_42820 iwl_dbgfs_ucode_bt_stats_read 3 42820 NULL
88514 +set_arg_42824 set_arg 3 42824 NULL
88515 +xfs_iext_realloc_indirect_59211 xfs_iext_realloc_indirect 2 59211 NULL
88516 +fast_rx_path_59214 fast_rx_path 3 59214 NULL
88517 +lbs_dev_info_51023 lbs_dev_info 3 51023 NULL
88518 +cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
88519 +audit_log_n_string_31705 audit_log_n_string 3 31705 NULL
88520 +fuse_conn_congestion_threshold_read_51028 fuse_conn_congestion_threshold_read 3 51028 NULL
88521 +pstore_mkfile_50830 pstore_mkfile 5 50830 NULL
88522 +qdisc_class_hash_alloc_18262 qdisc_class_hash_alloc 1 18262 NULL
88523 +hidp_queue_report_1881 hidp_queue_report 3 1881 NULL
88524 +dt3155_read_59226 dt3155_read 3 59226 NULL
88525 +xfs_buf_read_uncached_42844 xfs_buf_read_uncached 3 42844 NULL
88526 +ieee80211_set_probe_resp_10077 ieee80211_set_probe_resp 3 10077 NULL
88527 +dump_midi_51040 dump_midi 3 51040 NULL
88528 +srpt_alloc_ioctx_51042 srpt_alloc_ioctx 2-3 51042 NULL
88529 +gfs2_alloc_sort_buffer_18275 gfs2_alloc_sort_buffer 1 18275 NULL
88530 +nf_nat_mangle_udp_packet_34661 nf_nat_mangle_udp_packet 8-6 34661 NULL
88531 +alloc_ring_18278 alloc_ring 2-4 18278 NULL
88532 +tty_prepare_flip_string_flags_59240 tty_prepare_flip_string_flags 4 59240 NULL
88533 +nouveau_subdev_create__18281 nouveau_subdev_create_ 7 18281 NULL
88534 +ext4_readpages_18283 ext4_readpages 4 18283 NULL
88535 +mmc_send_bus_test_18285 mmc_send_bus_test 4 18285 NULL
88536 +em28xx_v4l2_read_16701 em28xx_v4l2_read 3 16701 NULL
88537 +configfs_read_file_1683 configfs_read_file 3 1683 NULL
88538 +ulong_write_file_26485 ulong_write_file 3 26485 NULL
88539 +wusb_prf_64_51065 wusb_prf_64 7 51065 NULL
88540 +dvb_ca_en50221_io_ioctl_26490 dvb_ca_en50221_io_ioctl 2 26490 NULL
88541 +dynamic_ps_timeout_read_10110 dynamic_ps_timeout_read 3 10110 NULL
88542 +isr_fiqs_read_34687 isr_fiqs_read 3 34687 NULL
88543 +pskb_expand_head_42881 pskb_expand_head 2-3 42881 NULL
88544 +ip6ip6_err_18308 ip6ip6_err 5 18308 NULL
88545 +read_vmcore_26501 read_vmcore 3 26501 NULL
88546 +tx_tx_retry_data_read_1926 tx_tx_retry_data_read 3 1926 NULL
88547 +garp_attr_create_3883 garp_attr_create 3 3883 NULL
88548 +tipc_port_recv_sections_42890 tipc_port_recv_sections 4 42890 NULL
88549 +vfio_pci_set_msi_trigger_26507 vfio_pci_set_msi_trigger 3-4 26507 NULL
88550 +xpc_kmalloc_cacheline_aligned_42895 xpc_kmalloc_cacheline_aligned 1 42895 NULL
88551 +jbd2_journal_init_revoke_51088 jbd2_journal_init_revoke 2 51088 NULL
88552 +ecryptfs_send_message_18322 ecryptfs_send_message 2 18322 NULL
88553 +cyttsp_probe_1940 cyttsp_probe 4 1940 NULL
88554 +SendTxCommandPacket_42901 SendTxCommandPacket 3 42901 NULL
88555 +btmrvl_hscfgcmd_read_56303 btmrvl_hscfgcmd_read 3 56303 NULL
88556 +W6692_empty_Bfifo_47804 W6692_empty_Bfifo 2 47804 NULL
88557 +ath6kl_wmi_send_probe_response_cmd_31728 ath6kl_wmi_send_probe_response_cmd 6 31728 NULL
88558 +ieee80211_if_read_num_sta_ps_34722 ieee80211_if_read_num_sta_ps 3 34722 NULL
88559 +exofs_read_kern_39921 exofs_read_kern 6 39921 NULL nohasharray
88560 +oom_score_adj_read_39921 oom_score_adj_read 3 39921 &exofs_read_kern_39921
88561 +btrfs_insert_dir_item_59304 btrfs_insert_dir_item 4 59304 NULL
88562 +aes_decrypt_packets_read_10155 aes_decrypt_packets_read 3 10155 NULL
88563 +rds_message_inc_copy_to_user_26540 rds_message_inc_copy_to_user 3 26540 NULL
88564 +iscsi_nop_out_rsp_51117 iscsi_nop_out_rsp 4 51117 NULL
88565 +platform_list_read_file_34734 platform_list_read_file 3 34734 NULL
88566 +hidg_alloc_ep_req_10159 hidg_alloc_ep_req 2 10159 NULL
88567 +reg_w_ixbuf_34736 reg_w_ixbuf 4 34736 NULL
88568 +sctp_make_datafrag_empty_34737 sctp_make_datafrag_empty 3 34737 NULL
88569 +pwr_power_save_off_read_18355 pwr_power_save_off_read 3 18355 NULL
88570 +asd_store_update_bios_10165 asd_store_update_bios 4 10165 NULL
88571 +__vhost_add_used_n_26554 __vhost_add_used_n 3 26554 NULL
88572 +fd_copyout_59323 fd_copyout 3 59323 NULL
88573 +nfs_map_name_to_uid_51132 nfs_map_name_to_uid 3 51132 NULL
88574 +proc_pid_attr_read_10173 proc_pid_attr_read 3 10173 NULL
88575 +read_9287_modal_eeprom_59327 read_9287_modal_eeprom 3 59327 NULL
88576 +sel_read_avc_hash_stats_1984 sel_read_avc_hash_stats 3 1984 NULL
88577 +diva_alloc_dma_map_23798 diva_alloc_dma_map 2 23798 NULL
88578 +solos_param_store_34755 solos_param_store 4 34755 NULL
88579 +simple_xattr_set_51140 simple_xattr_set 4 51140 NULL
88580 +jffs2_user_setxattr_10182 jffs2_user_setxattr 4 10182 NULL
88581 +__alloc_bootmem_node_1992 __alloc_bootmem_node 2 1992 NULL
88582 +rx_defrag_in_process_called_read_59338 rx_defrag_in_process_called_read 3 59338 NULL
88583 +xfs_trans_get_efd_51148 xfs_trans_get_efd 3 51148 NULL
88584 +ib_send_cm_rtu_63138 ib_send_cm_rtu 3 63138 NULL
88585 +compat_sys_pwritev64_51151 compat_sys_pwritev64 3 51151 NULL
88586 +rts51x_read_mem_26577 rts51x_read_mem 4 26577 NULL nohasharray
88587 +batadv_receive_server_sync_packet_26577 batadv_receive_server_sync_packet 3 26577 &rts51x_read_mem_26577
88588 +xfs_attrmulti_attr_set_59346 xfs_attrmulti_attr_set 4 59346 NULL
88589 +vcc_recvmsg_37198 vcc_recvmsg 4 37198 NULL
88590 +batadv_tt_commit_changes_2008 batadv_tt_commit_changes 4 2008 NULL
88591 +sep_prepare_input_dma_table_2009 sep_prepare_input_dma_table 2-3 2009 NULL
88592 +qib_cdev_init_34778 qib_cdev_init 1 34778 NULL
88593 +ubifs_write_node_11258 ubifs_write_node 5-3 11258 NULL
88594 +reada_tree_block_flagged_18402 reada_tree_block_flagged 3 18402 NULL
88595 +iscsi_if_send_reply_52219 iscsi_if_send_reply 7 52219 NULL
88596 +write_flush_pipefs_2021 write_flush_pipefs 3 2021 NULL
88597 +__copy_in_user_34790 __copy_in_user 3 34790 NULL
88598 +crystalhd_user_data_18407 crystalhd_user_data 3 18407 NULL
88599 +nfs_idmap_get_desc_42990 nfs_idmap_get_desc 4-2 42990 NULL
88600 +mwifiex_regrdwr_read_34472 mwifiex_regrdwr_read 3 34472 NULL
88601 +BcmCopySection_2035 BcmCopySection 5 2035 NULL
88602 +devm_ioremap_nocache_2036 devm_ioremap_nocache 2-3 2036 NULL
88603 +carl9170_debugfs_write_50857 carl9170_debugfs_write 3 50857 NULL
88604 +hdlc_rpr_irq_10240 hdlc_rpr_irq 2 10240 NULL
88605 +batadv_orig_node_add_if_18433 batadv_orig_node_add_if 2 18433 NULL
88606 +ath6kl_fwlog_mask_read_2050 ath6kl_fwlog_mask_read 3 2050 NULL
88607 +pwr_fix_tsf_ps_read_26627 pwr_fix_tsf_ps_read 3 26627 NULL
88608 +nfc_alloc_recv_skb_10244 nfc_alloc_recv_skb 1 10244 NULL
88609 +pm8001_store_update_fw_55716 pm8001_store_update_fw 4 55716 NULL
88610 +mangle_sdp_packet_30381 mangle_sdp_packet 10 30381 NULL
88611 +isr_rx_mem_overflow_read_43025 isr_rx_mem_overflow_read 3 43025 NULL
88612 +cciss_proc_write_10259 cciss_proc_write 3 10259 NULL
88613 +hest_ghes_dev_register_46766 hest_ghes_dev_register 1 46766 NULL
88614 +b43_debugfs_write_34838 b43_debugfs_write 3 34838 NULL
88615 +subbuf_read_actor_2071 subbuf_read_actor 3 2071 NULL
88616 +fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
88617 +irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL
88618 +regset_tls_set_18459 regset_tls_set 4 18459 NULL
88619 +nf_ct_ext_create_51232 nf_ct_ext_create 3 51232 NULL
88620 +receive_DataRequest_9904 receive_DataRequest 3 9904 NULL
88621 +acpi_system_write_wakeup_device_34853 acpi_system_write_wakeup_device 3 34853 NULL
88622 +tipc_send_51238 tipc_send 4 51238 NULL
88623 +drm_property_create_51239 drm_property_create 4 51239 NULL
88624 +snd_rme9652_capture_copy_10287 snd_rme9652_capture_copy 5 10287 NULL
88625 +squashfs_read_data_59440 squashfs_read_data 6 59440 NULL
88626 +idetape_chrdev_read_2097 idetape_chrdev_read 3 2097 NULL
88627 +audit_expand_2098 audit_expand 2 2098 NULL
88628 +st_read_51251 st_read 3 51251 NULL
88629 +fs_path_ensure_buf_59445 fs_path_ensure_buf 2 59445 NULL
88630 +udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 NULL
88631 +iwl_dbgfs_log_event_read_2107 iwl_dbgfs_log_event_read 3 2107 NULL
88632 +ecryptfs_encrypt_and_encode_filename_2109 ecryptfs_encrypt_and_encode_filename 6 2109 NULL
88633 +compat_dccp_setsockopt_51263 compat_dccp_setsockopt 5 51263 NULL
88634 +nouveau_gpuobj_create__43072 nouveau_gpuobj_create_ 9 43072 NULL
88635 +rtsx_read_cfg_seq_48139 rtsx_read_cfg_seq 5-3 48139 NULL
88636 +__find_xattr_2117 __find_xattr 6 2117 NULL nohasharray
88637 +enable_read_2117 enable_read 3 2117 &__find_xattr_2117
88638 +dvb_audio_write_51275 dvb_audio_write 3 51275 NULL
88639 +pcf50633_write_block_2124 pcf50633_write_block 3 2124 NULL
88640 +ipwireless_network_packet_received_51277 ipwireless_network_packet_received 4 51277 NULL
88641 +ieee80211_if_write_34894 ieee80211_if_write 3 34894 NULL
88642 +c4_add_card_54968 c4_add_card 3 54968 NULL
88643 +rtllib_authentication_req_26713 rtllib_authentication_req 3 26713 NULL
88644 +snd_gus_dram_poke_18525 snd_gus_dram_poke 4 18525 NULL
88645 +check_load_and_stores_2143 check_load_and_stores 2 2143 NULL
88646 +fd_do_readv_51297 fd_do_readv 3 51297 NULL
88647 +nouveau_fifo_channel_create__18530 nouveau_fifo_channel_create_ 5-6-9 18530 NULL
88648 +nfc_hci_send_cmd_async_26723 nfc_hci_send_cmd_async 5 26723 NULL
88649 +mlx4_init_icm_table_2151 mlx4_init_icm_table 5-4 2151 NULL
88650 +bnad_debugfs_read_regrd_51308 bnad_debugfs_read_regrd 3 51308 NULL
88651 +ufx_alloc_urb_list_10349 ufx_alloc_urb_list 3 10349 NULL
88652 +ib_copy_from_udata_59502 ib_copy_from_udata 3 59502 NULL
88653 +seq_copy_in_user_18543 seq_copy_in_user 3 18543 NULL
88654 +sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 NULL
88655 +ath6kl_listen_int_read_10355 ath6kl_listen_int_read 3 10355 NULL
88656 +_ore_get_io_state_2166 _ore_get_io_state 3-4-5 2166 NULL
88657 +nr_recvmsg_12649 nr_recvmsg 4 12649 NULL
88658 +alloc_hippi_dev_51320 alloc_hippi_dev 1 51320 NULL
88659 +pipeline_tcp_rx_stat_fifo_int_read_26745 pipeline_tcp_rx_stat_fifo_int_read 3 26745 NULL
88660 +ms_write_multiple_pages_10362 ms_write_multiple_pages 6-5 10362 NULL
88661 +sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
88662 +fb_sys_write_33130 fb_sys_write 3 33130 NULL
88663 +sta_ht_capa_read_10366 sta_ht_capa_read 3 10366 NULL
88664 +set_bypass_pwoff_pfs_27669 set_bypass_pwoff_pfs 3 27669 NULL
88665 +smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL
88666 +srp_ring_alloc_26760 srp_ring_alloc 2 26760 NULL
88667 +read_file_dfs_43145 read_file_dfs 3 43145 NULL
88668 +ecryptfs_decode_and_decrypt_filename_10379 ecryptfs_decode_and_decrypt_filename 5 10379 NULL
88669 +ntfs_malloc_nofs_nofail_63631 ntfs_malloc_nofs_nofail 1 63631 NULL
88670 +cosa_net_setup_rx_38594 cosa_net_setup_rx 2 38594 NULL
88671 +skb_gro_header_slow_34958 skb_gro_header_slow 2 34958 NULL
88672 +debug_output_18575 debug_output 3 18575 NULL
88673 +Realloc_34961 Realloc 2 34961 NULL
88674 +il_dbgfs_power_save_status_read_43165 il_dbgfs_power_save_status_read 3 43165 NULL
88675 +do_compat_pselect_10398 do_compat_pselect 1 10398 NULL
88676 +_rtl92s_firmware_downloadcode_14021 _rtl92s_firmware_downloadcode 3 14021 NULL
88677 +__netdev_alloc_skb_18595 __netdev_alloc_skb 2 18595 NULL
88678 +slabinfo_write_18600 slabinfo_write 3 18600 NULL
88679 +ssb_bus_ssbbus_register_2217 ssb_bus_ssbbus_register 2 2217 NULL
88680 +radeon_kms_compat_ioctl_51371 radeon_kms_compat_ioctl 2 51371 NULL
88681 +iowarrior_write_18604 iowarrior_write 3 18604 NULL
88682 +vhci_write_2224 vhci_write 3 2224 NULL
88683 +ath6kl_set_assoc_req_ies_43185 ath6kl_set_assoc_req_ies 3 43185 NULL
88684 +acpi_os_ioremap_49523 acpi_os_ioremap 1-2 49523 NULL
88685 +rb_alloc_3102 rb_alloc 1 3102 NULL
88686 +uf_create_device_nodes_24948 uf_create_device_nodes 2 24948 NULL
88687 +rx_rx_hdr_overflow_read_35002 rx_rx_hdr_overflow_read 3 35002 NULL
88688 +l2cap_skbuff_fromiovec_35003 l2cap_skbuff_fromiovec 4-3 35003 NULL
88689 +write_pbl_59583 write_pbl 4 59583 NULL
88690 +from_buffer_18625 from_buffer 3 18625 NULL
88691 +uio_write_43202 uio_write 3 43202 NULL
88692 +memdup_user_59590 memdup_user 2 59590 NULL
88693 +ieee80211_wx_set_gen_ie_51399 ieee80211_wx_set_gen_ie 3 51399 NULL
88694 +iso_callback_43208 iso_callback 3 43208 NULL
88695 +ieee80211_if_read_dot11MeshHWMPRannInterval_2249 ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 NULL
88696 +smk_write_load_26829 smk_write_load 3 26829 NULL
88697 +sel_write_avc_cache_threshold_2256 sel_write_avc_cache_threshold 3 2256 NULL
88698 +do_update_counters_2259 do_update_counters 4 2259 NULL
88699 +coda_psdev_read_35029 coda_psdev_read 3 35029 NULL
88700 +cache_slow_downcall_8570 cache_slow_downcall 2 8570 NULL
88701 +ecryptfs_miscdev_write_26847 ecryptfs_miscdev_write 3 26847 NULL
88702 +blk_register_region_51424 blk_register_region 1-2 51424 NULL
88703 +ath6kl_wmi_bssinfo_event_rx_2275 ath6kl_wmi_bssinfo_event_rx 3 2275 NULL
88704 +mwifiex_rdeeprom_read_51429 mwifiex_rdeeprom_read 3 51429 NULL
88705 +mtrr_write_59622 mtrr_write 3 59622 NULL
88706 +event_phy_transmit_error_read_10471 event_phy_transmit_error_read 3 10471 NULL
88707 +ip_vs_icmp_xmit_59624 ip_vs_icmp_xmit 4 59624 NULL
88708 +netxen_nic_hw_read_wx_128M_26858 netxen_nic_hw_read_wx_128M 2 26858 NULL
88709 +edge_tty_recv_18667 edge_tty_recv 4 18667 NULL nohasharray
88710 +xfs_iext_insert_18667 xfs_iext_insert 3 18667 &edge_tty_recv_18667
88711 +btmrvl_gpiogap_write_35053 btmrvl_gpiogap_write 3 35053 NULL
88712 +tty_buffer_alloc_45437 tty_buffer_alloc 2 45437 NULL
88713 +ieee80211_if_read_dot11MeshHWMPRootMode_51441 ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 NULL
88714 +debug_debug5_read_2291 debug_debug5_read 3 2291 NULL
88715 +twl_change_queue_depth_41342 twl_change_queue_depth 2 41342 NULL
88716 +fixup_leb_43256 fixup_leb 3 43256 NULL
88717 +dvb_usercopy_14036 dvb_usercopy 2 14036 NULL
88718 +ubifs_recover_log_leb_12079 ubifs_recover_log_leb 3 12079 NULL
88719 +ubifs_setxattr_59650 ubifs_setxattr 4 59650 NULL nohasharray
88720 +hidraw_read_59650 hidraw_read 3 59650 &ubifs_setxattr_59650
88721 +kvm_clear_guest_page_2308 kvm_clear_guest_page 4 2308 NULL
88722 +ca91cx42_alloc_resource_10502 ca91cx42_alloc_resource 2 10502 NULL
88723 +intel_sdvo_set_value_2311 intel_sdvo_set_value 4 2311 NULL
88724 +qib_alloc_fast_reg_page_list_10507 qib_alloc_fast_reg_page_list 2 10507 NULL
88725 +evtchn_write_43278 evtchn_write 3 43278 NULL
88726 +sel_write_disable_10511 sel_write_disable 3 10511 NULL
88727 +store_ifalias_35088 store_ifalias 4 35088 NULL
88728 +tx_tx_template_prepared_read_30424 tx_tx_template_prepared_read 3 30424 NULL
88729 +osd_req_write_sg_kern_10514 osd_req_write_sg_kern 5 10514 NULL
88730 +____alloc_ei_netdev_51475 ____alloc_ei_netdev 1 51475 NULL
88731 +iwl_dbgfs_rx_handlers_read_18708 iwl_dbgfs_rx_handlers_read 3 18708 NULL
88732 +rds_message_alloc_10517 rds_message_alloc 1 10517 NULL
88733 +ceph_alloc_page_vector_18710 ceph_alloc_page_vector 1 18710 NULL
88734 +blk_rq_map_user_iov_16772 blk_rq_map_user_iov 5 16772 NULL
88735 +get_vm_area_caller_10527 get_vm_area_caller 1 10527 NULL
88736 +capi_write_35104 capi_write 3 35104 NULL nohasharray
88737 +tx_tx_done_template_read_35104 tx_tx_done_template_read 3 35104 &capi_write_35104
88738 +mpage_alloc_43299 mpage_alloc 3 43299 NULL
88739 +sys_semtimedop_4486 sys_semtimedop 3 4486 NULL
88740 +ide_settings_proc_write_35110 ide_settings_proc_write 3 35110 NULL
88741 +osd_req_write_sg_50908 osd_req_write_sg 5 50908 NULL
88742 +x25_asy_change_mtu_26928 x25_asy_change_mtu 2 26928 NULL
88743 +zr364xx_read_2354 zr364xx_read 3 2354 NULL
88744 +mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL
88745 +scsi_tgt_copy_sense_26933 scsi_tgt_copy_sense 3 26933 NULL
88746 +pppoe_recvmsg_15073 pppoe_recvmsg 4 15073 NULL
88747 +pwr_ps_enter_read_26935 pwr_ps_enter_read 3 26935 NULL nohasharray
88748 +sctp_setsockopt_adaptation_layer_26935 sctp_setsockopt_adaptation_layer 3 26935 &pwr_ps_enter_read_26935
88749 +o2hb_debug_create_18744 o2hb_debug_create 4 18744 NULL
88750 +tcp_send_rcvq_11316 tcp_send_rcvq 3 11316 NULL
88751 +viafb_iga2_odev_proc_write_2363 viafb_iga2_odev_proc_write 3 2363 NULL
88752 +hecubafb_write_26942 hecubafb_write 3 26942 NULL
88753 +wep_packets_read_18751 wep_packets_read 3 18751 NULL
88754 +xfs_buf_map_from_irec_2368 xfs_buf_map_from_irec 5 2368 NULL nohasharray
88755 +rose_recvmsg_2368 rose_recvmsg 4 2368 &xfs_buf_map_from_irec_2368
88756 +il_dbgfs_sensitivity_read_2370 il_dbgfs_sensitivity_read 3 2370 NULL
88757 +ieee80211_if_write_uapsd_queues_51526 ieee80211_if_write_uapsd_queues 3 51526 NULL
88758 +do_trimming_26952 do_trimming 3 26952 NULL
88759 +ath6kl_wmi_set_ie_cmd_37260 ath6kl_wmi_set_ie_cmd 6 37260 NULL
88760 +read_file_dump_nfcal_18766 read_file_dump_nfcal 3 18766 NULL
88761 +prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL
88762 +iscsi_conn_setup_35159 iscsi_conn_setup 2 35159 NULL
88763 +alloc_buf_34532 alloc_buf 1 34532 NULL
88764 +sock_rmalloc_59740 sock_rmalloc 2 59740 NULL nohasharray
88765 +ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 &sock_rmalloc_59740
88766 +__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL
88767 +icn_writecmd_38629 icn_writecmd 2 38629 NULL
88768 +otp_read_10594 otp_read 2-4-5 10594 NULL
88769 +rxpipe_rx_prep_beacon_drop_read_2403 rxpipe_rx_prep_beacon_drop_read 3 2403 NULL
88770 +sctp_manip_pkt_59749 sctp_manip_pkt 4 59749 NULL
88771 +icmp_manip_pkt_51560 icmp_manip_pkt 4 51560 NULL
88772 +brcmf_sdbrcm_read_control_22721 brcmf_sdbrcm_read_control 3 22721 NULL
88773 +supply_map_read_file_10608 supply_map_read_file 3 10608 NULL
88774 +isdn_v110_open_2418 isdn_v110_open 3 2418 NULL
88775 +roccat_common2_send_2422 roccat_common2_send 4 2422 NULL
88776 +ieee80211_auth_challenge_18810 ieee80211_auth_challenge 3 18810 NULL
88777 +ima_show_htable_violations_10619 ima_show_htable_violations 3 10619 NULL
88778 +cxgb3_get_cpl_reply_skb_10620 cxgb3_get_cpl_reply_skb 2 10620 NULL
88779 +xfs_iroot_realloc_46826 xfs_iroot_realloc 2 46826 NULL
88780 +venus_remove_59781 venus_remove 4 59781 NULL
88781 +ioremap_nocache_2439 ioremap_nocache 1-2 2439 NULL
88782 +sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
88783 +unix_stream_recvmsg_35210 unix_stream_recvmsg 4 35210 NULL
88784 +tty_buffer_find_2443 tty_buffer_find 2 2443 NULL
88785 +xlog_do_recover_59789 xlog_do_recover 3 59789 NULL
88786 +aac_convert_sgraw2_51598 aac_convert_sgraw2 4 51598 NULL
88787 +rfcomm_tty_write_51603 rfcomm_tty_write 3 51603 NULL
88788 +xenfb_write_43412 xenfb_write 3 43412 NULL
88789 +chd_dec_fetch_cdata_50926 chd_dec_fetch_cdata 3 50926 NULL
88790 +nfs4_alloc_slots_2454 nfs4_alloc_slots 1 2454 NULL nohasharray
88791 +ath6kl_usb_bmi_write_2454 ath6kl_usb_bmi_write 3 2454 &nfs4_alloc_slots_2454
88792 +rx_rx_cmplt_task_read_35226 rx_rx_cmplt_task_read 3 35226 NULL
88793 +mtf_test_write_18844 mtf_test_write 3 18844 NULL
88794 +__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 NULL nohasharray
88795 +gdm_wimax_netif_rx_43423 gdm_wimax_netif_rx 3 43423 &__alloc_bootmem_low_43423
88796 +rtllib_wx_set_gen_ie_59808 rtllib_wx_set_gen_ie 3 59808 NULL
88797 +error_error_null_Frame_tx_start_read_55024 error_error_null_Frame_tx_start_read 3 55024 NULL
88798 +udp_manip_pkt_45467 udp_manip_pkt 4 45467 NULL
88799 +nfs_idmap_lookup_id_10660 nfs_idmap_lookup_id 2 10660 NULL
88800 +xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL
88801 +ni65_alloc_mem_10664 ni65_alloc_mem 3 10664 NULL
88802 +b43legacy_debugfs_read_2473 b43legacy_debugfs_read 3 2473 NULL
88803 +usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
88804 +cmd_complete_51629 cmd_complete 6 51629 NULL
88805 +sctp_setsockopt_events_18862 sctp_setsockopt_events 3 18862 NULL
88806 +btmrvl_txdnldready_read_413 btmrvl_txdnldready_read 3 413 NULL
88807 +ath6kl_wmi_roam_tbl_event_rx_43440 ath6kl_wmi_roam_tbl_event_rx 3 43440 NULL
88808 +set_fd_set_35249 set_fd_set 1 35249 NULL
88809 +wiphy_new_2482 wiphy_new 2 2482 NULL
88810 +bio_alloc_bioset_2484 bio_alloc_bioset 2 2484 NULL
88811 +ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL
88812 +__videobuf_alloc_vb_27062 __videobuf_alloc_vb 1 27062 NULL
88813 +ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
88814 +tcp_push_10680 tcp_push 3 10680 NULL
88815 +sctp_auth_create_key_51641 sctp_auth_create_key 1 51641 NULL
88816 +c101_run_37279 c101_run 2 37279 NULL
88817 +iscsi_create_session_51647 iscsi_create_session 3 51647 NULL
88818 +dma_show_regs_35266 dma_show_regs 3 35266 NULL
88819 +tun_put_user_59849 tun_put_user 4 59849 NULL
88820 +squashfs_read_fragment_index_table_2506 squashfs_read_fragment_index_table 4 2506 NULL
88821 +alloc_fdmem_27083 alloc_fdmem 1 27083 NULL
88822 +irda_recvmsg_stream_35280 irda_recvmsg_stream 4 35280 NULL
88823 +dm_write_2513 dm_write 3 2513 NULL
88824 +v9fs_cached_file_read_2514 v9fs_cached_file_read 3 2514 NULL
88825 +isr_rx_rdys_read_35283 isr_rx_rdys_read 3 35283 NULL
88826 +selinux_inode_setxattr_10708 selinux_inode_setxattr 4 10708 NULL
88827 +ps_upsd_utilization_read_51669 ps_upsd_utilization_read 3 51669 NULL
88828 +ntfs_malloc_nofs_49572 ntfs_malloc_nofs 1 49572 NULL
88829 +nfc_llcp_send_i_frame_59130 nfc_llcp_send_i_frame 3 59130 NULL
88830 +pvr2_ioread_read_10720 pvr2_ioread_read 3 10720 NULL nohasharray
88831 +shash_async_setkey_10720 shash_async_setkey 3 10720 &pvr2_ioread_read_10720
88832 +ceph_setxattr_18913 ceph_setxattr 4 18913 NULL
88833 +sctp_setsockopt_associnfo_51684 sctp_setsockopt_associnfo 3 51684 NULL
88834 +__iscsi_complete_pdu_10726 __iscsi_complete_pdu 4 10726 NULL
88835 +sfi_sysfs_install_table_51688 sfi_sysfs_install_table 1 51688 NULL
88836 +tx_tx_data_prepared_read_43497 tx_tx_data_prepared_read 3 43497 NULL
88837 +pvr2_ioread_set_sync_key_59882 pvr2_ioread_set_sync_key 3 59882 NULL
88838 +__btrfs_direct_write_22273 __btrfs_direct_write 4 22273 NULL
88839 +l2cap_sock_recvmsg_59886 l2cap_sock_recvmsg 4 59886 NULL
88840 +brcmf_sdio_forensic_read_35311 brcmf_sdio_forensic_read 3 35311 NULL nohasharray
88841 +__btrfs_buffered_write_35311 __btrfs_buffered_write 3 35311 &brcmf_sdio_forensic_read_35311
88842 +tracing_read_pipe_35312 tracing_read_pipe 3 35312 NULL
88843 +sctp_getsockopt_maxseg_10737 sctp_getsockopt_maxseg 2 10737 NULL
88844 +compat_sys_msgsnd_10738 compat_sys_msgsnd 2 10738 NULL
88845 +ffs_prepare_buffer_59892 ffs_prepare_buffer 2 59892 NULL
88846 +sel_write_access_51704 sel_write_access 3 51704 NULL
88847 +sys_syslog_10746 sys_syslog 3 10746 NULL
88848 +alloc_one_pg_vec_page_10747 alloc_one_pg_vec_page 1 10747 NULL
88849 +new_bind_ctl_35324 new_bind_ctl 2 35324 NULL
88850 +do_readlink_43518 do_readlink 2 43518 NULL
88851 +tty_cdev_add_51714 tty_cdev_add 2-4 51714 NULL
88852 +gem_alloc_skb_51715 gem_alloc_skb 2 51715 NULL
88853 +fallback_on_nodma_alloc_35332 fallback_on_nodma_alloc 2 35332 NULL
88854 +read_file_reset_52310 read_file_reset 3 52310 NULL
88855 +pms_capture_27142 pms_capture 4 27142 NULL
88856 +btmrvl_hscfgcmd_write_27143 btmrvl_hscfgcmd_write 3 27143 NULL
88857 +vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL
88858 +gspca_dev_probe_2570 gspca_dev_probe 4 2570 NULL
88859 +sg_read_oxfer_51724 sg_read_oxfer 3 51724 NULL
88860 +msg_set_51725 msg_set 3 51725 NULL
88861 +cachefiles_daemon_write_43535 cachefiles_daemon_write 3 43535 NULL
88862 +tx_frag_failed_read_43540 tx_frag_failed_read 3 43540 NULL
88863 +hpi_alloc_control_cache_35351 hpi_alloc_control_cache 1 35351 NULL
88864 +hid_parse_report_51737 hid_parse_report 3 51737 NULL
88865 +compat_filldir64_35354 compat_filldir64 3 35354 NULL
88866 +alc_auto_create_extra_outs_18975 alc_auto_create_extra_outs 2 18975 NULL
88867 +i2400m_net_rx_27170 i2400m_net_rx 5 27170 NULL
88868 +l3_alloc_skb_32289 l3_alloc_skb 1 32289 NULL
88869 +ifx_spi_insert_flip_string_51752 ifx_spi_insert_flip_string 3 51752 NULL
88870 +ath_rx_init_43564 ath_rx_init 2 43564 NULL
88871 +il_dbgfs_rxon_flags_read_59950 il_dbgfs_rxon_flags_read 3 59950 NULL nohasharray
88872 +dapm_widget_power_read_file_59950 dapm_widget_power_read_file 3 59950 &il_dbgfs_rxon_flags_read_59950
88873 +sys_bind_10799 sys_bind 3 10799 NULL
88874 +_fc_frame_alloc_43568 _fc_frame_alloc 1 43568 NULL
88875 +nfcwilink_send_bts_cmd_10802 nfcwilink_send_bts_cmd 3 10802 NULL
88876 +ioremap_prot_51764 ioremap_prot 1-2 51764 NULL
88877 +rpc_malloc_43573 rpc_malloc 2 43573 NULL
88878 +dataflash_read_fact_otp_33204 dataflash_read_fact_otp 2-3 33204 NULL
88879 +smk_write_logging_2618 smk_write_logging 3 2618 NULL
88880 +rx_defrag_init_called_read_35935 rx_defrag_init_called_read 3 35935 NULL
88881 +drm_vblank_init_11362 drm_vblank_init 2 11362 NULL
88882 +send_command_10832 send_command 4 10832 NULL
88883 +lro_gen_skb_2644 lro_gen_skb 6 2644 NULL
88884 +osd_req_read_kern_59990 osd_req_read_kern 5 59990 NULL
88885 +lbs_sleepparams_read_10840 lbs_sleepparams_read 3 10840 NULL
88886 +pppol2tp_sendmsg_56420 pppol2tp_sendmsg 4 56420 NULL
88887 +proc_read_43614 proc_read 3 43614 NULL
88888 +rawv6_send_hdrinc_35425 rawv6_send_hdrinc 3 35425 NULL
88889 +rxrpc_request_key_27235 rxrpc_request_key 3 27235 NULL
88890 +drm_fb_helper_init_19044 drm_fb_helper_init 3-4 19044 NULL
88891 +fuse_conn_max_background_read_10855 fuse_conn_max_background_read 3 10855 NULL
88892 +xlbd_reserve_minors_18365 xlbd_reserve_minors 1-2 18365 NULL
88893 +rawsock_sendmsg_60010 rawsock_sendmsg 4 60010 NULL
88894 +mthca_init_cq_60011 mthca_init_cq 2 60011 NULL
88895 +rx_rx_tkip_replays_read_60193 rx_rx_tkip_replays_read 3 60193 NULL
88896 +buffer_to_user_35439 buffer_to_user 3 35439 NULL
88897 +lpfc_idiag_drbacc_read_15948 lpfc_idiag_drbacc_read 3 15948 NULL
88898 +vmalloc_15464 vmalloc 1 15464 NULL
88899 +buffer_from_user_51826 buffer_from_user 3 51826 NULL
88900 +snd_pcm_oss_write1_10872 snd_pcm_oss_write1 3 10872 NULL
88901 +ieee80211_key_alloc_19065 ieee80211_key_alloc 3 19065 NULL
88902 +osd_req_list_dev_partitions_60027 osd_req_list_dev_partitions 4 60027 NULL
88903 +cfpkt_add_trail_27260 cfpkt_add_trail 3 27260 NULL
88904 +xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL
88905 +sys_sched_getaffinity_60033 sys_sched_getaffinity 2 60033 NULL
88906 +read_file_tgt_tx_stats_51847 read_file_tgt_tx_stats 3 51847 NULL
88907 +do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL
88908 +do_readv_writev_51849 do_readv_writev 4 51849 NULL
88909 +adu_write_30487 adu_write 3 30487 NULL
88910 +ieee80211_if_read_tkip_mic_test_19565 ieee80211_if_read_tkip_mic_test 3 19565 NULL
88911 +get_scq_10897 get_scq 2 10897 NULL
88912 +sys_process_vm_readv_19090 sys_process_vm_readv 3-5 19090 NULL nohasharray
88913 +brcmf_usbdev_qinit_19090 brcmf_usbdev_qinit 2 19090 &sys_process_vm_readv_19090
88914 +memcpy_fromiovecend_2707 memcpy_fromiovecend 3-4 2707 NULL
88915 +cgroup_write_string_10900 cgroup_write_string 5 10900 NULL
88916 +pointer_size_read_51863 pointer_size_read 3 51863 NULL
88917 +load_module_60056 load_module 2 60056 NULL nohasharray
88918 +gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 &load_module_60056
88919 +__videobuf_alloc_cached_12740 __videobuf_alloc_cached 1 12740 NULL
88920 +get_indirect_ea_51869 get_indirect_ea 4 51869 NULL
88921 +dmam_declare_coherent_memory_43679 dmam_declare_coherent_memory 4-2 43679 NULL
88922 +ieee80211_build_probe_req_60064 ieee80211_build_probe_req 8-6 60064 NULL
88923 +compat_do_ip6t_set_ctl_3184 compat_do_ip6t_set_ctl 4 3184 NULL
88924 +sta_last_seq_ctrl_read_19106 sta_last_seq_ctrl_read 3 19106 NULL
88925 +__netdev_alloc_skb_ip_align_55067 __netdev_alloc_skb_ip_align 2 55067 NULL
88926 +read_file_regidx_33370 read_file_regidx 3 33370 NULL
88927 +cifs_readv_from_socket_19109 cifs_readv_from_socket 3 19109 NULL
88928 +__copy_from_user_10918 __copy_from_user 3 10918 NULL
88929 +user_read_51881 user_read 3 51881 NULL
88930 +copy_from_buf_27308 copy_from_buf 4-2 27308 NULL
88931 +__xip_file_write_2733 __xip_file_write 4-3 2733 NULL
88932 +cryptd_hash_setkey_42781 cryptd_hash_setkey 3 42781 NULL
88933 +ath6kl_wmi_test_cmd_27312 ath6kl_wmi_test_cmd 3 27312 NULL
88934 +max77693_bulk_write_43698 max77693_bulk_write 3 43698 NULL
88935 +rxrpc_kernel_send_data_60083 rxrpc_kernel_send_data 3 60083 NULL
88936 +hidp_send_ctrl_message_43702 hidp_send_ctrl_message 4 43702 NULL
88937 +async_setkey_35521 async_setkey 3 35521 NULL
88938 +set_dev_class_39645 set_dev_class 4 39645 NULL nohasharray
88939 +dm_exception_table_init_39645 dm_exception_table_init 2 39645 &set_dev_class_39645
88940 +cxgb4_pktgl_to_skb_61899 cxgb4_pktgl_to_skb 2 61899 NULL
88941 +alloc_irdadev_19140 alloc_irdadev 1 19140 NULL
88942 +iio_read_first_n_sw_rb_51911 iio_read_first_n_sw_rb 2 51911 NULL
88943 +hid_report_raw_event_2762 hid_report_raw_event 4 2762 NULL
88944 +add_tty_40055 add_tty 1 40055 NULL nohasharray
88945 +l2cap_create_iframe_pdu_40055 l2cap_create_iframe_pdu 3 40055 &add_tty_40055
88946 +iwl_dbgfs_bt_traffic_read_35534 iwl_dbgfs_bt_traffic_read 3 35534 NULL
88947 +rxpipe_tx_xfr_host_int_trig_rx_data_read_35538 rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 NULL
88948 +mon_bin_ioctl_2771 mon_bin_ioctl 3 2771 NULL
88949 +snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
88950 +ttm_bo_kmap_60118 ttm_bo_kmap 3-2 60118 NULL
88951 +sleep_auth_read_19159 sleep_auth_read 3 19159 NULL
88952 +alloc_context_3194 alloc_context 1 3194 NULL
88953 +ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
88954 +bm_entry_read_10976 bm_entry_read 3 10976 NULL
88955 +smk_write_access2_19170 smk_write_access2 3 19170 NULL
88956 +pcbit_stat_27364 pcbit_stat 2 27364 NULL
88957 +i915_min_freq_write_10981 i915_min_freq_write 3 10981 NULL
88958 +sched_autogroup_write_10984 sched_autogroup_write 3 10984 NULL
88959 +gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
88960 +sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
88961 +scsi_get_vpd_page_51951 scsi_get_vpd_page 4 51951 NULL
88962 +koneplus_sysfs_read_42792 koneplus_sysfs_read 6 42792 NULL
88963 +scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL
88964 +xfrm_hash_alloc_10997 xfrm_hash_alloc 1 10997 NULL
88965 +ieee80211_if_read_dot11MeshMaxRetries_12756 ieee80211_if_read_dot11MeshMaxRetries 3 12756 NULL
88966 +rx_filter_accum_arp_pend_requests_read_11003 rx_filter_accum_arp_pend_requests_read 3 11003 NULL
88967 +init_state_60165 init_state 2 60165 NULL
88968 +udpv6_sendmsg_22316 udpv6_sendmsg 4 22316 NULL
88969 +sel_read_enforce_2828 sel_read_enforce 3 2828 NULL
88970 +__copy_to_user_inatomic_19214 __copy_to_user_inatomic 3 19214 NULL
88971 +dev_counters_read_19216 dev_counters_read 3 19216 NULL
88972 +ath6kl_sdio_alloc_prep_scat_req_51986 ath6kl_sdio_alloc_prep_scat_req 2 51986 NULL
88973 +sg_build_sgat_60179 sg_build_sgat 3 60179 NULL nohasharray
88974 +jffs2_alloc_full_dirent_60179 jffs2_alloc_full_dirent 1 60179 &sg_build_sgat_60179
88975 +ps_poll_ps_poll_timeouts_read_5934 ps_poll_ps_poll_timeouts_read 3 5934 NULL
88976 +ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 NULL
88977 +calc_hmac_32010 calc_hmac 3 32010 NULL
88978 +ixgbe_dbg_reg_ops_write_46895 ixgbe_dbg_reg_ops_write 3 46895 NULL
88979 +dwc3_mode_write_51997 dwc3_mode_write 3 51997 NULL
88980 +btrfs_copy_from_user_43806 btrfs_copy_from_user 3-1 43806 NULL
88981 +kernel_readv_35617 kernel_readv 3 35617 NULL
88982 +hci_send_cmd_43810 hci_send_cmd 3 43810 NULL
88983 +sys_lgetxattr_45531 sys_lgetxattr 4 45531 NULL
88984 +dev_irnet_write_11398 dev_irnet_write 3 11398 NULL
88985 +bio_alloc_mddev_19238 bio_alloc_mddev 2 19238 NULL
88986 +nouveau_gpio_create__11048 nouveau_gpio_create_ 4 11048 NULL
88987 +dccp_manip_pkt_476 dccp_manip_pkt 4 476 NULL
88988 +tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
88989 +set_tap_pfs_60203 set_tap_pfs 3 60203 NULL
88990 +sfq_alloc_2861 sfq_alloc 1 2861 NULL
88991 +skb_copy_datagram_from_iovec_52014 skb_copy_datagram_from_iovec 4-2-5 52014 NULL
88992 +carl9170_handle_mpdu_11056 carl9170_handle_mpdu 3 11056 NULL
88993 +move_addr_to_user_2868 move_addr_to_user 2 2868 NULL
88994 +ieee80211_alloc_hw_43829 ieee80211_alloc_hw 1 43829 NULL
88995 +vxge_rx_alloc_52024 vxge_rx_alloc 3 52024 NULL
88996 +ValidateDSDParamsChecksum_63654 ValidateDSDParamsChecksum 3 63654 NULL
88997 +__ip_append_data_16864 __ip_append_data 8-9 16864 NULL
88998 +p54_download_eeprom_43842 p54_download_eeprom 4 43842 NULL
88999 +spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
89000 +store_debug_level_35652 store_debug_level 3 35652 NULL
89001 +l2tp_ip6_sendmsg_7461 l2tp_ip6_sendmsg 4 7461 NULL
89002 +read_flush_43851 read_flush 3 43851 NULL
89003 +dma_rx_errors_read_52045 dma_rx_errors_read 3 52045 NULL
89004 +cmm_write_2896 cmm_write 3 2896 NULL
89005 +il_dbgfs_rxon_filter_flags_read_19281 il_dbgfs_rxon_filter_flags_read 3 19281 NULL
89006 +io_mapping_map_wc_19284 io_mapping_map_wc 2 19284 NULL
89007 +tunables_write_59563 tunables_write 3 59563 NULL
89008 +compat_sys_kexec_load_35674 compat_sys_kexec_load 2 35674 NULL
89009 +rtsx_write_cfg_seq_27485 rtsx_write_cfg_seq 5-3 27485 NULL
89010 +v4l2_ctrl_new_std_menu_items_27487 v4l2_ctrl_new_std_menu_items 4 27487 NULL
89011 +kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
89012 +isofs_readpages_52067 isofs_readpages 4 52067 NULL
89013 +lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL
89014 +dm_table_create_35687 dm_table_create 3 35687 NULL
89015 +qib_create_cq_27497 qib_create_cq 2 27497 NULL
89016 +nfc_hci_execute_cmd_43882 nfc_hci_execute_cmd 5 43882 NULL
89017 +rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
89018 +tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
89019 +xfs_trans_get_buf_map_2927 xfs_trans_get_buf_map 4 2927 NULL
89020 +tracing_buffers_read_11124 tracing_buffers_read 3 11124 NULL
89021 +garmin_read_process_27509 garmin_read_process 3 27509 NULL
89022 +alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
89023 +nsm_get_handle_52089 nsm_get_handle 4 52089 NULL
89024 +debug_read_19322 debug_read 3 19322 NULL
89025 +v9fs_direct_read_45546 v9fs_direct_read 3 45546 NULL
89026 +__pskb_pull_tail_60287 __pskb_pull_tail 2 60287 NULL
89027 +gs_buf_alloc_25067 gs_buf_alloc 2 25067 NULL
89028 +cfg80211_inform_bss_19332 cfg80211_inform_bss 8 19332 NULL
89029 +ib_copy_to_udata_27525 ib_copy_to_udata 3 27525 NULL
89030 +dn_nsp_return_disc_60296 dn_nsp_return_disc 2 60296 NULL
89031 +o2net_debug_read_52105 o2net_debug_read 3 52105 NULL
89032 +prism2_sta_send_mgmt_43916 prism2_sta_send_mgmt 5 43916 NULL
89033 +mgmt_device_found_14146 mgmt_device_found 10 14146 NULL
89034 +snd_seq_oss_readq_new_14283 snd_seq_oss_readq_new 2 14283 NULL
89035 +doc_probe_23285 doc_probe 1 23285 NULL
89036 +ppp_cp_event_2965 ppp_cp_event 6 2965 NULL
89037 +SendString_43928 SendString 3 43928 NULL
89038 +acpi_os_map_memory_11161 acpi_os_map_memory 1-2 11161 NULL
89039 +ceph_parse_server_name_60318 ceph_parse_server_name 2 60318 NULL
89040 +retry_count_read_52129 retry_count_read 3 52129 NULL
89041 +xlog_recover_add_to_trans_62839 xlog_recover_add_to_trans 4 62839 NULL
89042 +ioat2_alloc_ring_11172 ioat2_alloc_ring 2 11172 NULL
89043 +read_zero_19366 read_zero 3 19366 NULL
89044 +bch_alloc_4593 bch_alloc 1 4593 NULL
89045 +stats_dot11RTSFailureCount_read_43948 stats_dot11RTSFailureCount_read 3 43948 NULL
89046 +iwl_dbgfs_disable_ht40_read_35761 iwl_dbgfs_disable_ht40_read 3 35761 NULL
89047 +libipw_alloc_txb_27579 libipw_alloc_txb 1-2-3 27579 NULL
89048 +raid5_resize_63306 raid5_resize 2 63306 NULL
89049 +interpret_user_input_19393 interpret_user_input 2 19393 NULL
89050 +handle_eviocgbit_44193 handle_eviocgbit 3 44193 NULL
89051 +ieee80211_if_read_dot11MeshRetryTimeout_52168 ieee80211_if_read_dot11MeshRetryTimeout 3 52168 NULL
89052 +do_dmabuf_dirty_sou_3017 do_dmabuf_dirty_sou 7 3017 NULL
89053 +udf_alloc_i_data_35786 udf_alloc_i_data 2 35786 NULL
89054 +depth_write_3021 depth_write 3 3021 NULL
89055 +dccp_setsockopt_60367 dccp_setsockopt 5 60367 NULL
89056 +read_file_stations_35795 read_file_stations 3 35795 NULL
89057 +il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL
89058 +tipc_cfg_reply_alloc_27606 tipc_cfg_reply_alloc 1 27606 NULL
89059 +bcm_recvmsg_43992 bcm_recvmsg 4 43992 NULL
89060 +proc_pid_readlink_52186 proc_pid_readlink 3 52186 NULL
89061 +ubi_eba_atomic_leb_change_60379 ubi_eba_atomic_leb_change 5 60379 NULL
89062 +iwl_dbgfs_sleep_level_override_read_3038 iwl_dbgfs_sleep_level_override_read 3 3038 NULL
89063 +dvbdmx_write_19423 dvbdmx_write 3 19423 NULL
89064 +il3945_ucode_rx_stats_read_3048 il3945_ucode_rx_stats_read 3 3048 NULL
89065 +il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL
89066 +mthca_alloc_resize_buf_60394 mthca_alloc_resize_buf 3 60394 NULL
89067 +write_flush_procfs_44011 write_flush_procfs 3 44011 NULL
89068 +driver_names_read_60399 driver_names_read 3 60399 NULL
89069 +read_flush_procfs_27642 read_flush_procfs 3 27642 NULL
89070 +add_new_gdb_27643 add_new_gdb 3 27643 NULL
89071 +dac960_user_command_proc_write_3071 dac960_user_command_proc_write 3 3071 NULL
89072 +hugetlbfs_read_11268 hugetlbfs_read 3 11268 NULL
89073 +_alloc_mISDN_skb_52232 _alloc_mISDN_skb 3 52232 NULL
89074 +qnx6_readpages_27657 qnx6_readpages 4 27657 NULL
89075 +tx_tx_cmplt_read_35854 tx_tx_cmplt_read 3 35854 NULL
89076 +cdrom_read_cdda_old_27664 cdrom_read_cdda_old 4 27664 NULL
89077 +do_dmabuf_dirty_ldu_52241 do_dmabuf_dirty_ldu 6 52241 NULL
89078 +mthca_buf_alloc_35861 mthca_buf_alloc 2 35861 NULL
89079 +rx_data_60442 rx_data 4 60442 NULL
89080 +ttusb2_msg_3100 ttusb2_msg 4 3100 NULL
89081 +efivar_create_sysfs_entry_19485 efivar_create_sysfs_entry 2 19485 NULL
89082 +tcf_csum_ipv4_igmp_60446 tcf_csum_ipv4_igmp 3 60446 NULL
89083 +mdiobus_alloc_size_52259 mdiobus_alloc_size 1 52259 NULL
89084 +rt2x00debug_write_csr_64753 rt2x00debug_write_csr 3 64753 NULL
89085 +sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL
89086 +simple_write_to_buffer_3122 simple_write_to_buffer 5-2 3122 NULL
89087 +__tty_buffer_request_room_27700 __tty_buffer_request_room 2 27700 NULL
89088 +ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
89089 +fs_path_add_from_extent_buffer_27702 fs_path_add_from_extent_buffer 4 27702 NULL
89090 +tcp_mark_head_lost_35895 tcp_mark_head_lost 2 35895 NULL
89091 +skb_realloc_headroom_19516 skb_realloc_headroom 2 19516 NULL
89092 +atm_alloc_charge_19517 atm_alloc_charge 2 19517 NULL nohasharray
89093 +dev_alloc_skb_19517 dev_alloc_skb 1 19517 &atm_alloc_charge_19517
89094 +construct_key_11329 construct_key 3 11329 NULL
89095 +evm_write_key_27715 evm_write_key 3 27715 NULL
89096 +persistent_ram_buffer_map_11332 persistent_ram_buffer_map 1-2 11332 NULL
89097 +fill_write_buffer_3142 fill_write_buffer 3 3142 NULL
89098 +filldir_55137 filldir 3 55137 NULL
89099 +igmpv3_newpack_35912 igmpv3_newpack 2 35912 NULL
89100 +kernel_setsockopt_35913 kernel_setsockopt 5 35913 NULL
89101 +reg_w_buf_27724 reg_w_buf 3 27724 NULL
89102 +nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
89103 +compat_sys_migrate_pages_3157 compat_sys_migrate_pages 2 3157 NULL
89104 +a4t_cs_init_27734 a4t_cs_init 3 27734 NULL
89105 +sel_write_create_11353 sel_write_create 3 11353 NULL
89106 +tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 NULL
89107 +request_asymmetric_key_52317 request_asymmetric_key 2-4 52317 NULL
89108 +hwflags_read_52318 hwflags_read 3 52318 NULL
89109 +nfc_alloc_send_skb_3167 nfc_alloc_send_skb 4 3167 NULL
89110 +batadv_skb_head_push_11360 batadv_skb_head_push 2 11360 NULL
89111 +put_cmsg_compat_35937 put_cmsg_compat 4 35937 NULL
89112 +vmw_gmr_bind_44130 vmw_gmr_bind 3 44130 NULL
89113 +ath_tx_init_60515 ath_tx_init 2 60515 NULL
89114 +drm_property_create_enum_29201 drm_property_create_enum 5 29201 NULL
89115 +ntfs_rl_split_52328 ntfs_rl_split 2-4 52328 NULL
89116 +qib_get_base_info_11369 qib_get_base_info 3 11369 NULL
89117 +ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
89118 +test_unaligned_bulk_52333 test_unaligned_bulk 3 52333 NULL
89119 +nfsd_read_19568 nfsd_read 5 19568 NULL
89120 +cgroup_read_s64_19570 cgroup_read_s64 5 19570 NULL
89121 +hysdn_sched_rx_60533 hysdn_sched_rx 3 60533 NULL
89122 +mempool_create_node_3191 mempool_create_node 1 3191 NULL
89123 +kcalloc_27770 kcalloc 1-2 27770 NULL
89124 +shmem_pread_slow_3198 shmem_pread_slow 3 3198 NULL
89125 +bm_status_read_19583 bm_status_read 3 19583 NULL
89126 +v9fs_fid_readn_60544 v9fs_fid_readn 4 60544 NULL
89127 +zd_mac_rx_38296 zd_mac_rx 3 38296 NULL
89128 +nfs_parse_server_name_1899 nfs_parse_server_name 2 1899 NULL
89129 +ceph_buffer_new_35974 ceph_buffer_new 1 35974 NULL
89130 +acl_alloc_35979 acl_alloc 1 35979 NULL
89131 +copy_entries_to_user_52367 copy_entries_to_user 1 52367 NULL
89132 +___alloc_bootmem_11410 ___alloc_bootmem 1 11410 NULL
89133 +str_to_user_11411 str_to_user 2 11411 NULL
89134 +mem_fw_gen_free_mem_blks_read_11413 mem_fw_gen_free_mem_blks_read 3 11413 NULL
89135 +koneplus_sysfs_write_35993 koneplus_sysfs_write 6 35993 NULL
89136 +trace_options_read_11419 trace_options_read 3 11419 NULL
89137 +ttm_object_file_init_27804 ttm_object_file_init 2 27804 NULL
89138 +mpihelp_mul_27805 mpihelp_mul 5-3 27805 NULL
89139 +xd_read_multiple_pages_11422 xd_read_multiple_pages 5-4 11422 NULL
89140 +isdn_writebuf_stub_52383 isdn_writebuf_stub 4 52383 NULL
89141 +kimage_crash_alloc_3233 kimage_crash_alloc 3 3233 NULL
89142 +write_adapter_mem_3234 write_adapter_mem 3 3234 NULL
89143 +iwl_dbgfs_tx_queue_read_4635 iwl_dbgfs_tx_queue_read 3 4635 NULL
89144 +do_read_log_to_user_3236 do_read_log_to_user 4 3236 NULL
89145 +console_store_36007 console_store 4 36007 NULL
89146 +bttv_read_11432 bttv_read 3 11432 NULL
89147 +key_key_read_3241 key_key_read 3 3241 NULL
89148 +aer_inject_write_52399 aer_inject_write 3 52399 NULL
89149 +il3945_ucode_tx_stats_read_36016 il3945_ucode_tx_stats_read 3 36016 NULL
89150 +__feat_register_sp_64712 __feat_register_sp 6 64712 NULL
89151 +ib_alloc_device_26483 ib_alloc_device 1 26483 NULL
89152 +check_vendor_extension_3254 check_vendor_extension 1 3254 NULL
89153 +ieee80211_amsdu_to_8023s_15561 ieee80211_amsdu_to_8023s 5 15561 NULL
89154 +sys_listxattr_27833 sys_listxattr 3 27833 NULL
89155 +aac_rx_ioremap_52410 aac_rx_ioremap 2 52410 NULL
89156 +ubi_eba_write_leb_36029 ubi_eba_write_leb 5-6 36029 NULL
89157 +um_idi_write_18293 um_idi_write 3 18293 NULL
89158 +cgroup_file_write_52417 cgroup_file_write 3 52417 NULL
89159 +srp_alloc_iu_44227 srp_alloc_iu 2 44227 NULL
89160 +usbvision_rvmalloc_19655 usbvision_rvmalloc 1 19655 NULL
89161 +line6_midibuf_init_52425 line6_midibuf_init 2 52425 NULL
89162 +LoadBitmap_19658 LoadBitmap 2 19658 NULL
89163 +wl1273_fm_fops_write_60621 wl1273_fm_fops_write 3 60621 NULL
89164 +sys_init_module_36047 sys_init_module 2 36047 NULL
89165 +read_profile_27859 read_profile 3 27859 NULL
89166 +acl_alloc_stack_init_60630 acl_alloc_stack_init 1 60630 NULL
89167 +sca3000_read_first_n_hw_rb_11479 sca3000_read_first_n_hw_rb 2 11479 NULL
89168 +enlarge_skb_44248 enlarge_skb 2 44248 NULL nohasharray
89169 +xfs_buf_readahead_map_44248 xfs_buf_readahead_map 3 44248 &enlarge_skb_44248
89170 +scsi_track_queue_full_44239 scsi_track_queue_full 2 44239 NULL
89171 +refill_pool_19477 refill_pool 2 19477 NULL
89172 +ubifs_recover_leb_60639 ubifs_recover_leb 3 60639 NULL
89173 +ieee80211_if_read_dot11MeshHWMProotInterval_27873 ieee80211_if_read_dot11MeshHWMProotInterval 3 27873 NULL
89174 +iwl_dbgfs_thermal_throttling_read_38779 iwl_dbgfs_thermal_throttling_read 3 38779 NULL
89175 +xfs_file_buffered_aio_write_11492 xfs_file_buffered_aio_write 4 11492 NULL
89176 +__btrfs_free_reserved_extent_31207 __btrfs_free_reserved_extent 2 31207 NULL
89177 +tcp_sacktag_walk_49703 tcp_sacktag_walk 6 49703 NULL
89178 +ieee80211_if_write_tsf_36077 ieee80211_if_write_tsf 3 36077 NULL
89179 +arvo_sysfs_write_3311 arvo_sysfs_write 6 3311 NULL
89180 +sd_do_mode_sense_11507 sd_do_mode_sense 5 11507 NULL
89181 +unix_seqpacket_sendmsg_27893 unix_seqpacket_sendmsg 4 27893 NULL
89182 +kmem_zalloc_11510 kmem_zalloc 1 11510 NULL
89183 +hidraw_get_report_45609 hidraw_get_report 3 45609 NULL
89184 +dbDiscardAG_3322 dbDiscardAG 3 3322 NULL
89185 +ieee80211_alloc_txb_52477 ieee80211_alloc_txb 1-2 52477 NULL
89186 +ieee80211_if_read_dot11MeshConfirmTimeout_60670 ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 NULL
89187 +blk_init_tags_30592 blk_init_tags 1 30592 NULL
89188 +venus_symlink_23570 venus_symlink 4-6 23570 NULL
89189 +storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
89190 +aac_rkt_ioremap_3333 aac_rkt_ioremap 2 3333 NULL
89191 +sctp_make_init_ack_3335 sctp_make_init_ack 4 3335 NULL
89192 +read_from_oldmem_3337 read_from_oldmem 2 3337 NULL
89193 +tty_port_register_device_attr_3341 tty_port_register_device_attr 3 3341 NULL
89194 +ath6kl_keepalive_read_44303 ath6kl_keepalive_read 3 44303 NULL
89195 +vga_arb_write_36112 vga_arb_write 3 36112 NULL
89196 +int_tasklet_entry_52500 int_tasklet_entry 3 52500 NULL
89197 +acpi_os_allocate_zeroed_37422 acpi_os_allocate_zeroed 1 37422 NULL
89198 +simple_xattr_alloc_36118 simple_xattr_alloc 2 36118 NULL
89199 +il_dbgfs_interrupt_read_3351 il_dbgfs_interrupt_read 3 3351 NULL
89200 +memcpy_toiovecend_19736 memcpy_toiovecend 4-3 19736 NULL
89201 +gsm_control_rls_3353 gsm_control_rls 3 3353 NULL
89202 +ath6kl_usb_ctrl_msg_exchange_33327 ath6kl_usb_ctrl_msg_exchange 4 33327 NULL
89203 +dispatch_proc_write_44320 dispatch_proc_write 3 44320 NULL
89204 +pm_qos_power_write_52513 pm_qos_power_write 3 52513 NULL
89205 +mem_swapout_entry_32586 mem_swapout_entry 3 32586 NULL
89206 +gpio_power_read_36059 gpio_power_read 3 36059 NULL
89207 +vmalloc_exec_36132 vmalloc_exec 1 36132 NULL
89208 +init_data_container_60709 init_data_container 1 60709 NULL
89209 +p9_client_read_19750 p9_client_read 5 19750 NULL
89210 +skb_cow_data_11565 skb_cow_data 2 11565 NULL
89211 +pnpbios_proc_write_19758 pnpbios_proc_write 3 19758 NULL
89212 +ocfs2_readpages_19759 ocfs2_readpages 4 19759 NULL
89213 +ext3_readpages_36144 ext3_readpages 4 36144 NULL
89214 +mlx4_init_cmpt_table_11569 mlx4_init_cmpt_table 3 11569 NULL
89215 +iwl_trans_txq_alloc_36147 iwl_trans_txq_alloc 3 36147 NULL
89216 +alloc_vm_area_36149 alloc_vm_area 1 36149 NULL
89217 +ubi_eba_write_leb_st_44343 ubi_eba_write_leb_st 5 44343 NULL
89218 +tracing_clock_write_27961 tracing_clock_write 3 27961 NULL
89219 +b1_alloc_card_36155 b1_alloc_card 1 36155 NULL
89220 +oprofilefs_ulong_to_user_11582 oprofilefs_ulong_to_user 3 11582 NULL
89221 +mtdchar_writeoob_3393 mtdchar_writeoob 4 3393 NULL
89222 +nfs_fscache_get_super_cookie_44355 nfs_fscache_get_super_cookie 3 44355 NULL nohasharray
89223 +blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 &nfs_fscache_get_super_cookie_44355
89224 +saa7146_vmalloc_build_pgtable_19780 saa7146_vmalloc_build_pgtable 2 19780 NULL
89225 +send_stream_3397 send_stream 4 3397 NULL
89226 +snd_korg1212_copy_from_36169 snd_korg1212_copy_from 6 36169 NULL
89227 +fw_device_op_ioctl_11595 fw_device_op_ioctl 2 11595 NULL
89228 +ipx_recvmsg_44366 ipx_recvmsg 4 44366 NULL
89229 +hycapi_rx_capipkt_11602 hycapi_rx_capipkt 3 11602 NULL
89230 +msix_map_region_3411 msix_map_region 3 3411 NULL
89231 +sys_kexec_load_14222 sys_kexec_load 2 14222 NULL
89232 +rts_threshold_read_44384 rts_threshold_read 3 44384 NULL
89233 +iwl_dbgfs_rf_reset_read_26512 iwl_dbgfs_rf_reset_read 3 26512 NULL
89234 +pci_add_cap_save_buffer_3426 pci_add_cap_save_buffer 3 3426 NULL
89235 +crystalhd_create_dio_pool_3427 crystalhd_create_dio_pool 2 3427 NULL
89236 +sel_write_checkreqprot_60774 sel_write_checkreqprot 3 60774 NULL
89237 +opticon_write_60775 opticon_write 4 60775 NULL
89238 +snd_rawmidi_write_28008 snd_rawmidi_write 3 28008 NULL
89239 +acl_alloc_num_60778 acl_alloc_num 1-2 60778 NULL
89240 +aoedev_flush_44398 aoedev_flush 2 44398 NULL
89241 +irda_setsockopt_19824 irda_setsockopt 5 19824 NULL
89242 +drm_buffer_alloc_44405 drm_buffer_alloc 2 44405 NULL
89243 +pipe_iov_copy_to_user_3447 pipe_iov_copy_to_user 3 3447 NULL
89244 +vip_read_19832 vip_read 3 19832 NULL
89245 +osst_do_scsi_44410 osst_do_scsi 4 44410 NULL
89246 +llc_shdlc_alloc_skb_11645 llc_shdlc_alloc_skb 2 11645 NULL
89247 +security_context_to_sid_19839 security_context_to_sid 2 19839 NULL
89248 +sisusb_send_bridge_packet_11649 sisusb_send_bridge_packet 2 11649 NULL
89249 +nfqnl_mangle_36226 nfqnl_mangle 4-2 36226 NULL
89250 +atomic_stats_read_36228 atomic_stats_read 3 36228 NULL
89251 +crypt_alloc_buffer_19846 crypt_alloc_buffer 2 19846 NULL
89252 +ieee80211_if_read_rc_rateidx_mcs_mask_5ghz_44423 ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 NULL
89253 +sctp_setsockopt_maxburst_28041 sctp_setsockopt_maxburst 3 28041 NULL
89254 +alloc_skb_fclone_3467 alloc_skb_fclone 1 3467 NULL
89255 +cfg80211_mlme_register_mgmt_19852 cfg80211_mlme_register_mgmt 5 19852 NULL
89256 +viafb_iga1_odev_proc_write_36241 viafb_iga1_odev_proc_write 3 36241 NULL
89257 +cx231xx_init_vbi_isoc_28053 cx231xx_init_vbi_isoc 3-2 28053 NULL
89258 +ide_queue_pc_tail_11673 ide_queue_pc_tail 5 11673 NULL
89259 +llcp_allocate_pdu_19866 llcp_allocate_pdu 3 19866 NULL
89260 +lpfc_idiag_mbxacc_read_28061 lpfc_idiag_mbxacc_read 3 28061 NULL
89261 +btrfs_alloc_delayed_item_11678 btrfs_alloc_delayed_item 1 11678 NULL
89262 +compat_sys_mbind_36256 compat_sys_mbind 5 36256 NULL
89263 +security_context_to_sid_default_3492 security_context_to_sid_default 2 3492 NULL
89264 +sctp_setsockopt_hmac_ident_11687 sctp_setsockopt_hmac_ident 3 11687 NULL
89265 +blk_rq_map_kern_47004 blk_rq_map_kern 4 47004 NULL
89266 +split_11691 split 2 11691 NULL
89267 +brcmf_sdio_assert_info_52653 brcmf_sdio_assert_info 4 52653 NULL
89268 +snd_ctl_elem_user_tlv_11695 snd_ctl_elem_user_tlv 3 11695 NULL
89269 +pwr_tx_with_ps_read_60851 pwr_tx_with_ps_read 3 60851 NULL
89270 +usb_buffer_alloc_36276 usb_buffer_alloc 2 36276 NULL
89271 +__kfifo_alloc_22173 __kfifo_alloc 2-3 22173 NULL
89272 +codec_reg_read_file_36280 codec_reg_read_file 3 36280 NULL
89273 +gdth_init_isa_28091 gdth_init_isa 1 28091 NULL
89274 +readahead_tree_block_36285 readahead_tree_block 3 36285 NULL
89275 +mem_tx_free_mem_blks_read_3521 mem_tx_free_mem_blks_read 3 3521 NULL nohasharray
89276 +ieee80211_wx_set_gen_ie_rsl_3521 ieee80211_wx_set_gen_ie_rsl 3 3521 &mem_tx_free_mem_blks_read_3521
89277 +nouveau_cli_create_36293 nouveau_cli_create 3 36293 NULL
89278 +vmw_unlocked_ioctl_19212 vmw_unlocked_ioctl 2 19212 NULL
89279 +ps_upsd_max_apturn_read_19918 ps_upsd_max_apturn_read 3 19918 NULL
89280 +lpfc_debugfs_dif_err_read_36303 lpfc_debugfs_dif_err_read 3 36303 NULL
89281 +ieee80211_if_read_dropped_frames_ttl_44500 ieee80211_if_read_dropped_frames_ttl 3 44500 NULL
89282 +rx_defrag_need_defrag_read_28117 rx_defrag_need_defrag_read 3 28117 NULL
89283 +ad7879_spi_xfer_36311 ad7879_spi_xfer 3 36311 NULL
89284 +iwl_dbgfs_sram_read_44505 iwl_dbgfs_sram_read 3 44505 NULL
89285 +tcf_csum_ipv6_icmp_11738 tcf_csum_ipv6_icmp 4 11738 NULL
89286 +smk_write_load_self2_591 smk_write_load_self2 3 591 NULL
89287 +vgacon_adjust_height_28124 vgacon_adjust_height 2 28124 NULL
89288 +spidev_write_44510 spidev_write 3 44510 NULL
89289 +macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
89290 +dm_bio_prison_create_11749 dm_bio_prison_create 1 11749 NULL
89291 +iwl_dbgfs_rx_queue_read_19943 iwl_dbgfs_rx_queue_read 3 19943 NULL
89292 +fat_compat_ioctl_filldir_36328 fat_compat_ioctl_filldir 3 36328 NULL
89293 +iwl_dbgfs_qos_read_11753 iwl_dbgfs_qos_read 3 11753 NULL
89294 +nouveau_client_create__52715 nouveau_client_create_ 5 52715 NULL
89295 +iio_debugfs_read_reg_60908 iio_debugfs_read_reg 3 60908 NULL
89296 +kone_receive_4690 kone_receive 4 4690 NULL
89297 +alloc_smp_resp_3566 alloc_smp_resp 1 3566 NULL
89298 +jbd2_journal_init_revoke_table_36336 jbd2_journal_init_revoke_table 1 36336 NULL
89299 +evtchn_read_3569 evtchn_read 3 3569 NULL
89300 +video_read_28148 video_read 3 28148 NULL
89301 +compat_sys_setsockopt_3326 compat_sys_setsockopt 5 3326 NULL
89302 +sys_msgsnd_44537 sys_msgsnd 3 44537 NULL nohasharray
89303 +comm_write_44537 comm_write 3 44537 &sys_msgsnd_44537
89304 +stats_dot11FCSErrorCount_read_28154 stats_dot11FCSErrorCount_read 3 28154 NULL
89305 +ax25_send_frame_19964 ax25_send_frame 2 19964 NULL
89306 +blkcipher_next_slow_52733 blkcipher_next_slow 3-4 52733 NULL
89307 +relay_alloc_page_array_52735 relay_alloc_page_array 1 52735 NULL
89308 +ps_pspoll_timeouts_read_11776 ps_pspoll_timeouts_read 3 11776 NULL
89309 +vc_resize_3585 vc_resize 2-3 3585 NULL
89310 +gluebi_write_27905 gluebi_write 3 27905 NULL
89311 +ivtv_v4l2_read_1964 ivtv_v4l2_read 3 1964 NULL
89312 +c4iw_reject_cr_28174 c4iw_reject_cr 3 28174 NULL
89313 +rx_out_of_mem_read_10157 rx_out_of_mem_read 3 10157 NULL
89314 +attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
89315 +compat_sys_semtimedop_3606 compat_sys_semtimedop 3 3606 NULL
89316 +sctp_getsockopt_events_3607 sctp_getsockopt_events 2 3607 NULL
89317 +macvtap_get_user_28185 macvtap_get_user 4 28185 NULL
89318 +edac_mc_alloc_3611 edac_mc_alloc 4 3611 NULL
89319 +key_algorithm_read_57946 key_algorithm_read 3 57946 NULL
89320 +pti_char_write_60960 pti_char_write 3 60960 NULL
89321 +tx_tx_starts_read_3617 tx_tx_starts_read 3 3617 NULL
89322 +nouveau_mxm_create__28200 nouveau_mxm_create_ 4 28200 NULL
89323 +pcpu_fc_alloc_11818 pcpu_fc_alloc 2 11818 NULL
89324 +read_vbt_r10_60679 read_vbt_r10 1 60679 NULL
89325 +aligned_kmalloc_3628 aligned_kmalloc 1 3628 NULL
89326 +afs_alloc_flat_call_36399 afs_alloc_flat_call 2-3 36399 NULL
89327 +error_error_frame_read_39947 error_error_frame_read 3 39947 NULL nohasharray
89328 +fwnet_pd_new_39947 fwnet_pd_new 4 39947 &error_error_frame_read_39947
89329 +snd_pcm_alloc_vmalloc_buffer_44595 snd_pcm_alloc_vmalloc_buffer 2 44595 NULL
89330 +zerocopy_sg_from_iovec_11828 zerocopy_sg_from_iovec 3 11828 NULL
89331 +sctp_setsockopt_maxseg_11829 sctp_setsockopt_maxseg 3 11829 NULL
89332 +rts51x_read_status_11830 rts51x_read_status 4 11830 NULL
89333 +__a2mp_build_60987 __a2mp_build 3 60987 NULL
89334 +split_scan_timeout_read_20029 split_scan_timeout_read 3 20029 NULL
89335 +hsc_msg_alloc_60990 hsc_msg_alloc 1 60990 NULL
89336 +cm_copy_private_data_3649 cm_copy_private_data 2 3649 NULL
89337 +ath6kl_disconnect_timeout_read_3650 ath6kl_disconnect_timeout_read 3 3650 NULL
89338 +ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 NULL nohasharray
89339 +mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 &ieee80211_if_read_auto_open_plinks_38268
89340 +ip_set_alloc_57953 ip_set_alloc 1 57953 NULL
89341 +i915_compat_ioctl_3656 i915_compat_ioctl 2 3656 NULL
89342 +mb_cache_create_17307 mb_cache_create 2 17307 NULL
89343 +ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL
89344 +cfpkt_add_body_44630 cfpkt_add_body 3 44630 NULL
89345 +nf_nat_sdp_media_11863 nf_nat_sdp_media 9 11863 NULL
89346 +alloc_extent_buffer_52824 alloc_extent_buffer 3 52824 NULL
89347 +skb_cow_head_52495 skb_cow_head 2 52495 NULL
89348 +ecryptfs_copy_filename_11868 ecryptfs_copy_filename 4 11868 NULL
89349 +sctp_tsnmap_init_36446 sctp_tsnmap_init 2 36446 NULL
89350 +alloc_ieee80211_20063 alloc_ieee80211 1 20063 NULL
89351 +alloc_etherdev_mqs_36450 alloc_etherdev_mqs 1 36450 NULL
89352 +pwr_rcvd_beacons_read_52836 pwr_rcvd_beacons_read 3 52836 NULL
89353 +ieee80211_if_read_dropped_frames_no_route_33383 ieee80211_if_read_dropped_frames_no_route 3 33383 NULL
89354 +sctp_getsockopt_maxburst_42941 sctp_getsockopt_maxburst 2 42941 NULL
89355 +rawv6_sendmsg_20080 rawv6_sendmsg 4 20080 NULL
89356 +fuse_conn_limit_read_20084 fuse_conn_limit_read 3 20084 NULL
89357 +btmrvl_psmode_write_3703 btmrvl_psmode_write 3 3703 NULL
89358 +symtab_init_61050 symtab_init 2 61050 NULL
89359 +team_options_register_20091 team_options_register 3 20091 NULL
89360 +videobuf_pages_to_sg_3708 videobuf_pages_to_sg 2 3708 NULL
89361 +mon_bin_get_event_52863 mon_bin_get_event 4 52863 NULL
89362 +oom_adj_read_21847 oom_adj_read 3 21847 NULL
89363 +b43_nphy_load_samples_36481 b43_nphy_load_samples 3 36481 NULL
89364 +mpi_resize_44674 mpi_resize 2 44674 NULL
89365 +ip6_append_data_36490 ip6_append_data 4-5 36490 NULL nohasharray
89366 +tx_tx_checksum_result_read_36490 tx_tx_checksum_result_read 3 36490 &ip6_append_data_36490
89367 +kmalloc_slab_11917 kmalloc_slab 1 11917 NULL
89368 +rng_dev_read_41581 rng_dev_read 3 41581 NULL
89369 +nouveau_compat_ioctl_28305 nouveau_compat_ioctl 2 28305 NULL
89370 +cache_read_procfs_52882 cache_read_procfs 3 52882 NULL
89371 +fs_devrw_entry_11924 fs_devrw_entry 3 11924 NULL
89372 +hptiop_adjust_disk_queue_depth_20122 hptiop_adjust_disk_queue_depth 2 20122 NULL
89373 +xfs_trans_read_buf_map_37487 xfs_trans_read_buf_map 5 37487 NULL
89374 +ci_ll_write_3740 ci_ll_write 4 3740 NULL
89375 +snd_pcm_oss_read_28317 snd_pcm_oss_read 3 28317 NULL
89376 +kvm_kvzalloc_52894 kvm_kvzalloc 1 52894 NULL
89377 +ima_show_htable_value_57136 ima_show_htable_value 2 57136 NULL
89378 +mcam_v4l_read_36513 mcam_v4l_read 3 36513 NULL
89379 +dccp_feat_clone_sp_val_11942 dccp_feat_clone_sp_val 3 11942 NULL
89380 +pms_read_53873 pms_read 3 53873 NULL
89381 +ieee80211_if_read_fwded_frames_36520 ieee80211_if_read_fwded_frames 3 36520 NULL
89382 +get_derived_key_61100 get_derived_key 4 61100 NULL
89383 +bm_entry_write_28338 bm_entry_write 3 28338 NULL
89384 +_zd_iowrite32v_locked_44725 _zd_iowrite32v_locked 3 44725 NULL
89385 +tcp_copy_to_iovec_28344 tcp_copy_to_iovec 3 28344 NULL
89386 +clusterip_proc_write_44729 clusterip_proc_write 3 44729 NULL
89387 +dm_read_15674 dm_read 3 15674 NULL
89388 +cpu_type_read_36540 cpu_type_read 3 36540 NULL
89389 +__probe_kernel_read_61119 __probe_kernel_read 3 61119 NULL
89390 +nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL
89391 +kone_send_63435 kone_send 4 63435 NULL
89392 +alloc_rtllib_51136 alloc_rtllib 1 51136 NULL
89393 +key_tx_rx_count_read_44742 key_tx_rx_count_read 3 44742 NULL
89394 +tomoyo_commit_ok_20167 tomoyo_commit_ok 2 20167 NULL
89395 +iblock_get_bio_52936 iblock_get_bio 3 52936 NULL
89396 +__kfifo_to_user_36555 __kfifo_to_user 3 36555 NULL nohasharray
89397 +macvtap_do_read_36555 macvtap_do_read 4 36555 &__kfifo_to_user_36555
89398 +wep_addr_key_count_read_20174 wep_addr_key_count_read 3 20174 NULL
89399 +create_trace_probe_20175 create_trace_probe 1 20175 NULL
89400 +sctp_setsockopt_auth_key_3793 sctp_setsockopt_auth_key 3 3793 NULL
89401 +afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL
89402 +tnode_new_44757 tnode_new 3 44757 NULL nohasharray
89403 +pty_write_44757 pty_write 3 44757 &tnode_new_44757
89404 +ath6kl_send_go_probe_resp_21113 ath6kl_send_go_probe_resp 3 21113 NULL
89405 +kvm_read_guest_atomic_10765 kvm_read_guest_atomic 4 10765 NULL
89406 +iwl_dbgfs_nvm_read_23845 iwl_dbgfs_nvm_read 3 23845 NULL
89407 +send_packet_52960 send_packet 4 52960 NULL
89408 +dlmfs_file_read_28385 dlmfs_file_read 3 28385 NULL
89409 +ssb_bus_scan_36578 ssb_bus_scan 2 36578 NULL
89410 +ncp_file_write_3813 ncp_file_write 3 3813 NULL
89411 +tipc_port_reject_sections_55229 tipc_port_reject_sections 5 55229 NULL
89412 +tx_frag_cache_miss_read_28394 tx_frag_cache_miss_read 3 28394 NULL
89413 +set_bypass_pfs_28395 set_bypass_pfs 3 28395 NULL
89414 +put_cmsg_36589 put_cmsg 4 36589 NULL
89415 +__vmalloc_61168 __vmalloc 1 61168 NULL
89416 +llc_ui_recvmsg_3826 llc_ui_recvmsg 4 3826 NULL
89417 +sctp_setsockopt_44788 sctp_setsockopt 5 44788 NULL
89418 +read_file_tx_chainmask_3829 read_file_tx_chainmask 3 3829 NULL
89419 +pcnet32_realloc_rx_ring_36598 pcnet32_realloc_rx_ring 3 36598 NULL
89420 +event_oom_late_read_61175 event_oom_late_read 3 61175 NULL nohasharray
89421 +pair_device_61175 pair_device 4 61175 &event_oom_late_read_61175
89422 +sys_lsetxattr_61177 sys_lsetxattr 4 61177 NULL
89423 +tx_tx_exch_read_52986 tx_tx_exch_read 3 52986 NULL
89424 +nfs4_alloc_pages_48426 nfs4_alloc_pages 1 48426 NULL
89425 +rx_dropped_read_44799 rx_dropped_read 3 44799 NULL
89426 +batadv_check_management_packet_52993 batadv_check_management_packet 3 52993 NULL
89427 +tpci200_slot_map_space_3848 tpci200_slot_map_space 2 3848 NULL
89428 +regmap_bulk_write_59049 regmap_bulk_write 4 59049 NULL
89429 +create_one_cdev_3852 create_one_cdev 2 3852 NULL
89430 +fat_ioctl_filldir_36621 fat_ioctl_filldir 3 36621 NULL
89431 +smk_read_onlycap_3855 smk_read_onlycap 3 3855 NULL
89432 +mpage_readpages_28436 mpage_readpages 3 28436 NULL
89433 +cfpkt_append_61206 cfpkt_append 3 61206 NULL
89434 +btrfs_free_and_pin_reserved_extent_53016 btrfs_free_and_pin_reserved_extent 2 53016 NULL
89435 +rose_sendmsg_20249 rose_sendmsg 4 20249 NULL
89436 +get_fd_set_3866 get_fd_set 1 3866 NULL
89437 +megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL
89438 +rx_rx_defrag_read_2010 rx_rx_defrag_read 3 2010 NULL
89439 +unlink_queued_645 unlink_queued 3-4 645 NULL
89440 +il4965_ucode_tx_stats_read_12064 il4965_ucode_tx_stats_read 3 12064 NULL
89441 +sisusb_write_44834 sisusb_write 3 44834 NULL
89442 +smk_read_ambient_61220 smk_read_ambient 3 61220 NULL
89443 +raw_recvmsg_52529 raw_recvmsg 4 52529 NULL
89444 +alloc_irq_cpu_rmap_28459 alloc_irq_cpu_rmap 1 28459 NULL
89445 +ptc_proc_write_12076 ptc_proc_write 3 12076 NULL
89446 +hdlc_empty_fifo_18397 hdlc_empty_fifo 2 18397 NULL
89447 +uea_send_modem_cmd_3888 uea_send_modem_cmd 3 3888 NULL
89448 +h5_prepare_pkt_12085 h5_prepare_pkt 4 12085 NULL
89449 +nvram_write_3894 nvram_write 3 3894 NULL
89450 +osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
89451 +pipeline_pre_proc_swi_read_3898 pipeline_pre_proc_swi_read 3 3898 NULL
89452 +iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
89453 +vcs_write_3910 vcs_write 3 3910 NULL
89454 +sctp_make_abort_violation_27959 sctp_make_abort_violation 4 27959 NULL
89455 +mwifiex_debug_read_53074 mwifiex_debug_read 3 53074 NULL
89456 +dtim_interval_read_654 dtim_interval_read 3 654 NULL
89457 +btrfs_bio_alloc_61270 btrfs_bio_alloc 3 61270 NULL
89458 +packet_sendmsg_24954 packet_sendmsg 4 24954 NULL
89459 +alloc_bulk_urbs_generic_12127 alloc_bulk_urbs_generic 5 12127 NULL
89460 +do_tty_write_44896 do_tty_write 5 44896 NULL
89461 +set_powered_12129 set_powered 4 12129 NULL
89462 +qib_resize_cq_53090 qib_resize_cq 2 53090 NULL
89463 +snd_cs4281_BA1_read_20323 snd_cs4281_BA1_read 5 20323 NULL
89464 +nfs_writedata_alloc_12133 nfs_writedata_alloc 2 12133 NULL
89465 +ramoops_init_prz_12134 ramoops_init_prz 5 12134 NULL
89466 +ps_poll_upsd_utilization_read_28519 ps_poll_upsd_utilization_read 3 28519 NULL
89467 +hdlc_irq_one_3944 hdlc_irq_one 2 3944 NULL
89468 +rawv6_seticmpfilter_12137 rawv6_seticmpfilter 5 12137 NULL
89469 +vmw_fifo_reserve_12141 vmw_fifo_reserve 2 12141 NULL
89470 +i2400m_tx_stats_read_28527 i2400m_tx_stats_read 3 28527 NULL
89471 +rawsock_recvmsg_12144 rawsock_recvmsg 4 12144 NULL
89472 +btmrvl_sdio_host_to_card_12152 btmrvl_sdio_host_to_card 3 12152 NULL
89473 +vmbus_open_12154 vmbus_open 2-3 12154 NULL
89474 +capinc_tty_write_28539 capinc_tty_write 3 28539 NULL
89475 +sel_read_policycap_28544 sel_read_policycap 3 28544 NULL
89476 +mptctl_getiocinfo_28545 mptctl_getiocinfo 2 28545 NULL
89477 +line6_dumpreq_initbuf_53123 line6_dumpreq_initbuf 3 53123 NULL
89478 +snd_rawmidi_kernel_read1_36740 snd_rawmidi_kernel_read1 4 36740 NULL
89479 +gather_array_56641 gather_array 3 56641 NULL
89480 +cxgbi_device_register_36746 cxgbi_device_register 1-2 36746 NULL
89481 +b43legacy_debugfs_write_28556 b43legacy_debugfs_write 3 28556 NULL
89482 +dma_memcpy_to_iovec_12173 dma_memcpy_to_iovec 5 12173 NULL
89483 +debug_debug1_read_8856 debug_debug1_read 3 8856 NULL
89484 +ddp_make_gl_12179 ddp_make_gl 1 12179 NULL
89485 +ps_poll_ps_poll_max_ap_turn_read_53140 ps_poll_ps_poll_max_ap_turn_read 3 53140 NULL
89486 +scsi_deactivate_tcq_47086 scsi_deactivate_tcq 2 47086 NULL
89487 +regcache_rbtree_insert_to_block_58009 regcache_rbtree_insert_to_block 5 58009 NULL
89488 +do_add_counters_3992 do_add_counters 3 3992 NULL
89489 +mic_rx_pkts_read_27972 mic_rx_pkts_read 3 27972 NULL
89490 +smk_set_cipso_20379 smk_set_cipso 3 20379 NULL
89491 +st5481_setup_isocpipes_61340 st5481_setup_isocpipes 6-4 61340 NULL
89492 +rx_rx_wa_ba_not_expected_read_61341 rx_rx_wa_ba_not_expected_read 3 61341 NULL
89493 +dccpprobe_read_52549 dccpprobe_read 3 52549 NULL
89494 +ip4ip6_err_36772 ip4ip6_err 5 36772 NULL
89495 +mei_write_4005 mei_write 3 4005 NULL
89496 +snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL
89497 +ptp_filter_init_36780 ptp_filter_init 2 36780 NULL
89498 +__kfifo_from_user_20399 __kfifo_from_user 3 20399 NULL
89499 +tx_queue_status_read_44978 tx_queue_status_read 3 44978 NULL
89500 +debug_debug4_read_61367 debug_debug4_read 3 61367 NULL
89501 +receive_copy_12216 receive_copy 3 12216 NULL
89502 +aat2870_reg_read_file_12221 aat2870_reg_read_file 3 12221 NULL
89503 +proc_fault_inject_read_36802 proc_fault_inject_read 3 36802 NULL
89504 +bcsp_prepare_pkt_12961 bcsp_prepare_pkt 3 12961 NULL
89505 +ftdi_process_packet_45005 ftdi_process_packet 5 45005 NULL
89506 +change_xattr_61390 change_xattr 5 61390 NULL
89507 +find_skb_20431 find_skb 2 20431 NULL
89508 +hiddev_ioctl_36816 hiddev_ioctl 2 36816 NULL
89509 +fmc_send_cmd_20435 fmc_send_cmd 5 20435 NULL
89510 +tcp_fragment_20436 tcp_fragment 3 20436 NULL
89511 +ib_uverbs_unmarshall_recv_12251 ib_uverbs_unmarshall_recv 5 12251 NULL
89512 +ptrace_writedata_45021 ptrace_writedata 4 45021 NULL
89513 +simple_alloc_urb_60420 simple_alloc_urb 3 60420 NULL
89514 +sys_sethostname_42962 sys_sethostname 2 42962 NULL
89515 +int_hardware_entry_36833 int_hardware_entry 3 36833 NULL
89516 +tx_tx_start_data_read_53219 tx_tx_start_data_read 3 53219 NULL
89517 +pwr_rcvd_bcns_cnt_read_4774 pwr_rcvd_bcns_cnt_read 3 4774 NULL
89518 +fc_change_queue_depth_36841 fc_change_queue_depth 2 36841 NULL
89519 +shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
89520 +add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
89521 +sctp_make_asconf_4078 sctp_make_asconf 3 4078 NULL
89522 +vhci_get_user_45039 vhci_get_user 3 45039 NULL
89523 +ip_vs_icmp_xmit_v6_20464 ip_vs_icmp_xmit_v6 4 20464 NULL
89524 +compat_ipv6_setsockopt_20468 compat_ipv6_setsockopt 5 20468 NULL
89525 +read_buf_20469 read_buf 2 20469 NULL
89526 +cm_write_36858 cm_write 3 36858 NULL
89527 +note_last_dentry_12285 note_last_dentry 3 12285 NULL
89528 +blk_queue_resize_tags_28670 blk_queue_resize_tags 2 28670 NULL
89529 +il_dbgfs_nvm_read_12288 il_dbgfs_nvm_read 3 12288 NULL
89530 +sel_write_user_45060 sel_write_user 3 45060 NULL
89531 +tx_tx_data_programmed_read_36871 tx_tx_data_programmed_read 3 36871 NULL
89532 +__dev_alloc_skb_28681 __dev_alloc_skb 1 28681 NULL
89533 +svc_setsockopt_36876 svc_setsockopt 5 36876 NULL
89534 +snd_mixart_BA0_read_45069 snd_mixart_BA0_read 5 45069 NULL
89535 +fast_user_write_20494 fast_user_write 5 20494 NULL
89536 +unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
89537 +sctp_make_fwdtsn_53265 sctp_make_fwdtsn 3 53265 NULL
89538 +ib_ucm_alloc_data_36885 ib_ucm_alloc_data 3 36885 NULL
89539 +hidraw_report_event_20503 hidraw_report_event 3 20503 NULL
89540 +bt_sock_recvmsg_12316 bt_sock_recvmsg 4 12316 NULL
89541 +selinux_inode_notifysecctx_36896 selinux_inode_notifysecctx 3 36896 NULL
89542 +lirc_buffer_init_53282 lirc_buffer_init 3-2 53282 NULL
89543 +tipc_msg_build_12326 tipc_msg_build 4 12326 NULL
89544 +xfs_iext_realloc_direct_20521 xfs_iext_realloc_direct 2 20521 NULL
89545 +drbd_bm_resize_20522 drbd_bm_resize 2 20522 NULL
89546 +pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
89547 +OS_kmalloc_36909 OS_kmalloc 1 36909 NULL
89548 +osst_read_40237 osst_read 3 40237 NULL
89549 +tm6000_read_4151 tm6000_read 3 4151 NULL
89550 +amd_create_gatt_pages_20537 amd_create_gatt_pages 1 20537 NULL
89551 +usbdev_read_45114 usbdev_read 3 45114 NULL
89552 +drm_plane_init_28731 drm_plane_init 6 28731 NULL
89553 +spi_execute_28736 spi_execute 5 28736 NULL
89554 +snd_pcm_aio_write_28738 snd_pcm_aio_write 3 28738 NULL
89555 +mptctl_ioctl_12355 mptctl_ioctl 2 12355 NULL
89556 +get_alua_req_4166 get_alua_req 3 4166 NULL
89557 +scsi_report_opcode_20551 scsi_report_opcode 3 20551 NULL
89558 +blk_dropped_read_4168 blk_dropped_read 3 4168 NULL
89559 +venus_create_20555 venus_create 4 20555 NULL
89560 +__nf_ct_ext_add_length_12364 __nf_ct_ext_add_length 3 12364 NULL
89561 +batadv_interface_rx_53325 batadv_interface_rx 4 53325 NULL
89562 +receive_packet_12367 receive_packet 2 12367 NULL
89563 +squashfs_cache_init_41656 squashfs_cache_init 2 41656 NULL
89564 +mem_write_22232 mem_write 3 22232 NULL
89565 +read_file_bool_4180 read_file_bool 3 4180 NULL
89566 +send_to_tty_45141 send_to_tty 3 45141 NULL
89567 +fops_read_40672 fops_read 3 40672 NULL
89568 +cxio_init_resource_fifo_28764 cxio_init_resource_fifo 3 28764 NULL
89569 +write_leb_36957 write_leb 5 36957 NULL
89570 +xfs_iext_inline_to_direct_12384 xfs_iext_inline_to_direct 2 12384 NULL
89571 +device_write_45156 device_write 3 45156 NULL
89572 +i915_max_freq_read_20581 i915_max_freq_read 3 20581 NULL
89573 +tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
89574 +sparse_early_mem_maps_alloc_node_36971 sparse_early_mem_maps_alloc_node 4 36971 NULL
89575 +batadv_tt_append_diff_20588 batadv_tt_append_diff 4 20588 NULL
89576 +dvb_net_sec_callback_28786 dvb_net_sec_callback 2 28786 NULL
89577 +excessive_retries_read_60425 excessive_retries_read 3 60425 NULL
89578 +isp1760_register_628 isp1760_register 1-2 628 NULL
89579 +dvb_net_ioctl_61559 dvb_net_ioctl 2 61559 NULL
89580 +lirc_write_20604 lirc_write 3 20604 NULL
89581 +sel_write_member_28800 sel_write_member 3 28800 NULL
89582 +ieee80211_if_read_rc_rateidx_mask_2ghz_61570 ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 NULL
89583 +ieee80211_if_read_num_mcast_sta_12419 ieee80211_if_read_num_mcast_sta 3 12419 NULL
89584 +cgroup_file_read_28804 cgroup_file_read 3 28804 NULL
89585 +sys_msgrcv_959 sys_msgrcv 3 959 NULL
89586 +snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 NULL
89587 +pskb_network_may_pull_35336 pskb_network_may_pull 2 35336 NULL
89588 +auok190xfb_write_37001 auok190xfb_write 3 37001 NULL
89589 +ext4_kvzalloc_47605 ext4_kvzalloc 1 47605 NULL
89590 +setxattr_37006 setxattr 4 37006 NULL
89591 +add_child_45201 add_child 4 45201 NULL
89592 +seq_open_private_61589 seq_open_private 3 61589 NULL
89593 +iso_alloc_urb_45206 iso_alloc_urb 4-5 45206 NULL
89594 +__get_vm_area_61599 __get_vm_area 1 61599 NULL
89595 +iwl_dbgfs_rxon_filter_flags_read_28832 iwl_dbgfs_rxon_filter_flags_read 3 28832 NULL
89596 +nl_portid_hash_zalloc_34843 nl_portid_hash_zalloc 1 34843 NULL
89597 +kfifo_copy_to_user_20646 kfifo_copy_to_user 3 20646 NULL
89598 +spi_alloc_master_45223 spi_alloc_master 2 45223 NULL
89599 +ieee80211_if_read_dropped_frames_congestion_32603 ieee80211_if_read_dropped_frames_congestion 3 32603 NULL
89600 +vp_request_msix_vectors_28849 vp_request_msix_vectors 2 28849 NULL
89601 +oz_cdev_read_20659 oz_cdev_read 3 20659 NULL
89602 +configfs_write_file_61621 configfs_write_file 3 61621 NULL
89603 +ieee80211_if_read_drop_unencrypted_37053 ieee80211_if_read_drop_unencrypted 3 37053 NULL
89604 +ieee80211_rx_bss_info_61630 ieee80211_rx_bss_info 3 61630 NULL
89605 +isr_cmd_cmplt_read_53439 isr_cmd_cmplt_read 3 53439 NULL
89606 +ablkcipher_next_slow_47274 ablkcipher_next_slow 4-3 47274 NULL
89607 +i2o_parm_table_get_61635 i2o_parm_table_get 6 61635 NULL
89608 +snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL
89609 +x25_sendmsg_12487 x25_sendmsg 4 12487 NULL
89610 +dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 4 20682 NULL
89611 +get_packet_pg_28023 get_packet_pg 4 28023 NULL
89612 +rtllib_auth_challenge_12493 rtllib_auth_challenge 3 12493 NULL
89613 +acpi_tb_parse_root_table_53455 acpi_tb_parse_root_table 1 53455 NULL
89614 +resize_stripes_61650 resize_stripes 2 61650 NULL
89615 +n2_run_53459 n2_run 3 53459 NULL
89616 +packet_sendmsg_spkt_28885 packet_sendmsg_spkt 4 28885 NULL
89617 +parse_command_37079 parse_command 2 37079 NULL
89618 +read_file_tgt_int_stats_20697 read_file_tgt_int_stats 3 20697 NULL
89619 +alloc_ts_config_45775 alloc_ts_config 1 45775 NULL
89620 +ttm_page_pool_free_61661 ttm_page_pool_free 2 61661 NULL
89621 +input_mt_init_slots_45279 input_mt_init_slots 2 45279 NULL
89622 +pipeline_cs_rx_packet_in_read_37089 pipeline_cs_rx_packet_in_read 3 37089 NULL
89623 +bt_sock_stream_recvmsg_52518 bt_sock_stream_recvmsg 4 52518 NULL
89624 +insert_one_name_61668 insert_one_name 7 61668 NULL
89625 +nouveau_fifo_create__4327 nouveau_fifo_create_ 5-6 4327 NULL
89626 +pcpu_page_first_chunk_20712 pcpu_page_first_chunk 1 20712 NULL
89627 +iowarrior_read_53483 iowarrior_read 3 53483 NULL
89628 +osd_req_write_kern_53486 osd_req_write_kern 5 53486 NULL
89629 +lock_loop_61681 lock_loop 1 61681 NULL
89630 +snd_pcm_oss_sync1_45298 snd_pcm_oss_sync1 2 45298 NULL
89631 +security_context_to_sid_force_20724 security_context_to_sid_force 2 20724 NULL
89632 +brcmf_sdio_trap_info_48510 brcmf_sdio_trap_info 4 48510 NULL
89633 +ps_upsd_timeouts_read_28924 ps_upsd_timeouts_read 3 28924 NULL
89634 +vring_add_indirect_20737 vring_add_indirect 3-4 20737 NULL
89635 +push_rx_28939 push_rx 3 28939 NULL
89636 +__copy_from_user_inatomic_4365 __copy_from_user_inatomic 3 4365 NULL
89637 +vol_cdev_direct_write_20751 vol_cdev_direct_write 3 20751 NULL
89638 +idetape_chrdev_write_53976 idetape_chrdev_write 3 53976 NULL
89639 +sys_setdomainname_4373 sys_setdomainname 2 4373 NULL
89640 +fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
89641 +copy_vm86_regs_from_user_45340 copy_vm86_regs_from_user 3 45340 NULL
89642 +nouveau_dmaobj_create__61730 nouveau_dmaobj_create_ 6 61730 NULL
89643 +btrfs_trim_block_group_28963 btrfs_trim_block_group 3 28963 NULL
89644 +irda_sendmsg_4388 irda_sendmsg 4 4388 NULL
89645 +aac_srcv_ioremap_6659 aac_srcv_ioremap 2 6659 NULL
89646 +ubi_leb_change_10289 ubi_leb_change 4 10289 NULL
89647 +read_file_credit_dist_stats_54367 read_file_credit_dist_stats 3 54367 NULL
89648 +alloc_sched_domains_28972 alloc_sched_domains 1 28972 NULL
89649 +pcpu_extend_area_map_12589 pcpu_extend_area_map 2 12589 NULL
89650 +read_file_interrupt_61742 read_file_interrupt 3 61742 NULL nohasharray
89651 +read_file_regval_61742 read_file_regval 3 61742 &read_file_interrupt_61742
89652 +btmrvl_hscmd_write_27089 btmrvl_hscmd_write 3 27089 NULL
89653 +fb_alloc_cmap_gfp_20792 fb_alloc_cmap_gfp 2 20792 NULL
89654 +iwl_dbgfs_rxon_flags_read_20795 iwl_dbgfs_rxon_flags_read 3 20795 NULL
89655 +cxacru_cm_get_array_4412 cxacru_cm_get_array 4 4412 NULL
89656 +libfc_vport_create_4415 libfc_vport_create 2 4415 NULL
89657 +hci_sock_setsockopt_28993 hci_sock_setsockopt 5 28993 NULL
89658 +tstats_write_60432 tstats_write 3 60432 NULL nohasharray
89659 +kmalloc_60432 kmalloc 1 60432 &tstats_write_60432
89660 +bin_uuid_28999 bin_uuid 3 28999 NULL
89661 +sys_sendto_20809 sys_sendto 6 20809 NULL
89662 +alloc_page_cgroup_2919 alloc_page_cgroup 1 2919 NULL
89663 +set_registers_53582 set_registers 3 53582 NULL
89664 +fc_fcp_frame_alloc_12624 fc_fcp_frame_alloc 2 12624 NULL
89665 +do_pages_stat_4437 do_pages_stat 2 4437 NULL
89666 +lane2_associate_req_45398 lane2_associate_req 4 45398 NULL
89667 +tcf_csum_ipv4_udp_30777 tcf_csum_ipv4_udp 4 30777 NULL
89668 +pwr_rcvd_awake_bcns_cnt_read_12632 pwr_rcvd_awake_bcns_cnt_read 3 12632 NULL
89669 +bchannel_get_rxbuf_37213 bchannel_get_rxbuf 2 37213 NULL
89670 +keymap_store_45406 keymap_store 4 45406 NULL
89671 +pn_sendmsg_12640 pn_sendmsg 4 12640 NULL
89672 +dwc3_link_state_write_12641 dwc3_link_state_write 3 12641 NULL
89673 +wl1271_format_buffer_20834 wl1271_format_buffer 2 20834 NULL
89674 +il4965_rs_sta_dbgfs_rate_scale_data_read_37792 il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 NULL
89675 +pfkey_recvmsg_53604 pfkey_recvmsg 4 53604 NULL
89676 +xz_dec_init_29029 xz_dec_init 2 29029 NULL
89677 +regmap_access_read_file_37223 regmap_access_read_file 3 37223 NULL
89678 +tcp_dma_try_early_copy_4457 tcp_dma_try_early_copy 3 4457 NULL
89679 +__do_replace_37227 __do_replace 5 37227 NULL
89680 +dn_alloc_send_pskb_4465 dn_alloc_send_pskb 2 4465 NULL
89681 +ieee80211_if_read_ht_opmode_29044 ieee80211_if_read_ht_opmode 3 29044 NULL
89682 +rx_filter_dup_filter_read_37238 rx_filter_dup_filter_read 3 37238 NULL
89683 +at76_set_card_command_4471 at76_set_card_command 4 4471 NULL
89684 +rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 NULL
89685 +tso_fragment_29050 tso_fragment 3 29050 NULL
89686 +__alloc_pred_stack_26687 __alloc_pred_stack 2 26687 NULL
89687 +sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
89688 +sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
89689 +rproc_recovery_read_36245 rproc_recovery_read 3 36245 NULL
89690 +xd_write_multiple_pages_53633 xd_write_multiple_pages 6-5 53633 NULL
89691 +ccid_getsockopt_builtin_ccids_53634 ccid_getsockopt_builtin_ccids 2 53634 NULL
89692 +__iio_allocate_sw_ring_buffer_4843 __iio_allocate_sw_ring_buffer 3 4843 NULL
89693 +init_per_cpu_17880 init_per_cpu 1 17880 NULL
89694 +iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL
89695 +intel_render_ring_init_dri_45446 intel_render_ring_init_dri 2-3 45446 NULL
89696 +udp_sendmsg_4492 udp_sendmsg 4 4492 NULL
89697 +ieee80211_probereq_get_29069 ieee80211_probereq_get 4-6 29069 NULL
89698 +vmbus_establish_gpadl_4495 vmbus_establish_gpadl 3 4495 NULL
89699 +bfad_debugfs_write_regwr_61841 bfad_debugfs_write_regwr 3 61841 NULL
89700 +_alloc_cdb_cont_23609 _alloc_cdb_cont 2 23609 NULL
89701 +set_link_security_4502 set_link_security 4 4502 NULL
89702 +nr_sendmsg_53656 nr_sendmsg 4 53656 NULL
89703 +l1oip_socket_parse_4507 l1oip_socket_parse 4 4507 NULL
89704 +tracing_read_dyn_info_45468 tracing_read_dyn_info 3 45468 NULL
89705 +fs_path_prepare_for_add_61854 fs_path_prepare_for_add 2 61854 NULL
89706 +key_icverrors_read_20895 key_icverrors_read 3 20895 NULL
89707 +srp_target_alloc_37288 srp_target_alloc 3 37288 NULL
89708 +mmio_read_40348 mmio_read 4 40348 NULL
89709 +vfio_msi_enable_20906 vfio_msi_enable 2 20906 NULL
89710 +ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
89711 +compat_sys_readv_20911 compat_sys_readv 3 20911 NULL
89712 +ivtv_write_12721 ivtv_write 3 12721 NULL
89713 +fuse_fill_write_pages_53682 fuse_fill_write_pages 4 53682 NULL
89714 +islpci_mgt_transaction_23610 islpci_mgt_transaction 5 23610 NULL
89715 +sys_llistxattr_4532 sys_llistxattr 3 4532 NULL
89716 +isdn_ppp_write_29109 isdn_ppp_write 4 29109 NULL
89717 +da9052_group_write_4534 da9052_group_write 3 4534 NULL
89718 +v4l2_event_subscribe_53687 v4l2_event_subscribe 3 53687 NULL
89719 +jffs2_write_dirent_37311 jffs2_write_dirent 5 37311 NULL
89720 +key_rx_spec_read_12736 key_rx_spec_read 3 12736 NULL
89721 +tx_frag_bad_mblk_num_read_28064 tx_frag_bad_mblk_num_read 3 28064 NULL
89722 +videobuf_vmalloc_to_sg_4548 videobuf_vmalloc_to_sg 2 4548 NULL
89723 +rds_message_copy_from_user_45510 rds_message_copy_from_user 3 45510 NULL
89724 +ieee80211_rtl_auth_challenge_61897 ieee80211_rtl_auth_challenge 3 61897 NULL
89725 +send_msg_37323 send_msg 4 37323 NULL
89726 +brcmf_sdbrcm_membytes_37324 brcmf_sdbrcm_membytes 3-5 37324 NULL
89727 +l2cap_create_connless_pdu_37327 l2cap_create_connless_pdu 3 37327 NULL
89728 +clear_refs_write_61904 clear_refs_write 3 61904 NULL
89729 +scsi_mode_select_37330 scsi_mode_select 6 37330 NULL
89730 +rxrpc_server_sendmsg_37331 rxrpc_server_sendmsg 4 37331 NULL
89731 +altera_set_ir_post_20948 altera_set_ir_post 2 20948 NULL
89732 +virtqueue_add_buf_59470 virtqueue_add_buf 3-4 59470 NULL
89733 +proc_scsi_write_29142 proc_scsi_write 3 29142 NULL
89734 +dsp_buffer_alloc_11684 dsp_buffer_alloc 2 11684 NULL
89735 +rx_filter_arp_filter_read_61914 rx_filter_arp_filter_read 3 61914 NULL
89736 +reshape_ring_29147 reshape_ring 2 29147 NULL
89737 +cgroup_read_u64_45532 cgroup_read_u64 5 45532 NULL
89738 +au0828_init_isoc_61917 au0828_init_isoc 3-2 61917 NULL
89739 +copy_macs_45534 copy_macs 4 45534 NULL
89740 +sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL
89741 +listxattr_12769 listxattr 3 12769 NULL
89742 +xfs_buf_get_maps_4581 xfs_buf_get_maps 2 4581 NULL
89743 +wdm_write_53735 wdm_write 3 53735 NULL
89744 +snd_rme9652_playback_copy_20970 snd_rme9652_playback_copy 5 20970 NULL
89745 +send_bulk_static_data_61932 send_bulk_static_data 3 61932 NULL
89746 +cx18_copy_mdl_to_user_45549 cx18_copy_mdl_to_user 4 45549 NULL
89747 +mempool_create_29437 mempool_create 1 29437 NULL
89748 +platform_create_bundle_12785 platform_create_bundle 4-6 12785 NULL
89749 +brcmf_tx_frame_20978 brcmf_tx_frame 3 20978 NULL
89750 +sock_alloc_send_pskb_21246 sock_alloc_send_pskb 2 21246 NULL
89751 +stats_dot11ACKFailureCount_read_45558 stats_dot11ACKFailureCount_read 3 45558 NULL
89752 +alg_setsockopt_20985 alg_setsockopt 5 20985 NULL
89753 +venus_rmdir_45564 venus_rmdir 4 45564 NULL
89754 +scsi_adjust_queue_depth_12802 scsi_adjust_queue_depth 3 12802 NULL
89755 +rsa_extract_mpi_6973 rsa_extract_mpi 5 6973 NULL
89756 +squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
89757 +mgmt_event_12810 mgmt_event 4 12810 NULL
89758 +ntfs_rl_realloc_nofail_32173 ntfs_rl_realloc_nofail 3 32173 NULL
89759 +xfs_inumbers_fmt_12817 xfs_inumbers_fmt 3 12817 NULL
89760 +ipath_create_cq_45586 ipath_create_cq 2 45586 NULL
89761 +wusb_prf_256_29203 wusb_prf_256 7 29203 NULL nohasharray
89762 +alloc_group_attrs_29203 alloc_group_attrs 3 29203 &wusb_prf_256_29203
89763 +comedi_alloc_subdevices_29207 comedi_alloc_subdevices 2 29207 NULL
89764 +rdma_set_ib_paths_45592 rdma_set_ib_paths 3 45592 NULL
89765 +compat_udpv6_setsockopt_42981 compat_udpv6_setsockopt 5 42981 NULL
89766 +rds_iw_inc_copy_to_user_29214 rds_iw_inc_copy_to_user 3 29214 NULL
89767 +zd_usb_iowrite16v_async_23984 zd_usb_iowrite16v_async 3 23984 NULL
89768 +TSS_authhmac_12839 TSS_authhmac 3 12839 NULL
89769 +iwl_dbgfs_temperature_read_29224 iwl_dbgfs_temperature_read 3 29224 NULL
89770 +virtnet_send_command_61993 virtnet_send_command 5-6 61993 NULL
89771 +sys_getxattr_37418 sys_getxattr 4 37418 NULL
89772 +regmap_raw_write_53803 regmap_raw_write 4 53803 NULL
89773 +hci_sock_sendmsg_37420 hci_sock_sendmsg 4 37420 NULL
89774 +spidev_ioctl_12846 spidev_ioctl 2 12846 NULL
89775 +t4vf_pktgl_to_skb_39005 t4vf_pktgl_to_skb 2 39005 NULL
89776 +audit_log_n_hex_45617 audit_log_n_hex 3 45617 NULL
89777 +devm_ioremap_29235 devm_ioremap 2-3 29235 NULL
89778 +irq_domain_add_linear_29236 irq_domain_add_linear 2 29236 NULL
89779 +recover_peb_29238 recover_peb 6-7 29238 NULL
89780 +security_context_to_sid_core_29248 security_context_to_sid_core 2 29248 NULL
89781 +proc_fault_inject_write_21058 proc_fault_inject_write 3 21058 NULL
89782 +i915_gem_execbuffer_relocate_slow_25355 i915_gem_execbuffer_relocate_slow 7 25355 NULL
89783 +jffs2_do_unlink_62020 jffs2_do_unlink 4 62020 NULL
89784 +tun_sendmsg_10337 tun_sendmsg 4 10337 NULL
89785 +skb_add_data_nocache_4682 skb_add_data_nocache 4 4682 NULL
89786 +cx18_read_pos_4683 cx18_read_pos 3 4683 NULL
89787 +brcmf_sdio_dump_console_37455 brcmf_sdio_dump_console 4 37455 NULL
89788 +pmcraid_build_passthrough_ioadls_62034 pmcraid_build_passthrough_ioadls 2 62034 NULL
89789 +event_calibration_read_21083 event_calibration_read 3 21083 NULL
89790 +ppp_tx_cp_62044 ppp_tx_cp 5 62044 NULL
89791 +prism2_set_genericelement_29277 prism2_set_genericelement 3 29277 NULL
89792 +sctp_user_addto_chunk_62047 sctp_user_addto_chunk 2-3 62047 NULL
89793 +cxgbi_alloc_big_mem_4707 cxgbi_alloc_big_mem 1 4707 NULL
89794 +compat_mpctl_ioctl_45671 compat_mpctl_ioctl 2 45671 NULL
89795 +do_pselect_62061 do_pselect 1 62061 NULL
89796 +btmrvl_gpiogap_read_4718 btmrvl_gpiogap_read 3 4718 NULL
89797 +dgram_sendmsg_45679 dgram_sendmsg 4 45679 NULL
89798 +kmem_realloc_37489 kmem_realloc 2 37489 NULL
89799 +ati_create_gatt_pages_4722 ati_create_gatt_pages 1 4722 NULL nohasharray
89800 +show_header_4722 show_header 3 4722 &ati_create_gatt_pages_4722
89801 +ip_options_get_from_user_64958 ip_options_get_from_user 4 64958 NULL
89802 +sn9c102_read_29305 sn9c102_read 3 29305 NULL
89803 +pcpu_alloc_bootmem_62074 pcpu_alloc_bootmem 2 62074 NULL
89804 +smk_write_ambient_45691 smk_write_ambient 3 45691 NULL
89805 +sg_read_25799 sg_read 3 25799 NULL
89806 +uwb_rc_cmd_done_35892 uwb_rc_cmd_done 4 35892 NULL
89807 +ci_ll_init_12930 ci_ll_init 3 12930 NULL
89808 +unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 NULL
89809 +nouveau_namedb_create__26732 nouveau_namedb_create_ 7 26732 NULL
89810 +pwr_disable_ps_read_13176 pwr_disable_ps_read 3 13176 NULL
89811 +vmalloc_32_user_37519 vmalloc_32_user 1 37519 NULL
89812 +fd_do_writev_29329 fd_do_writev 3 29329 NULL
89813 +hugetlb_cgroup_read_49259 hugetlb_cgroup_read 5 49259 NULL
89814 +do_ip_vs_set_ctl_48641 do_ip_vs_set_ctl 4 48641 NULL
89815 +ecryptfs_send_message_locked_31801 ecryptfs_send_message_locked 2 31801 NULL
89816 +dvb_ca_en50221_init_45718 dvb_ca_en50221_init 4 45718 NULL
89817 +__alloc_ei_netdev_29338 __alloc_ei_netdev 1 29338 NULL
89818 +jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL
89819 +new_skb_21148 new_skb 1 21148 NULL
89820 +ath6kl_mgmt_tx_21153 ath6kl_mgmt_tx 9 21153 NULL
89821 +l2cap_sock_setsockopt_old_29346 l2cap_sock_setsockopt_old 4 29346 NULL
89822 +bm_status_write_12964 bm_status_write 3 12964 NULL
89823 +mmc_test_alloc_mem_28102 mmc_test_alloc_mem 3-2 28102 NULL
89824 +snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL
89825 +wusb_ccm_mac_32199 wusb_ccm_mac 7 32199 NULL
89826 +ip6_ufo_append_data_4780 ip6_ufo_append_data 5-6-7 4780 NULL
89827 +sep_create_dcb_dmatables_context_37551 sep_create_dcb_dmatables_context 6 37551 NULL nohasharray
89828 +nf_nat_mangle_tcp_packet_37551 nf_nat_mangle_tcp_packet 6-8 37551 &sep_create_dcb_dmatables_context_37551
89829 +bnad_debugfs_write_regrd_6706 bnad_debugfs_write_regrd 3 6706 NULL
89830 +rw_copy_check_uvector_45748 rw_copy_check_uvector 3 45748 NULL nohasharray
89831 +v4l2_ctrl_new_std_45748 v4l2_ctrl_new_std 5 45748 &rw_copy_check_uvector_45748
89832 +qib_diag_write_62133 qib_diag_write 3 62133 NULL
89833 +gnttab_expand_15817 gnttab_expand 1 15817 NULL
89834 +lkdtm_debugfs_read_45752 lkdtm_debugfs_read 3 45752 NULL
89835 +sctp_make_chunk_12986 sctp_make_chunk 4 12986 NULL
89836 +sisusb_send_packet_20891 sisusb_send_packet 2 20891 NULL
89837 +TransmitTcb_12989 TransmitTcb 4 12989 NULL
89838 +mthca_setup_cmd_doorbells_53954 mthca_setup_cmd_doorbells 2 53954 NULL
89839 +ncp__vol2io_4804 ncp__vol2io 5 4804 NULL
89840 +video_usercopy_62151 video_usercopy 2 62151 NULL
89841 +cx18_v4l2_read_21196 cx18_v4l2_read 3 21196 NULL
89842 +repair_io_failure_4815 repair_io_failure 4 4815 NULL
89843 +xhci_alloc_streams_37586 xhci_alloc_streams 5 37586 NULL
89844 +p9_client_zc_rpc_14345 p9_client_zc_rpc 7 14345 NULL
89845 +bnx2i_send_nl_mesg_53353 bnx2i_send_nl_mesg 4 53353 NULL
89846 +ipc_rcu_alloc_21208 ipc_rcu_alloc 1 21208 NULL
89847 +___alloc_bootmem_nopanic_53626 ___alloc_bootmem_nopanic 1 53626 NULL
89848 +subsystem_filter_write_13022 subsystem_filter_write 3 13022 NULL
89849 +tracing_max_lat_read_8890 tracing_max_lat_read 3 8890 NULL
89850 +raw_setsockopt_45800 raw_setsockopt 5 45800 NULL
89851 +alloc_upcall_62186 alloc_upcall 2 62186 NULL
89852 +kvm_read_guest_page_mmu_37611 kvm_read_guest_page_mmu 6 37611 NULL
89853 +drm_compat_ioctl_51717 drm_compat_ioctl 2 51717 NULL
89854 +lbs_rdbbp_read_45805 lbs_rdbbp_read 3 45805 NULL
89855 +pcpu_alloc_alloc_info_45813 pcpu_alloc_alloc_info 1-2 45813 NULL
89856 +input_ff_create_21240 input_ff_create 2 21240 NULL
89857 +sock_kmalloc_62205 sock_kmalloc 2 62205 NULL
89858 +key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL
89859 +__dn_setsockopt_13060 __dn_setsockopt 5 13060 NULL
89860 +amthi_read_45831 amthi_read 4 45831 NULL
89861 +cmpk_message_handle_tx_54024 cmpk_message_handle_tx 4 54024 NULL
89862 +hid_register_field_4874 hid_register_field 2-3 4874 NULL
89863 +ipxrtr_route_packet_54036 ipxrtr_route_packet 4 54036 NULL
89864 +vga_arb_read_4886 vga_arb_read 3 4886 NULL
89865 +sys_ipc_4889 sys_ipc 3 4889 NULL
89866 +bio_copy_user_iov_37660 bio_copy_user_iov 4 37660 NULL
89867 +smp_build_cmd_45853 smp_build_cmd 3 45853 NULL
89868 +x509_process_extension_45854 x509_process_extension 5 45854 NULL
89869 +nfsd_read_file_62241 nfsd_read_file 6 62241 NULL
89870 +pipeline_dec_packet_out_read_54052 pipeline_dec_packet_out_read 3 54052 NULL
89871 +do_register_entry_29478 do_register_entry 4 29478 NULL
89872 +isdn_write_45863 isdn_write 3 45863 NULL
89873 +rproc_state_read_54057 rproc_state_read 3 54057 NULL
89874 +ieee80211_if_read_rc_rateidx_mcs_mask_2ghz_37675 ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 NULL
89875 +regmap_map_read_file_37685 regmap_map_read_file 3 37685 NULL
89876 +alloc_smp_req_51337 alloc_smp_req 1 51337 NULL
89877 +vmw_gmr2_bind_21305 vmw_gmr2_bind 3 21305 NULL
89878 +get_rdac_req_45882 get_rdac_req 3 45882 NULL
89879 +_malloc_54077 _malloc 1 54077 NULL
89880 +add_res_range_21310 add_res_range 4 21310 NULL
89881 +bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
89882 +sys_process_vm_writev_4928 sys_process_vm_writev 3-5 4928 NULL
89883 +ntfs_rl_insert_4931 ntfs_rl_insert 2-4 4931 NULL
89884 +ip_make_skb_13129 ip_make_skb 5-6 13129 NULL
89885 +snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
89886 +ftrace_profile_read_21327 ftrace_profile_read 3 21327 NULL
89887 +atk_debugfs_ggrp_read_29522 atk_debugfs_ggrp_read 3 29522 NULL
89888 +altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL
89889 +il_dbgfs_sram_read_62296 il_dbgfs_sram_read 3 62296 NULL
89890 +create_xattr_54106 create_xattr 5 54106 NULL
89891 +udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
89892 +ep_write_59008 ep_write 3 59008 NULL
89893 +dbgfs_frame_45917 dbgfs_frame 3 45917 NULL
89894 +sparse_early_usemaps_alloc_pgdat_section_62304 sparse_early_usemaps_alloc_pgdat_section 2 62304 NULL
89895 +devm_kzalloc_4966 devm_kzalloc 2 4966 NULL
89896 +compat_rawv6_setsockopt_4967 compat_rawv6_setsockopt 5 4967 NULL
89897 +udf_sb_alloc_partition_maps_62313 udf_sb_alloc_partition_maps 2 62313 NULL
89898 +alloc_mr_45935 alloc_mr 1 45935 NULL
89899 +read_enabled_file_bool_37744 read_enabled_file_bool 3 37744 NULL
89900 +isku_receive_54130 isku_receive 4 54130 NULL
89901 +hfcpci_empty_bfifo_62323 hfcpci_empty_bfifo 4 62323 NULL
89902 +caif_stream_recvmsg_13173 caif_stream_recvmsg 4 13173 NULL
89903 +ocfs2_control_cfu_37750 ocfs2_control_cfu 2 37750 NULL
89904 +Wb35Reg_BurstWrite_62327 Wb35Reg_BurstWrite 4 62327 NULL
89905 +ipath_cdev_init_37752 ipath_cdev_init 1 37752 NULL
89906 +idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL
89907 +alloc_orinocodev_21371 alloc_orinocodev 1 21371 NULL
89908 +i915_max_freq_write_11350 i915_max_freq_write 3 11350 NULL
89909 +leaf_dealloc_29566 leaf_dealloc 3 29566 NULL
89910 +dgrp_net_ioctl_13183 dgrp_net_ioctl 2 13183 NULL
89911 +create_trace_uprobe_13184 create_trace_uprobe 1 13184 NULL
89912 +btmrvl_pscmd_write_29504 btmrvl_pscmd_write 3 29504 NULL
89913 +lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL
89914 +video_ioctl2_21380 video_ioctl2 2 21380 NULL
89915 +dccp_setsockopt_cscov_37766 dccp_setsockopt_cscov 2 37766 NULL
89916 +alloc_ldt_21972 alloc_ldt 2 21972 NULL
89917 +ipath_resize_cq_712 ipath_resize_cq 2 712 NULL
89918 +comedi_read_13199 comedi_read 3 13199 NULL
89919 +flash_write_62354 flash_write 3 62354 NULL
89920 +rb_simple_read_45972 rb_simple_read 3 45972 NULL
89921 +mmc_ext_csd_read_13205 mmc_ext_csd_read 3 13205 NULL
89922 +i2400m_zrealloc_2x_54166 i2400m_zrealloc_2x 3 54166 NULL nohasharray
89923 +memcpy_toiovec_54166 memcpy_toiovec 3 54166 &i2400m_zrealloc_2x_54166
89924 +l2tp_ip_recvmsg_22681 l2tp_ip_recvmsg 4 22681 NULL
89925 +proc_file_read_53905 proc_file_read 3 53905 NULL
89926 +mtd_device_parse_register_5024 mtd_device_parse_register 5 5024 NULL
89927 +acpi_tb_install_table_12988 acpi_tb_install_table 1 12988 NULL
89928 +set_wd_exp_mode_pfs_62372 set_wd_exp_mode_pfs 3 62372 NULL
89929 +reiserfs_readpages_35629 reiserfs_readpages 4 35629 NULL
89930 +acpi_os_read_memory_54186 acpi_os_read_memory 1-3 54186 NULL
89931 +smk_read_logging_37804 smk_read_logging 3 37804 NULL
89932 +rx_rx_timeout_read_62389 rx_rx_timeout_read 3 62389 NULL
89933 +mgt_set_varlen_60916 mgt_set_varlen 4 60916 NULL
89934 +cru_detect_11272 cru_detect 1 11272 NULL
89935 +altera_irscan_62396 altera_irscan 2 62396 NULL
89936 +alloc_perm_bits_1532 alloc_perm_bits 2 1532 NULL
89937 +aggr_size_tx_agg_vs_rate_read_21438 aggr_size_tx_agg_vs_rate_read 3 21438 NULL
89938 +fw_download_code_13249 fw_download_code 3 13249 NULL
89939 +init_tid_tabs_13252 init_tid_tabs 2-3-4 13252 NULL
89940 +tx_frag_cache_hit_read_29639 tx_frag_cache_hit_read 3 29639 NULL
89941 +set_ssp_62411 set_ssp 4 62411 NULL
89942 +nfc_hci_send_event_21452 nfc_hci_send_event 5 21452 NULL
89943 +sierra_setup_urb_46029 sierra_setup_urb 5 46029 NULL
89944 +get_free_entries_46030 get_free_entries 1 46030 NULL
89945 +__register_chrdev_54223 __register_chrdev 2-3 54223 NULL
89946 +sctp_make_abort_user_29654 sctp_make_abort_user 3 29654 NULL
89947 +snd_midi_channel_alloc_set_28153 snd_midi_channel_alloc_set 1 28153 NULL
89948 +carl9170_rx_13272 carl9170_rx 3 13272 NULL
89949 +snd_mixart_BA1_read_5082 snd_mixart_BA1_read 5 5082 NULL
89950 +il_dbgfs_qos_read_33615 il_dbgfs_qos_read 3 33615 NULL
89951 +sel_write_enforce_48998 sel_write_enforce 3 48998 NULL
89952 +kfifo_copy_from_user_5091 kfifo_copy_from_user 3 5091 NULL
89953 +netdev_alloc_skb_62437 netdev_alloc_skb 2 62437 NULL
89954 +dma_memcpy_pg_to_iovec_1725 dma_memcpy_pg_to_iovec 6 1725 NULL
89955 +platform_device_add_resources_13289 platform_device_add_resources 3 13289 NULL
89956 +xfs_dir2_block_to_sf_37868 xfs_dir2_block_to_sf 3 37868 NULL
89957 +xfs_dir2_sf_addname_hard_54254 xfs_dir2_sf_addname_hard 3 54254 NULL
89958 +read_file_xmit_21487 read_file_xmit 3 21487 NULL
89959 +e1000_check_copybreak_62448 e1000_check_copybreak 3 62448 NULL
89960 +ceph_msgpool_get_54258 ceph_msgpool_get 2 54258 NULL
89961 +irda_recvmsg_dgram_32631 irda_recvmsg_dgram 4 32631 NULL
89962 +wusb_prf_54261 wusb_prf 7 54261 NULL nohasharray
89963 +audio_write_54261 audio_write 4 54261 &wusb_prf_54261
89964 +sys_setxattr_37880 sys_setxattr 4 37880 NULL
89965 +dvb_net_sec_37884 dvb_net_sec 3 37884 NULL
89966 +mwifiex_getlog_read_54269 mwifiex_getlog_read 3 54269 NULL
89967 +isr_tx_procs_read_23084 isr_tx_procs_read 3 23084 NULL
89968 +mmc_alloc_sg_21504 mmc_alloc_sg 1 21504 NULL
89969 +qlcnic_alloc_sds_rings_26795 qlcnic_alloc_sds_rings 2 26795 NULL
89970 +v9fs_file_readn_36353 v9fs_file_readn 4 36353 NULL nohasharray
89971 +xz_dec_lzma2_create_36353 xz_dec_lzma2_create 2 36353 &v9fs_file_readn_36353
89972 +vfio_config_do_rw_46091 vfio_config_do_rw 3 46091 NULL
89973 +dma_skb_copy_datagram_iovec_21516 dma_skb_copy_datagram_iovec 3-5 21516 NULL
89974 +ata_host_alloc_46094 ata_host_alloc 2 46094 NULL
89975 +probes_write_29711 probes_write 3 29711 NULL
89976 +btrfs_file_aio_write_21520 btrfs_file_aio_write 4 21520 NULL
89977 +us122l_ctl_msg_13330 us122l_ctl_msg 8 13330 NULL
89978 +altera_set_dr_post_54291 altera_set_dr_post 2 54291 NULL
89979 +dlm_alloc_pagevec_54296 dlm_alloc_pagevec 1 54296 NULL
89980 +kvm_read_nested_guest_page_13337 kvm_read_nested_guest_page 5 13337 NULL
89981 +il_dbgfs_stations_read_21532 il_dbgfs_stations_read 3 21532 NULL
89982 +il3945_ucode_general_stats_read_46111 il3945_ucode_general_stats_read 3 46111 NULL
89983 +tipc_link_send_sections_fast_37920 tipc_link_send_sections_fast 4 37920 NULL
89984 +mlx4_en_create_rx_ring_62498 mlx4_en_create_rx_ring 3 62498 NULL
89985 +emi62_writememory_29731 emi62_writememory 4 29731 NULL
89986 +iwl_dbgfs_current_sleep_command_read_2081 iwl_dbgfs_current_sleep_command_read 3 2081 NULL
89987 +mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL
89988 +pkt_alloc_packet_data_37928 pkt_alloc_packet_data 1 37928 NULL
89989 +hscx_empty_fifo_13360 hscx_empty_fifo 2 13360 NULL
89990 +rxrpc_send_data_21553 rxrpc_send_data 5 21553 NULL
89991 +iwl_dbgfs_status_read_5171 iwl_dbgfs_status_read 3 5171 NULL
89992 +hfcsusb_rx_frame_52745 hfcsusb_rx_frame 3 52745 NULL
89993 +pn_raw_send_54330 pn_raw_send 2 54330 NULL
89994 +pep_sendmsg_62524 pep_sendmsg 4 62524 NULL
89995 +tomoyo_update_policy_40458 tomoyo_update_policy 2 40458 NULL
89996 +sfi_map_memory_5183 sfi_map_memory 1-2 5183 NULL
89997 +iso_sched_alloc_13377 iso_sched_alloc 1 13377 NULL nohasharray
89998 +wep_key_not_found_read_13377 wep_key_not_found_read 3 13377 &iso_sched_alloc_13377
89999 +test_iso_queue_62534 test_iso_queue 5 62534 NULL
90000 +__alloc_dev_table_54343 __alloc_dev_table 2 54343 NULL
90001 +ddp_clear_map_46152 ddp_clear_map 4 46152 NULL
90002 +cxio_hal_init_resource_29771 cxio_hal_init_resource 2-6-7 29771 NULL nohasharray
90003 +ip_vs_conn_fill_param_sync_29771 ip_vs_conn_fill_param_sync 6 29771 &cxio_hal_init_resource_29771
90004 +__netlink_change_ngroups_46156 __netlink_change_ngroups 2 46156 NULL
90005 +sco_sock_sendmsg_62542 sco_sock_sendmsg 4 62542 NULL
90006 +_osd_realloc_seg_54352 _osd_realloc_seg 3 54352 NULL
90007 +pipe_set_size_5204 pipe_set_size 2 5204 NULL
90008 +tcf_hash_create_54360 tcf_hash_create 4 54360 NULL
90009 +ppp_cp_parse_cr_5214 ppp_cp_parse_cr 4 5214 NULL
90010 +isdn_read_50021 isdn_read 3 50021 NULL
90011 +vfs_readlink_54368 vfs_readlink 3 54368 NULL
90012 +pep_recvmsg_19402 pep_recvmsg 4 19402 NULL
90013 +ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
90014 +subsystem_filter_read_62310 subsystem_filter_read 3 62310 NULL
90015 +ath6kl_debug_roam_tbl_event_5224 ath6kl_debug_roam_tbl_event 3 5224 NULL
90016 +netxen_alloc_sds_rings_13417 netxen_alloc_sds_rings 2 13417 NULL
90017 +ah_alloc_tmp_54378 ah_alloc_tmp 3-2 54378 NULL
90018 +ssb_ioremap_5228 ssb_ioremap 2 5228 NULL
90019 +xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL
90020 +xlog_do_recovery_pass_21618 xlog_do_recovery_pass 3 21618 NULL
90021 +isdn_ppp_skb_push_5236 isdn_ppp_skb_push 2 5236 NULL
90022 +get_subdir_62581 get_subdir 3 62581 NULL
90023 +iwl_dbgfs_power_save_status_read_54392 iwl_dbgfs_power_save_status_read 3 54392 NULL
90024 +sctp_abort_pkt_new_5241 sctp_abort_pkt_new 6 5241 NULL
90025 +vfs_readv_38011 vfs_readv 3 38011 NULL
90026 +keyring_read_13438 keyring_read 3 13438 NULL
90027 +sctp_setsockopt_peer_primary_addr_13440 sctp_setsockopt_peer_primary_addr 3 13440 NULL nohasharray
90028 +set_tap_pwup_pfs_13440 set_tap_pwup_pfs 3 13440 &sctp_setsockopt_peer_primary_addr_13440
90029 +ath6kl_cfg80211_connect_event_13443 ath6kl_cfg80211_connect_event 7-8-9 13443 NULL
90030 +crypto_shash_setkey_60483 crypto_shash_setkey 3 60483 NULL
90031 +tm6000_i2c_recv_regs_46215 tm6000_i2c_recv_regs 5 46215 NULL
90032 +packet_alloc_skb_62602 packet_alloc_skb 2-5-4 62602 NULL
90033 +prism2_send_mgmt_62605 prism2_send_mgmt 4 62605 NULL nohasharray
90034 +nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 &prism2_send_mgmt_62605
90035 +__probe_kernel_write_29842 __probe_kernel_write 3 29842 NULL
90036 +aggr_recv_addba_req_evt_38037 aggr_recv_addba_req_evt 4 38037 NULL
90037 +kvm_read_hva_atomic_29848 kvm_read_hva_atomic 3 29848 NULL
90038 +ftrace_write_29551 ftrace_write 3 29551 NULL
90039 +il_dbgfs_chain_noise_read_38044 il_dbgfs_chain_noise_read 3 38044 NULL
90040 +iscsi_post_host_event_13473 iscsi_post_host_event 4 13473 NULL
90041 +ems_pcmcia_add_card_62627 ems_pcmcia_add_card 2 62627 NULL
90042 +mpi_read_raw_data_46248 mpi_read_raw_data 2 46248 NULL
90043 +dev_write_7708 dev_write 3 7708 NULL
90044 +_xfs_buf_alloc_38058 _xfs_buf_alloc 3 38058 NULL
90045 +nsm_create_handle_38060 nsm_create_handle 4 38060 NULL
90046 +atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
90047 +ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL
90048 +sisusb_copy_memory_35016 sisusb_copy_memory 4 35016 NULL
90049 +lpfc_sli4_queue_alloc_62646 lpfc_sli4_queue_alloc 3 62646 NULL
90050 +alloc_ltalkdev_38071 alloc_ltalkdev 1 38071 NULL
90051 +nf_nat_ftp_46265 nf_nat_ftp 6 46265 NULL
90052 +nfc_targets_found_29886 nfc_targets_found 3 29886 NULL
90053 +mwifiex_alloc_sdio_mpa_buffers_60961 mwifiex_alloc_sdio_mpa_buffers 2-3 60961 NULL
90054 +evdev_ioctl_handler_21705 evdev_ioctl_handler 2 21705 NULL
90055 +request_key_auth_new_38092 request_key_auth_new 3 38092 NULL
90056 +proc_self_readlink_38094 proc_self_readlink 3 38094 NULL
90057 +smk_write_mapped_13519 smk_write_mapped 3 13519 NULL
90058 +isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL
90059 +bm_init_13529 bm_init 2 13529 NULL
90060 +check586_29914 check586 2 29914 NULL
90061 +snd_pcm_oss_write_38108 snd_pcm_oss_write 3 38108 NULL
90062 +pep_alloc_skb_46303 pep_alloc_skb 3 46303 NULL
90063 +reiserfs_allocate_list_bitmaps_21732 reiserfs_allocate_list_bitmaps 3 21732 NULL
90064 +ioremap_wc_62695 ioremap_wc 1-2 62695 NULL
90065 +pg_read_17276 pg_read 3 17276 NULL
90066 +edt_ft5x06_debugfs_raw_data_read_28002 edt_ft5x06_debugfs_raw_data_read 3 28002 NULL
90067 +ep0_read_38095 ep0_read 3 38095 NULL
90068 +batadv_iv_ogm_queue_add_46319 batadv_iv_ogm_queue_add 3 46319 NULL
90069 +__nf_nat_mangle_tcp_packet_21744 __nf_nat_mangle_tcp_packet 8-6 21744 NULL
90070 +ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL
90071 +cgroup_write_X64_54514 cgroup_write_X64 5 54514 NULL
90072 +bioset_integrity_create_62708 bioset_integrity_create 2 62708 NULL
90073 +rfc4106_set_key_54519 rfc4106_set_key 3 54519 NULL
90074 +cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 3-2 5368 NULL
90075 +mthca_alloc_init_21754 mthca_alloc_init 2 21754 NULL
90076 +l2down_create_21755 l2down_create 4 21755 NULL
90077 +alloc_tio_13564 alloc_tio 3 13564 NULL
90078 +viacam_read_54526 viacam_read 3 54526 NULL
90079 +btrfs_mksubvol_58240 btrfs_mksubvol 3 58240 NULL
90080 +tunables_read_36385 tunables_read 3 36385 NULL
90081 +opera1_usb_i2c_msgxfer_64521 opera1_usb_i2c_msgxfer 4 64521 NULL
90082 +iio_debugfs_write_reg_22742 iio_debugfs_write_reg 3 22742 NULL
90083 +read_file_antenna_13574 read_file_antenna 3 13574 NULL
90084 +__ntfs_copy_from_user_iovec_inatomic_38153 __ntfs_copy_from_user_iovec_inatomic 3-4 38153 NULL
90085 +setsockopt_54539 setsockopt 5 54539 NULL
90086 +gen_pool_add_21776 gen_pool_add 3 21776 NULL
90087 +iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL
90088 +tty_register_device_4544 tty_register_device 2 4544 NULL
90089 +cache_write_13589 cache_write 3 13589 NULL
90090 +mpt_lan_receive_post_turbo_13592 mpt_lan_receive_post_turbo 2 13592 NULL
90091 +xfs_da_grow_inode_int_21785 xfs_da_grow_inode_int 3 21785 NULL
90092 +key_replays_read_62746 key_replays_read 3 62746 NULL
90093 +smk_write_direct_46363 smk_write_direct 3 46363 NULL
90094 +aac_sa_ioremap_13596 aac_sa_ioremap 2 13596 NULL nohasharray
90095 +irias_new_octseq_value_13596 irias_new_octseq_value 2 13596 &aac_sa_ioremap_13596
90096 +mwifiex_usb_submit_rx_urb_54558 mwifiex_usb_submit_rx_urb 2 54558 NULL
90097 +irias_add_octseq_attrib_29983 irias_add_octseq_attrib 4 29983 NULL
90098 +cdev_add_38176 cdev_add 2-3 38176 NULL
90099 +brcmf_sdcard_recv_buf_38179 brcmf_sdcard_recv_buf 6 38179 NULL
90100 +__ioremap_caller_21800 __ioremap_caller 1-2 21800 NULL
90101 +alloc_and_copy_ftrace_hash_29368 alloc_and_copy_ftrace_hash 1 29368 NULL
90102 +ubi_dump_flash_46381 ubi_dump_flash 4 46381 NULL
90103 +swap_cgroup_swapon_13614 swap_cgroup_swapon 2 13614 NULL
90104 +wm8994_bulk_write_13615 wm8994_bulk_write 3 13615 NULL
90105 +init_chip_wc_pat_62768 init_chip_wc_pat 2 62768 NULL
90106 +nfsd_vfs_write_54577 nfsd_vfs_write 6 54577 NULL
90107 +ax25_sendmsg_62770 ax25_sendmsg 4 62770 NULL
90108 +rt2x00debug_write_rf_38195 rt2x00debug_write_rf 3 38195 NULL
90109 +fw_iso_buffer_init_54582 fw_iso_buffer_init 3 54582 NULL
90110 +vmalloc_user_32308 vmalloc_user 1 32308 NULL
90111 +get_ucode_user_38202 get_ucode_user 3 38202 NULL
90112 +ath6kl_wmi_startscan_cmd_33674 ath6kl_wmi_startscan_cmd 8 33674 NULL
90113 +fuse_file_aio_write_46399 fuse_file_aio_write 4 46399 NULL
90114 +mem_fwlog_free_mem_blks_read_59616 mem_fwlog_free_mem_blks_read 3 59616 NULL
90115 +packet_snd_13634 packet_snd 3 13634 NULL
90116 +alloc_netdev_mqs_30030 alloc_netdev_mqs 1 30030 NULL
90117 +osd_req_list_partition_collections_38223 osd_req_list_partition_collections 5 38223 NULL
90118 +sfi_map_table_5462 sfi_map_table 1 5462 NULL
90119 +blk_msg_write_13655 blk_msg_write 3 13655 NULL
90120 +scsi_vpd_inquiry_30040 scsi_vpd_inquiry 4 30040 NULL
90121 +fwSendNullPacket_54618 fwSendNullPacket 2 54618 NULL
90122 +drp_wmove_30043 drp_wmove 4 30043 NULL
90123 +tracing_total_entries_read_62817 tracing_total_entries_read 3 62817 NULL
90124 +cache_downcall_13666 cache_downcall 3 13666 NULL
90125 +xfs_efi_init_5476 xfs_efi_init 2 5476 NULL
90126 +ubi_leb_write_5478 ubi_leb_write 4-5 5478 NULL
90127 +cifs_security_flags_proc_write_5484 cifs_security_flags_proc_write 3 5484 NULL
90128 +cp210x_set_config_46447 cp210x_set_config 4 46447 NULL
90129 +sisusbcon_bmove_21873 sisusbcon_bmove 6-5-7 21873 NULL
90130 +debug_debug3_read_56894 debug_debug3_read 3 56894 NULL
90131 +tty_write_5494 tty_write 3 5494 NULL
90132 +iscsi_ping_comp_event_38263 iscsi_ping_comp_event 5 38263 NULL
90133 +tomoyo_update_domain_5498 tomoyo_update_domain 2 5498 NULL
90134 +rx_fcs_err_read_62844 rx_fcs_err_read 3 62844 NULL
90135 +irq_timeout_read_54653 irq_timeout_read 3 54653 NULL
90136 +teiup_create_43201 teiup_create 3 43201 NULL
90137 +dns_resolver_read_54658 dns_resolver_read 3 54658 NULL
90138 +filldir64_46469 filldir64 3 46469 NULL
90139 +line6_alloc_sysex_buffer_28225 line6_alloc_sysex_buffer 4 28225 NULL nohasharray
90140 +set_dis_disc_pfs_28225 set_dis_disc_pfs 3 28225 &line6_alloc_sysex_buffer_28225
90141 +fw_iso_buffer_alloc_13704 fw_iso_buffer_alloc 2 13704 NULL
90142 +ablkcipher_copy_iv_64140 ablkcipher_copy_iv 3 64140 NULL
90143 +cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL
90144 +snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL
90145 +tg3_run_loopback_30093 tg3_run_loopback 2 30093 NULL
90146 +spidev_message_5518 spidev_message 3 5518 NULL
90147 +vmemmap_alloc_block_43245 vmemmap_alloc_block 1 43245 NULL
90148 +bio_kmalloc_54672 bio_kmalloc 2 54672 NULL
90149 +ezusb_writememory_45976 ezusb_writememory 4 45976 NULL
90150 +evm_read_key_54674 evm_read_key 3 54674 NULL
90151 +sctp_make_op_error_space_5528 sctp_make_op_error_space 3 5528 NULL
90152 +l2tp_ip6_recvmsg_62874 l2tp_ip6_recvmsg 4 62874 NULL
90153 +qsfp_1_read_21915 qsfp_1_read 3 21915 NULL
90154 +do_proc_readlink_14096 do_proc_readlink 3 14096 NULL
90155 +em28xx_init_isoc_62883 em28xx_init_isoc 4 62883 NULL nohasharray
90156 +aoechr_write_62883 aoechr_write 3 62883 &em28xx_init_isoc_62883
90157 +resize_info_buffer_62889 resize_info_buffer 2 62889 NULL
90158 +if_spi_host_to_card_62890 if_spi_host_to_card 4 62890 NULL
90159 +u32_array_read_2219 u32_array_read 3 2219 NULL
90160 +pin_code_reply_46510 pin_code_reply 4 46510 NULL
90161 +mthca_alloc_cq_buf_46512 mthca_alloc_cq_buf 3 46512 NULL
90162 +sys_add_key_61288 sys_add_key 4 61288 NULL
90163 +kmsg_read_46514 kmsg_read 3 46514 NULL
90164 +audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
90165 +isr_rx_headers_read_38325 isr_rx_headers_read 3 38325 NULL
90166 +rfkill_fop_read_54711 rfkill_fop_read 3 54711 NULL
90167 +recv_stream_30138 recv_stream 4 30138 NULL
90168 +u_memcpya_30139 u_memcpya 2-3 30139 NULL
90169 +getdqbuf_62908 getdqbuf 1 62908 NULL
90170 +bdx_rxdb_create_46525 bdx_rxdb_create 1 46525 NULL
90171 +pwr_connection_out_of_sync_read_35061 pwr_connection_out_of_sync_read 3 35061 NULL
90172 +_add_sg_continuation_descriptor_54721 _add_sg_continuation_descriptor 3 54721 NULL
90173 +fc_frame_alloc_fill_59394 fc_frame_alloc_fill 2 59394 NULL
90174 +fir16_create_5574 fir16_create 3 5574 NULL
90175 +ieee802154_alloc_device_13767 ieee802154_alloc_device 1 13767 NULL
90176 +pt_write_40159 pt_write 3 40159 NULL
90177 +bioset_create_5580 bioset_create 1 5580 NULL
90178 +ocfs2_control_write_54737 ocfs2_control_write 3 54737 NULL
90179 +fb_sys_read_13778 fb_sys_read 3 13778 NULL
90180 +oz_ep_alloc_5587 oz_ep_alloc 2 5587 NULL
90181 +kzalloc_54740 kzalloc 1 54740 NULL
90182 +ipath_reg_phys_mr_23918 ipath_reg_phys_mr 3 23918 NULL nohasharray
90183 +mpihelp_mul_karatsuba_case_23918 mpihelp_mul_karatsuba_case 5-3 23918 &ipath_reg_phys_mr_23918
90184 +do_msgrcv_5590 do_msgrcv 4 5590 NULL
90185 +wep_iv_read_54744 wep_iv_read 3 54744 NULL
90186 +link_send_sections_long_46556 link_send_sections_long 4 46556 NULL
90187 +ath6kl_mgmt_powersave_ap_13791 ath6kl_mgmt_powersave_ap 6 13791 NULL
90188 +iio_event_chrdev_read_54757 iio_event_chrdev_read 3 54757 NULL
90189 +batadv_iv_ogm_aggregate_new_54761 batadv_iv_ogm_aggregate_new 2 54761 NULL
90190 +ldm_frag_add_5611 ldm_frag_add 2 5611 NULL
90191 +cx25821_video_ioctl_30188 cx25821_video_ioctl 2 30188 NULL
90192 +mempool_create_page_pool_30189 mempool_create_page_pool 1 30189 NULL
90193 +rxpipe_descr_host_int_trig_rx_data_read_22001 rxpipe_descr_host_int_trig_rx_data_read 3 22001 NULL nohasharray
90194 +compat_rw_copy_check_uvector_22001 compat_rw_copy_check_uvector 3 22001 &rxpipe_descr_host_int_trig_rx_data_read_22001
90195 +drm_property_create_bitmask_30195 drm_property_create_bitmask 5 30195 NULL
90196 +dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
90197 +usblp_ioctl_30203 usblp_ioctl 2 30203 NULL
90198 +hsi_register_board_info_13820 hsi_register_board_info 2 13820 NULL
90199 +hidp_output_raw_report_5629 hidp_output_raw_report 3 5629 NULL
90200 +nfs_idmap_request_key_30208 nfs_idmap_request_key 3 30208 NULL
90201 +read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL
90202 +flexcop_device_kmalloc_54793 flexcop_device_kmalloc 1 54793 NULL
90203 +snd_ac97_pcm_assign_30218 snd_ac97_pcm_assign 2 30218 NULL
90204 +ti_recv_22027 ti_recv 4 22027 NULL
90205 +ttm_dma_page_pool_free_34135 ttm_dma_page_pool_free 2 34135 NULL
90206 +ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
90207 +nfsd_write_54809 nfsd_write 6 54809 NULL
90208 +evdev_ioctl_compat_13851 evdev_ioctl_compat 2 13851 NULL
90209 +pcnet32_realloc_tx_ring_38428 pcnet32_realloc_tx_ring 3 38428 NULL
90210 +posix_clock_register_5662 posix_clock_register 2 5662 NULL
90211 +pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
90212 +get_skb_63008 get_skb 2 63008 NULL
90213 +zd_usb_read_fw_22049 zd_usb_read_fw 4 22049 NULL
90214 +netlink_send_38434 netlink_send 5 38434 NULL
90215 +atalk_recvmsg_22053 atalk_recvmsg 4 22053 NULL
90216 +compat_ip_setsockopt_13870 compat_ip_setsockopt 5 13870 NULL nohasharray
90217 +alloc_trace_uprobe_13870 alloc_trace_uprobe 3 13870 &compat_ip_setsockopt_13870
90218 +aircable_process_packet_46639 aircable_process_packet 5 46639 NULL
90219 +generic_perform_write_54832 generic_perform_write 3 54832 NULL
90220 +write_rio_54837 write_rio 3 54837 NULL
90221 +nouveau_engctx_create__54839 nouveau_engctx_create_ 8 54839 NULL
90222 +__vb2_perform_fileio_63033 __vb2_perform_fileio 3 63033 NULL
90223 +pipeline_defrag_to_csum_swi_read_63037 pipeline_defrag_to_csum_swi_read 3 63037 NULL
90224 +get_arg_5694 get_arg 3 5694 NULL
90225 +isr_pci_pm_read_30271 isr_pci_pm_read 3 30271 NULL
90226 +ufx_ops_write_54848 ufx_ops_write 3 54848 NULL
90227 +compat_readv_30273 compat_readv 3 30273 NULL
90228 +printer_read_54851 printer_read 3 54851 NULL
90229 +mem_rw_22085 mem_rw 3 22085 NULL
90230 +i915_min_freq_read_38470 i915_min_freq_read 3 38470 NULL
90231 +alloc_ep_req_54860 alloc_ep_req 2 54860 NULL
90232 +lowpan_fragment_xmit_22095 lowpan_fragment_xmit 3-4 22095 NULL
90233 +broadsheet_spiflash_rewrite_sector_54864 broadsheet_spiflash_rewrite_sector 2 54864 NULL
90234 +skcipher_sendmsg_30290 skcipher_sendmsg 4 30290 NULL
90235 +unlink1_63059 unlink1 3 63059 NULL
90236 +picolcd_fb_write_2318 picolcd_fb_write 3 2318 NULL
90237 +pipeline_sec_frag_swi_read_30294 pipeline_sec_frag_swi_read 3 30294 NULL
90238 +__do_krealloc_54389 __do_krealloc 2 54389 NULL
90239 +tcp_sendmsg_30296 tcp_sendmsg 4 30296 NULL
90240 +tcf_csum_ipv6_tcp_54877 tcf_csum_ipv6_tcp 4 54877 NULL
90241 +vmw_kms_readback_5727 vmw_kms_readback 6 5727 NULL
90242 +replay_log_leb_18704 replay_log_leb 3 18704 NULL
90243 +rts51x_transfer_data_partial_5735 rts51x_transfer_data_partial 6 5735 NULL
90244 +rx_decrypt_key_not_found_read_37820 rx_decrypt_key_not_found_read 3 37820 NULL
90245 +alloc_data_packet_46698 alloc_data_packet 1 46698 NULL
90246 +dev_names_read_38509 dev_names_read 3 38509 NULL
90247 +iscsi_create_iface_38510 iscsi_create_iface 5 38510 NULL
90248 +sep_prepare_input_output_dma_table_in_dcb_63087 sep_prepare_input_output_dma_table_in_dcb 4-5-2-3 63087 NULL
90249 +get_packet_5747 get_packet 3 5747 NULL
90250 +ieee80211_if_read_dot11MeshForwarding_13940 ieee80211_if_read_dot11MeshForwarding 3 13940 NULL
90251 +drm_malloc_ab_16831 drm_malloc_ab 1-2 16831 NULL
90252 +event_rx_mismatch_read_38518 event_rx_mismatch_read 3 38518 NULL
90253 +iwl_dbgfs_protection_mode_read_13943 iwl_dbgfs_protection_mode_read 3 13943 NULL
90254 +ieee80211_if_read_min_discovery_timeout_13946 ieee80211_if_read_min_discovery_timeout 3 13946 NULL
90255 +erst_dbg_write_46715 erst_dbg_write 3 46715 NULL
90256 +ath6kl_lrssi_roam_read_61022 ath6kl_lrssi_roam_read 3 61022 NULL
90257 +lpfc_idiag_queacc_read_13950 lpfc_idiag_queacc_read 3 13950 NULL
90258 +xfs_buf_get_uncached_51477 xfs_buf_get_uncached 2 51477 NULL
90259 +iscsi_pool_init_54913 iscsi_pool_init 2-4 54913 NULL
90260 +_l2_alloc_skb_11883 _l2_alloc_skb 1 11883 NULL
90261 +resource_from_user_30341 resource_from_user 3 30341 NULL
90262 +scsi_dispatch_cmd_entry_49848 scsi_dispatch_cmd_entry 3 49848 NULL
90263 +sound_write_5102 sound_write 3 5102 NULL
90264 +pn533_dep_link_up_22154 pn533_dep_link_up 5 22154 NULL
90265 +iwl_dbgfs_sensitivity_read_63116 iwl_dbgfs_sensitivity_read 3 63116 NULL
90266 +irq_domain_add_simple_46734 irq_domain_add_simple 2 46734 NULL
90267 +sctp_setsockopt_autoclose_5775 sctp_setsockopt_autoclose 3 5775 NULL
90268 +__vmalloc_node_flags_30352 __vmalloc_node_flags 1 30352 NULL
90269 +btrfs_discard_extent_38547 btrfs_discard_extent 2 38547 NULL
90270 +tx_frag_init_called_read_48377 tx_frag_init_called_read 3 48377 NULL
90271 +com90xx_found_13974 com90xx_found 3 13974 NULL
90272 +compat_sys_writev_5784 compat_sys_writev 3 5784 NULL
90273 +qcam_read_13977 qcam_read 3 13977 NULL
90274 +__vxge_hw_blockpool_malloc_5786 __vxge_hw_blockpool_malloc 2 5786 NULL
90275 +dvb_demux_read_13981 dvb_demux_read 3 13981 NULL
90276 +virtblk_add_buf_wait_54943 virtblk_add_buf_wait 3-4 54943 NULL
90277 +wl12xx_cmd_build_probe_req_54946 wl12xx_cmd_build_probe_req 6-8 54946 NULL
90278 +irda_sendmsg_dgram_38563 irda_sendmsg_dgram 4 38563 NULL
90279 +il4965_rs_sta_dbgfs_scale_table_read_38564 il4965_rs_sta_dbgfs_scale_table_read 3 38564 NULL
90280 +generic_readlink_32654 generic_readlink 3 32654 NULL
90281 +ieee80211_bss_info_update_13991 ieee80211_bss_info_update 4 13991 NULL
90282 +sys_get_mempolicy_30379 sys_get_mempolicy 3 30379 NULL
90283 +iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
90284 +skb_copy_datagram_iovec_5806 skb_copy_datagram_iovec 2-4 5806 NULL
90285 +wacom_set_device_mode_26280 wacom_set_device_mode 3 26280 NULL
90286 +trace_options_core_read_47390 trace_options_core_read 3 47390 NULL
90287 +int_hw_irq_en_46776 int_hw_irq_en 3 46776 NULL
90288 +c4iw_init_resource_30393 c4iw_init_resource 2-3 30393 NULL
90289 +__alloc_bootmem_node_nopanic_6432 __alloc_bootmem_node_nopanic 2 6432 NULL
90290 +__proc_file_read_54978 __proc_file_read 3 54978 NULL
90291 +concat_writev_21451 concat_writev 3 21451 NULL
90292 +smk_write_revoke_subj_63173 smk_write_revoke_subj 3 63173 NULL
90293 +_queue_data_54983 _queue_data 4 54983 NULL
90294 +_sys_packet_req_46793 _sys_packet_req 4 46793 NULL
90295 +pfkey_sendmsg_47394 pfkey_sendmsg 4 47394 NULL
90296 +extend_netdev_table_21453 extend_netdev_table 2 21453 NULL
90297 +rfcomm_sock_recvmsg_22227 rfcomm_sock_recvmsg 4 22227 NULL
90298 +vb2_fop_write_30420 vb2_fop_write 3 30420 NULL
90299 +ceph_msg_new_5846 ceph_msg_new 2 5846 NULL
90300 +ixgb_check_copybreak_5847 ixgb_check_copybreak 3 5847 NULL
90301 +setup_req_5848 setup_req 3 5848 NULL
90302 +read_def_modal_eeprom_14041 read_def_modal_eeprom 3 14041 NULL
90303 +rx_filter_max_arp_queue_dep_read_5851 rx_filter_max_arp_queue_dep_read 3 5851 NULL
90304 +rds_ib_inc_copy_to_user_55007 rds_ib_inc_copy_to_user 3 55007 NULL
90305 +alloc_ctrl_packet_44667 alloc_ctrl_packet 1 44667 NULL
90306 +rbd_create_rw_ops_55297 rbd_create_rw_ops 1 55297 NULL
90307 +compat_sys_move_pages_5861 compat_sys_move_pages 2 5861 NULL
90308 +cx231xx_v4l2_read_55014 cx231xx_v4l2_read 3 55014 NULL
90309 +sta_agg_status_read_14058 sta_agg_status_read 3 14058 NULL
90310 +compat_process_vm_rw_22254 compat_process_vm_rw 3-5 22254 NULL
90311 +ext2_readpages_38640 ext2_readpages 4 38640 NULL
90312 +cma_create_area_38642 cma_create_area 2 38642 NULL
90313 +audit_init_entry_38644 audit_init_entry 1 38644 NULL
90314 +sriov_enable_59689 sriov_enable 2 59689 NULL
90315 +enable_write_30456 enable_write 3 30456 NULL
90316 +shmem_pwrite_fast_46842 shmem_pwrite_fast 3 46842 NULL
90317 +tx_tx_template_programmed_read_30461 tx_tx_template_programmed_read 3 30461 NULL
90318 +mmc_send_cxd_data_38655 mmc_send_cxd_data 5 38655 NULL
90319 +zoran_ioctl_30465 zoran_ioctl 2 30465 NULL
90320 +tcp_manip_pkt_16563 tcp_manip_pkt 4 16563 NULL
90321 +qla2x00_adjust_sdev_qdepth_up_20097 qla2x00_adjust_sdev_qdepth_up 2 20097 NULL
90322 +nouveau_instmem_create__38664 nouveau_instmem_create_ 4 38664 NULL
90323 +ieee80211_mgmt_tx_46860 ieee80211_mgmt_tx 9 46860 NULL
90324 +port_show_regs_5904 port_show_regs 3 5904 NULL
90325 +nvme_alloc_queue_46865 nvme_alloc_queue 3 46865 NULL
90326 +ptp_read_63251 ptp_read 4 63251 NULL
90327 +uhci_debug_read_5911 uhci_debug_read 3 5911 NULL
90328 +compat_sys_pselect6_14105 compat_sys_pselect6 1 14105 NULL
90329 +iscsit_dump_data_payload_38683 iscsit_dump_data_payload 2 38683 NULL
90330 +__tun_chr_ioctl_22300 __tun_chr_ioctl 4 22300 NULL
90331 +stk_allocate_buffers_16291 stk_allocate_buffers 2 16291 NULL
90332 +mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
90333 +ttm_bo_kmap_ttm_5922 ttm_bo_kmap_ttm 3 5922 NULL
90334 +o2hb_debug_read_37851 o2hb_debug_read 3 37851 NULL
90335 +bitmap_storage_alloc_55077 bitmap_storage_alloc 2 55077 NULL
90336 +iscsi_iser_recv_41948 iscsi_iser_recv 4 41948 NULL
90337 +lbs_highsnr_read_5931 lbs_highsnr_read 3 5931 NULL
90338 +em28xx_alloc_isoc_46892 em28xx_alloc_isoc 4 46892 NULL
90339 +read_dma_55086 read_dma 3 55086 NULL
90340 +isr_irqs_read_9181 isr_irqs_read 3 9181 NULL
90341 +dwc3_testmode_write_30516 dwc3_testmode_write 3 30516 NULL
90342 +edac_device_alloc_ctl_info_5941 edac_device_alloc_ctl_info 1 5941 NULL
90343 +ntfs_rl_replace_14136 ntfs_rl_replace 2-4 14136 NULL
90344 +ip_send_unicast_reply_38714 ip_send_unicast_reply 6 38714 NULL
90345 +tcp_collapse_63294 tcp_collapse 6-5 63294 NULL
90346 +alloc_trace_probe_38720 alloc_trace_probe 6 38720 NULL
90347 +isdn_ppp_ccp_xmit_reset_63297 isdn_ppp_ccp_xmit_reset 6 63297 NULL
90348 +rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read_55106 rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 NULL
90349 +tipc_subseq_alloc_5957 tipc_subseq_alloc 1 5957 NULL
90350 +em_canid_change_14150 em_canid_change 3 14150 NULL
90351 +tracing_ctrl_read_46922 tracing_ctrl_read 3 46922 NULL
90352 +gsm_dlci_data_14155 gsm_dlci_data 3 14155 NULL
90353 +fb_write_46924 fb_write 3 46924 NULL
90354 +btmrvl_curpsmode_read_46939 btmrvl_curpsmode_read 3 46939 NULL
90355 +wlcore_alloc_hw_22365 wlcore_alloc_hw 1 22365 NULL
90356 +crypto_ahash_setkey_55134 crypto_ahash_setkey 3 55134 NULL
90357 +br_send_bpdu_29669 br_send_bpdu 3 29669 NULL
90358 +disk_expand_part_tbl_30561 disk_expand_part_tbl 2 30561 NULL
90359 +evdev_ioctl_22371 evdev_ioctl 2 22371 NULL
90360 +__sctp_setsockopt_connectx_46949 __sctp_setsockopt_connectx 3 46949 NULL
90361 +udf_readpages_38761 udf_readpages 4 38761 NULL
90362 +reada_add_block_54247 reada_add_block 2 54247 NULL
90363 +ieee80211_if_read_uapsd_queues_55150 ieee80211_if_read_uapsd_queues 3 55150 NULL
90364 +proc_info_read_63344 proc_info_read 3 63344 NULL
90365 +pep_indicate_38611 pep_indicate 5 38611 NULL
90366 +set_le_30581 set_le 4 30581 NULL
90367 +alloc_large_system_hash_22391 alloc_large_system_hash 2 22391 NULL
90368 +btmrvl_psmode_read_22395 btmrvl_psmode_read 3 22395 NULL
90369 +alloc_private_22399 alloc_private 2 22399 NULL
90370 +snd_gus_dram_write_38784 snd_gus_dram_write 4 38784 NULL
90371 +ps_upsd_max_sptime_read_63362 ps_upsd_max_sptime_read 3 63362 NULL
90372 +zoran_write_22404 zoran_write 3 22404 NULL
90373 +dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 NULL
90374 +idmouse_read_63374 idmouse_read 3 63374 NULL
90375 +queue_reply_22416 queue_reply 3 22416 NULL
90376 +sgl_map_user_pages_30610 sgl_map_user_pages 2 30610 NULL
90377 +sel_write_bool_46996 sel_write_bool 3 46996 NULL
90378 +ntfs_rl_append_6037 ntfs_rl_append 2-4 6037 NULL
90379 +dfs_global_file_read_7787 dfs_global_file_read 3 7787 NULL
90380 +ttm_bo_io_47000 ttm_bo_io 5 47000 NULL
90381 +ieee80211_if_write_uapsd_max_sp_len_14233 ieee80211_if_write_uapsd_max_sp_len 3 14233 NULL
90382 +sel_write_relabel_55195 sel_write_relabel 3 55195 NULL
90383 +edac_pci_alloc_ctl_info_63388 edac_pci_alloc_ctl_info 1 63388 NULL
90384 +ipv6_renew_option_38813 ipv6_renew_option 3 38813 NULL
90385 +sched_feat_write_55202 sched_feat_write 3 55202 NULL
90386 +dma_declare_coherent_memory_14244 dma_declare_coherent_memory 4-2 14244 NULL
90387 +snd_soc_hw_bulk_write_raw_14245 snd_soc_hw_bulk_write_raw 4 14245 NULL
90388 +ht40allow_map_read_55209 ht40allow_map_read 3 55209 NULL nohasharray
90389 +isdn_net_ciscohdlck_alloc_skb_55209 isdn_net_ciscohdlck_alloc_skb 2 55209 &ht40allow_map_read_55209
90390 +compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
90391 +sys_select_38827 sys_select 1 38827 NULL
90392 +rxpipe_missed_beacon_host_int_trig_rx_data_read_63405 rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 NULL
90393 +do_raw_setsockopt_55215 do_raw_setsockopt 5 55215 NULL
90394 +cx231xx_init_bulk_47024 cx231xx_init_bulk 3-2 47024 NULL
90395 +direct_entry_38836 direct_entry 3 38836 NULL
90396 +__hwahc_op_set_ptk_36510 __hwahc_op_set_ptk 5 36510 NULL
90397 +gntdev_alloc_map_35145 gntdev_alloc_map 2 35145 NULL
90398 +compat_udp_setsockopt_38840 compat_udp_setsockopt 5 38840 NULL
90399 +handle_received_packet_22457 handle_received_packet 3 22457 NULL
90400 +ath6kl_connect_event_14267 ath6kl_connect_event 7-8-9 14267 NULL
90401 +write_head_30481 write_head 4 30481 NULL
90402 +mem_cgroup_read_22461 mem_cgroup_read 5 22461 NULL
90403 +set_dis_bypass_pfs_47038 set_dis_bypass_pfs 3 47038 NULL
90404 +add_numbered_child_14273 add_numbered_child 5 14273 NULL
90405 +l2cap_sock_sendmsg_63427 l2cap_sock_sendmsg 4 63427 NULL
90406 +OS_mem_token_alloc_14276 OS_mem_token_alloc 1 14276 NULL
90407 +sep_prepare_input_output_dma_table_63429 sep_prepare_input_output_dma_table 2-4-3 63429 NULL
90408 +register_unifi_sdio_55239 register_unifi_sdio 2 55239 NULL
90409 +ath6kl_wmi_get_new_buf_52304 ath6kl_wmi_get_new_buf 1 52304 NULL
90410 +agp_remap_30665 agp_remap 2 30665 NULL
90411 +interfaces_38859 interfaces 2 38859 NULL
90412 +memcpy_fromiovec_55247 memcpy_fromiovec 3 55247 NULL
90413 +nfsd_symlink_63442 nfsd_symlink 6 63442 NULL
90414 +ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL
90415 +cache_write_procfs_22491 cache_write_procfs 3 22491 NULL
90416 +qc_capture_19298 qc_capture 3 19298 NULL
90417 +read_default_ldt_14302 read_default_ldt 2 14302 NULL
90418 +dfs_global_file_write_6112 dfs_global_file_write 3 6112 NULL
90419 +alloc_dca_provider_59670 alloc_dca_provider 2 59670 NULL
90420 +pipeline_dec_packet_in_read_47076 pipeline_dec_packet_in_read 3 47076 NULL
90421 +rtl_port_map_2385 rtl_port_map 1-2 2385 NULL
90422 +dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL
90423 +dbgfs_state_38894 dbgfs_state 3 38894 NULL
90424 +sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL
90425 +snd_info_entry_write_63474 snd_info_entry_write 3 63474 NULL
90426 +nouveau_parent_create__6131 nouveau_parent_create_ 7 6131 NULL
90427 +process_bulk_data_command_38906 process_bulk_data_command 4 38906 NULL
90428 +rx_streaming_interval_read_55291 rx_streaming_interval_read 3 55291 NULL
90429 +reada_find_extent_63486 reada_find_extent 2 63486 NULL
90430 +read_kcore_63488 read_kcore 3 63488 NULL
90431 +lbs_debugfs_read_30721 lbs_debugfs_read 3 30721 NULL
90432 +__skb_cow_39254 __skb_cow 2 39254 NULL
90433 +gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
90434 +__get_vm_area_node_55305 __get_vm_area_node 1 55305 NULL
90435 +ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
90436 +rsc_mgr_init_16299 rsc_mgr_init 3 16299 NULL
90437 +ivtv_copy_buf_to_user_6159 ivtv_copy_buf_to_user 4 6159 NULL
90438 +pskb_may_pull_22546 pskb_may_pull 2 22546 NULL
90439 +mousedev_read_47123 mousedev_read 3 47123 NULL
90440 +rawv6_recvmsg_30265 rawv6_recvmsg 4 30265 NULL
90441 +agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
90442 +vdma_mem_alloc_6171 vdma_mem_alloc 1 6171 NULL
90443 +wl1251_cmd_template_set_6172 wl1251_cmd_template_set 4 6172 NULL
90444 +ses_recv_diag_47143 ses_recv_diag 4 47143 NULL nohasharray
90445 +acpi_ut_initialize_buffer_47143 acpi_ut_initialize_buffer 2 47143 &ses_recv_diag_47143
90446 +alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
90447 +vme_user_read_55338 vme_user_read 3 55338 NULL
90448 +sctp_datamsg_from_user_55342 sctp_datamsg_from_user 4 55342 NULL
90449 +cxio_init_resource_fifo_random_47151 cxio_init_resource_fifo_random 3 47151 NULL
90450 +persistent_ram_iomap_47156 persistent_ram_iomap 1-2 47156 NULL
90451 +ath6kl_regdump_read_14393 ath6kl_regdump_read 3 14393 NULL
90452 +__ath6kl_wmi_send_mgmt_cmd_38971 __ath6kl_wmi_send_mgmt_cmd 7 38971 NULL
90453 +__hidp_send_ctrl_message_28303 __hidp_send_ctrl_message 4 28303 NULL
90454 +rs_sta_dbgfs_rate_scale_data_read_47165 rs_sta_dbgfs_rate_scale_data_read 3 47165 NULL
90455 +append_to_buffer_63550 append_to_buffer 3 63550 NULL
90456 +smk_write_onlycap_14400 smk_write_onlycap 3 14400 NULL
90457 +acpi_system_read_event_55362 acpi_system_read_event 3 55362 NULL
90458 +dbg_leb_write_63555 dbg_leb_write 4-5 63555 NULL nohasharray
90459 +kvm_write_guest_page_63555 kvm_write_guest_page 5 63555 &dbg_leb_write_63555
90460 +snapshot_read_22601 snapshot_read 3 22601 NULL
90461 +OSDSetBlock_38986 OSDSetBlock 4-2 38986 NULL
90462 +v4l2_ctrl_new_std_menu_6221 v4l2_ctrl_new_std_menu 4 6221 NULL
90463 +mtd_concat_create_14416 mtd_concat_create 2 14416 NULL
90464 +mqueue_read_file_6228 mqueue_read_file 3 6228 NULL
90465 +ioremap_cache_47189 ioremap_cache 1-2 47189 NULL
90466 +__send_to_port_55383 __send_to_port 3 55383 NULL
90467 +rproc_alloc_63577 rproc_alloc 5 63577 NULL
90468 +nf_nat_ipv4_manip_pkt_55387 nf_nat_ipv4_manip_pkt 2 55387 NULL
90469 +smk_read_doi_30813 smk_read_doi 3 30813 NULL
90470 +f_hidg_read_6238 f_hidg_read 3 6238 NULL
90471 +proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
90472 +sctp_getsockopt_delayed_ack_9232 sctp_getsockopt_delayed_ack 2 9232 NULL
90473 +get_nodes_39012 get_nodes 3 39012 NULL
90474 +fbcon_prepare_logo_6246 fbcon_prepare_logo 5 6246 NULL
90475 +sctp_setsockopt_connectx_old_22631 sctp_setsockopt_connectx_old 3 22631 NULL
90476 +ide_core_cp_entry_22636 ide_core_cp_entry 3 22636 NULL
90477 +iwl_dbgfs_plcp_delta_read_55407 iwl_dbgfs_plcp_delta_read 3 55407 NULL
90478 +pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
90479 +create_subvol_30836 create_subvol 4 30836 NULL
90480 +mthca_map_reg_5664 mthca_map_reg 2-3 5664 NULL
90481 +ci13xxx_add_device_14456 ci13xxx_add_device 3 14456 NULL
90482 +iwl_dbgfs_calib_disabled_read_22649 iwl_dbgfs_calib_disabled_read 3 22649 NULL
90483 +_zd_iowrite32v_async_locked_39034 _zd_iowrite32v_async_locked 3 39034 NULL
90484 +sctp_setsockopt_auth_chunk_30843 sctp_setsockopt_auth_chunk 3 30843 NULL
90485 +read_oldmem_55658 read_oldmem 3 55658 NULL
90486 +tx_tx_start_null_frame_read_6281 tx_tx_start_null_frame_read 3 6281 NULL
90487 +xenbus_file_write_6282 xenbus_file_write 3 6282 NULL
90488 +options_write_47243 options_write 3 47243 NULL
90489 +module_alloc_63630 module_alloc 1 63630 NULL
90490 +alloc_skb_55439 alloc_skb 1 55439 NULL
90491 +nf_nat_ipv6_manip_pkt_6289 nf_nat_ipv6_manip_pkt 2 6289 NULL
90492 +portcntrs_1_read_47253 portcntrs_1_read 3 47253 NULL
90493 +ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 NULL
90494 +ubifs_leb_write_22679 ubifs_leb_write 4-5 22679 NULL
90495 +nf_nat_sack_adjust_6297 nf_nat_sack_adjust 2 6297 NULL
90496 +proc_loginuid_write_63648 proc_loginuid_write 3 63648 NULL
90497 +mid_get_vbt_data_r10_6308 mid_get_vbt_data_r10 2 6308 NULL
90498 +vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL
90499 +__vxge_hw_channel_allocate_55462 __vxge_hw_channel_allocate 3 55462 NULL
90500 +pn_recvmsg_30887 pn_recvmsg 4 30887 NULL
90501 +rx_rx_timeout_wa_read_50204 rx_rx_timeout_wa_read 3 50204 NULL
90502 +hidraw_ioctl_63658 hidraw_ioctl 2 63658 NULL
90503 +ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
90504 +lbs_threshold_read_21046 lbs_threshold_read 5 21046 NULL
90505 +tty_audit_log_47280 tty_audit_log 8 47280 NULL
90506 +alloc_libipw_22708 alloc_libipw 1 22708 NULL
90507 +gfs2_readpages_47285 gfs2_readpages 4 47285 NULL
90508 +fc_host_post_vendor_event_30903 fc_host_post_vendor_event 3 30903 NULL
90509 +vbi_read_63673 vbi_read 3 63673 NULL
90510 +tun_get_user_39099 tun_get_user 4 39099 NULL
90511 +i2o_pool_alloc_55485 i2o_pool_alloc 4 55485 NULL
90512 +alloc_tty_driver_63681 alloc_tty_driver 1 63681 NULL
90513 +read_flush_pipefs_20171 read_flush_pipefs 3 20171 NULL
90514 +tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL
90515 +long_retry_limit_read_59766 long_retry_limit_read 3 59766 NULL
90516 +nfc_hci_hcp_message_tx_14534 nfc_hci_hcp_message_tx 6 14534 NULL
90517 +iommu_map_mmio_space_30919 iommu_map_mmio_space 1 30919 NULL
90518 +ep0_write_14536 ep0_write 3 14536 NULL nohasharray
90519 +dataflash_read_user_otp_14536 dataflash_read_user_otp 2-3 14536 &ep0_write_14536
90520 +dvb_ringbuffer_pkt_read_user_4303 dvb_ringbuffer_pkt_read_user 2-3-5 4303 NULL
90521 +cx18_copy_buf_to_user_22735 cx18_copy_buf_to_user 4 22735 NULL
90522 +ax25_output_22736 ax25_output 2 22736 NULL
90523 +__kfifo_to_user_r_39123 __kfifo_to_user_r 3 39123 NULL
90524 +l2cap_send_cmd_14548 l2cap_send_cmd 4 14548 NULL
90525 +picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
90526 +drm_vmalloc_dma_14550 drm_vmalloc_dma 1 14550 NULL
90527 +cfpkt_pad_trail_55511 cfpkt_pad_trail 2 55511 NULL nohasharray
90528 +tx_tx_done_int_template_read_55511 tx_tx_done_int_template_read 3 55511 &cfpkt_pad_trail_55511
90529 +cmtp_add_msgpart_9252 cmtp_add_msgpart 4 9252 NULL
90530 +sctp_setsockopt_rtoinfo_30941 sctp_setsockopt_rtoinfo 3 30941 NULL
90531 +nouveau_object_create__63715 nouveau_object_create_ 5 63715 NULL
90532 +hid_input_report_32458 hid_input_report 4 32458 NULL
90533 +_proc_do_string_6376 _proc_do_string 2 6376 NULL
90534 +osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 NULL
90535 +read_cis_cache_29735 read_cis_cache 4 29735 NULL
90536 +ieee80211_if_read_dot11MeshTTL_58307 ieee80211_if_read_dot11MeshTTL 3 58307 NULL
90537 +alloc_ring_39151 alloc_ring 2-4 39151 NULL
90538 +proc_coredump_filter_read_39153 proc_coredump_filter_read 3 39153 NULL
90539 +create_bounce_buffer_39155 create_bounce_buffer 3 39155 NULL
90540 +tty_port_register_device_55543 tty_port_register_device 3 55543 NULL
90541 +tty_insert_flip_string_flags_30969 tty_insert_flip_string_flags 4 30969 NULL
90542 +asix_read_cmd_13245 asix_read_cmd 5 13245 NULL
90543 +ieee80211_if_read_dot11MeshHoldingTimeout_47356 ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 NULL
90544 +idmap_pipe_downcall_14591 idmap_pipe_downcall 3 14591 NULL
90545 +selinux_secctx_to_secid_63744 selinux_secctx_to_secid 2 63744 NULL
90546 +isdn_add_channels_40905 isdn_add_channels 3 40905 NULL
90547 +bt_skb_alloc_6404 bt_skb_alloc 1 6404 NULL
90548 +get_info_55681 get_info 3 55681 NULL
90549 +setkey_14987 setkey 3 14987 NULL
90550 +__bio_map_kern_47379 __bio_map_kern 3 47379 NULL
90551 +init_list_set_39188 init_list_set 2-3 39188 NULL
90552 +ubi_more_update_data_39189 ubi_more_update_data 4 39189 NULL
90553 +snd_pcm_oss_read1_63771 snd_pcm_oss_read1 3 63771 NULL
90554 +l2up_create_6430 l2up_create 3 6430 NULL
90555 +ipr_change_queue_depth_6431 ipr_change_queue_depth 2 6431 NULL
90556 +dgrp_net_write_47392 dgrp_net_write 3 47392 NULL
90557 +spidev_compat_ioctl_63778 spidev_compat_ioctl 2 63778 NULL
90558 +add_partition_55588 add_partition 2 55588 NULL
90559 +lbs_host_sleep_read_31013 lbs_host_sleep_read 3 31013 NULL
90560 +snd_pcm_aio_read_13900 snd_pcm_aio_read 3 13900 NULL
90561 +depth_read_31112 depth_read 3 31112 NULL
90562 +macvtap_put_user_55609 macvtap_put_user 4 55609 NULL
90563 +ivtv_v4l2_write_39226 ivtv_v4l2_write 3 39226 NULL
90564 +selinux_setprocattr_55611 selinux_setprocattr 4 55611 NULL
90565 +profile_replace_14652 profile_replace 3 14652 NULL
90566 +vzalloc_47421 vzalloc 1 47421 NULL
90567 +mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
90568 +agp_create_user_memory_62955 agp_create_user_memory 1 62955 NULL
90569 +sys_writev_28384 sys_writev 3 28384 NULL
90570 +batadv_tt_response_fill_table_39236 batadv_tt_response_fill_table 1 39236 NULL
90571 +read_file_rcstat_22854 read_file_rcstat 3 22854 NULL
90572 +__videobuf_copy_stream_44769 __videobuf_copy_stream 4 44769 NULL
90573 +rx_rx_beacon_early_term_read_21559 rx_rx_beacon_early_term_read 3 21559 NULL
90574 +pktgen_if_write_55628 pktgen_if_write 3 55628 NULL
90575 +create_attr_set_22861 create_attr_set 1 22861 NULL
90576 +r128_compat_ioctl_39250 r128_compat_ioctl 2 39250 NULL nohasharray
90577 +pwr_cont_miss_bcns_spread_read_39250 pwr_cont_miss_bcns_spread_read 3 39250 &r128_compat_ioctl_39250
90578 +compat_sys_mq_timedsend_31060 compat_sys_mq_timedsend 3 31060 NULL
90579 +sel_write_load_63830 sel_write_load 3 63830 NULL
90580 +lbs_failcount_read_31063 lbs_failcount_read 3 31063 NULL
90581 +pipeline_enc_tx_stat_fifo_int_read_14680 pipeline_enc_tx_stat_fifo_int_read 3 14680 NULL
90582 +bnx2_nvram_write_7790 bnx2_nvram_write 4-2 7790 NULL
90583 +dvb_dmxdev_set_buffer_size_55643 dvb_dmxdev_set_buffer_size 2 55643 NULL
90584 +tsi148_master_set_14685 tsi148_master_set 4 14685 NULL
90585 +ath6kl_wmi_set_appie_cmd_39266 ath6kl_wmi_set_appie_cmd 5 39266 NULL
90586 +probe_bios_17467 probe_bios 1 17467 NULL
90587 +vmw_execbuf_process_22885 vmw_execbuf_process 5 22885 NULL
90588 +ttm_bo_ioremap_31082 ttm_bo_ioremap 2-3 31082 NULL
90589 +mei_read_6507 mei_read 3 6507 NULL
90590 +lpfc_idiag_queinfo_read_55662 lpfc_idiag_queinfo_read 3 55662 NULL
90591 +mdc800_device_read_22896 mdc800_device_read 3 22896 NULL
90592 +rx_filter_protection_filter_read_39282 rx_filter_protection_filter_read 3 39282 NULL
90593 +sctp_setsockopt_context_31091 sctp_setsockopt_context 3 31091 NULL
90594 +il_dbgfs_tx_queue_read_55668 il_dbgfs_tx_queue_read 3 55668 NULL
90595 +SetArea_50835 SetArea 4 50835 NULL
90596 +tpm_read_50344 tpm_read 3 50344 NULL
90597 +newpart_47485 newpart 6 47485 NULL
90598 +jffs2_do_link_42048 jffs2_do_link 6 42048 NULL
90599 +compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3 31109 NULL
90600 +core_sys_select_47494 core_sys_select 1 47494 NULL
90601 +read_file_disable_ani_6536 read_file_disable_ani 3 6536 NULL
90602 +sisusb_write_mem_bulk_29678 sisusb_write_mem_bulk 4 29678 NULL
90603 +__vmalloc_node_39308 __vmalloc_node 1 39308 NULL
90604 +libfc_host_alloc_7917 libfc_host_alloc 2 7917 NULL
90605 +alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
90606 +unlink_simple_47506 unlink_simple 3 47506 NULL
90607 +rndis_set_oid_6547 rndis_set_oid 4 6547 NULL
90608 +wdm_read_6549 wdm_read 3 6549 NULL
90609 +init_map_ipmac_63896 init_map_ipmac 4-3 63896 NULL
90610 +tipc_multicast_49144 tipc_multicast 5 49144 NULL
90611 +nfs4_realloc_slot_table_22859 nfs4_realloc_slot_table 2 22859 NULL
90612 +fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL
90613 +xhci_alloc_stream_info_63902 xhci_alloc_stream_info 3 63902 NULL
90614 +__videobuf_alloc_uncached_55711 __videobuf_alloc_uncached 1 55711 NULL
90615 +rx_rx_cmplt_read_14753 rx_rx_cmplt_read 3 14753 NULL
90616 +nfc_hci_send_cmd_55714 nfc_hci_send_cmd 5 55714 NULL
90617 +pcpu_mem_zalloc_22948 pcpu_mem_zalloc 1 22948 NULL
90618 +filter_read_61692 filter_read 3 61692 NULL
90619 +mtdswap_init_55719 mtdswap_init 2 55719 NULL
90620 +rx_rx_phy_hdr_read_20950 rx_rx_phy_hdr_read 3 20950 NULL
90621 +debugfs_read_62535 debugfs_read 3 62535 NULL
90622 +w9966_v4l_read_31148 w9966_v4l_read 3 31148 NULL
90623 +process_vm_rw_47533 process_vm_rw 3-5 47533 NULL
90624 +divas_write_63901 divas_write 3 63901 NULL
90625 +alloc_sglist_22960 alloc_sglist 1-2-3 22960 NULL
90626 +caif_seqpkt_sendmsg_22961 caif_seqpkt_sendmsg 4 22961 NULL
90627 +snd_compr_write_63923 snd_compr_write 3 63923 NULL
90628 +cfpkt_split_47541 cfpkt_split 2 47541 NULL
90629 +__copy_from_user_nocache_39351 __copy_from_user_nocache 3 39351 NULL
90630 +btmrvl_pscmd_read_24308 btmrvl_pscmd_read 3 24308 NULL
90631 +__iio_allocate_kfifo_55738 __iio_allocate_kfifo 3-2 55738 NULL
90632 +ipw_write_59807 ipw_write 3 59807 NULL
90633 +sta_dev_read_14782 sta_dev_read 3 14782 NULL
90634 +tipc_send2port_63935 tipc_send2port 5 63935 NULL
90635 +do_write_log_from_user_39362 do_write_log_from_user 3 39362 NULL
90636 +ch_do_scsi_31171 ch_do_scsi 4 31171 NULL
90637 +afs_send_simple_reply_63940 afs_send_simple_reply 3 63940 NULL
90638 +mwifiex_cfg80211_mgmt_tx_12022 mwifiex_cfg80211_mgmt_tx 9 12022 NULL
90639 +cycx_setup_47562 cycx_setup 4 47562 NULL
90640 +remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
90641 +ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
90642 +set_local_name_55757 set_local_name 4 55757 NULL
90643 +printer_req_alloc_62687 printer_req_alloc 2 62687 NULL
90644 +btrfs_init_new_buffer_55761 btrfs_init_new_buffer 4 55761 NULL
90645 +read_ldt_47570 read_ldt 2 47570 NULL
90646 +regmap_name_read_file_39379 regmap_name_read_file 3 39379 NULL
90647 +pci_iomap_47575 pci_iomap 3 47575 NULL
90648 +acpi_ex_system_memory_space_handler_31192 acpi_ex_system_memory_space_handler 2 31192 NULL
90649 +kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL
90650 +module_alloc_update_bounds_rw_63233 module_alloc_update_bounds_rw 1 63233 NULL
90651 +ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
90652 +drm_ht_create_18853 drm_ht_create 2 18853 NULL
90653 +mtdchar_readoob_31200 mtdchar_readoob 4 31200 NULL
90654 +qlcnic_alloc_msix_entries_46160 qlcnic_alloc_msix_entries 2 46160 NULL
90655 +ieee80211_authentication_req_63973 ieee80211_authentication_req 3 63973 NULL
90656 +dn_alloc_skb_6631 dn_alloc_skb 2 6631 NULL
90657 +conf_read_55786 conf_read 3 55786 NULL
90658 +do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
90659 +rx_defrag_decrypt_failed_read_41411 rx_defrag_decrypt_failed_read 3 41411 NULL
90660 +viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
90661 +virtscsi_alloc_tgt_6643 virtscsi_alloc_tgt 2 6643 NULL
90662 +atm_get_addr_31221 atm_get_addr 3 31221 NULL
90663 +user_power_read_39414 user_power_read 3 39414 NULL
90664 +uwb_rc_neh_grok_event_55799 uwb_rc_neh_grok_event 3 55799 NULL
90665 +uea_request_47613 uea_request 4 47613 NULL
90666 +cache_read_pipefs_47615 cache_read_pipefs 3 47615 NULL
90667 +read_file_frameerrors_64001 read_file_frameerrors 3 64001 NULL
90668 +alloc_agpphysmem_i8xx_39427 alloc_agpphysmem_i8xx 1 39427 NULL
90669 +btrfs_find_create_tree_block_55812 btrfs_find_create_tree_block 3 55812 NULL
90670 +subdev_ioctl_28417 subdev_ioctl 2 28417 NULL
90671 +lcd_write_14857 lcd_write 3 14857 NULL nohasharray
90672 +__krealloc_14857 __krealloc 2 14857 &lcd_write_14857
90673 +_create_sg_bios_31244 _create_sg_bios 4 31244 NULL
90674 +get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
90675 +kmemdup_64015 kmemdup 2 64015 NULL
90676 +compat_sys_select_16131 compat_sys_select 1 16131 NULL
90677 +reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL nohasharray
90678 +unix_seqpacket_recvmsg_23062 unix_seqpacket_recvmsg 4 23062 &reiserfs_add_entry_23062
90679 +process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
90680 +tcf_csum_skb_nextlayer_64025 tcf_csum_skb_nextlayer 3 64025 NULL
90681 +oz_events_read_47535 oz_events_read 3 47535 NULL
90682 +sb16_copy_from_user_55836 sb16_copy_from_user 10-6-7 55836 NULL
90683 +resize_async_buffer_64031 resize_async_buffer 4 64031 NULL
90684 +sys_semop_39457 sys_semop 3 39457 NULL
90685 +vm_map_ram_23078 vm_map_ram 2 23078 NULL nohasharray
90686 +raw_sendmsg_23078 raw_sendmsg 4 23078 &vm_map_ram_23078
90687 +update_pmkid_2481 update_pmkid 4 2481 NULL
90688 +sriov_enable_migration_14889 sriov_enable_migration 2 14889 NULL
90689 +sep_lli_table_secure_dma_64042 sep_lli_table_secure_dma 2-3 64042 NULL
90690 +ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
90691 +acpi_os_allocate_14892 acpi_os_allocate 1 14892 NULL
90692 +hysdn_rx_netpkt_16136 hysdn_rx_netpkt 3 16136 NULL
90693 +setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
90694 +rt2x00debug_write_eeprom_23091 rt2x00debug_write_eeprom 3 23091 NULL
90695 +mpeg_read_6708 mpeg_read 3 6708 NULL
90696 +hcd_alloc_coherent_55862 hcd_alloc_coherent 5 55862 NULL
90697 +ntfs_ucstonls_23097 ntfs_ucstonls 3-5 23097 NULL
90698 +sky2_receive_13407 sky2_receive 2 13407 NULL
90699 +krealloc_14908 krealloc 2 14908 NULL
90700 +pipe_iov_copy_from_user_23102 pipe_iov_copy_from_user 3 23102 NULL
90701 +bt_skb_send_alloc_6581 bt_skb_send_alloc 2 6581 NULL
90702 +dgram_recvmsg_23104 dgram_recvmsg 4 23104 NULL
90703 +gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL
90704 +video_proc_write_6724 video_proc_write 3 6724 NULL
90705 +xlog_find_verify_log_record_18870 xlog_find_verify_log_record 2 18870 NULL
90706 +mwl8k_cmd_set_beacon_23110 mwl8k_cmd_set_beacon 4 23110 NULL
90707 +uvc_simplify_fraction_31303 uvc_simplify_fraction 3 31303 NULL
90708 +ieee80211_if_read_peer_45233 ieee80211_if_read_peer 3 45233 NULL
90709 +qla4xxx_alloc_work_44813 qla4xxx_alloc_work 2 44813 NULL
90710 +drm_mode_create_tv_properties_23122 drm_mode_create_tv_properties 2 23122 NULL
90711 +pm_qos_power_read_55891 pm_qos_power_read 3 55891 NULL
90712 +packet_recvmsg_47700 packet_recvmsg 4 47700 NULL
90713 +command_file_write_31318 command_file_write 3 31318 NULL
90714 +gspca_dev_probe2_59833 gspca_dev_probe2 4 59833 NULL
90715 +lbs_highrssi_read_64089 lbs_highrssi_read 3 64089 NULL
90716 +wep_default_key_count_read_43035 wep_default_key_count_read 3 43035 NULL
90717 +i915_cache_sharing_read_24775 i915_cache_sharing_read 3 24775 NULL
90718 +array_zalloc_7519 array_zalloc 1-2 7519 NULL
90719 +tx_frag_key_not_found_read_22971 tx_frag_key_not_found_read 3 22971 NULL
90720 +ipv6_renew_options_28867 ipv6_renew_options 5 28867 NULL
90721 +unix_dgram_recvmsg_14952 unix_dgram_recvmsg 4 14952 NULL
90722 +ca91cx42_master_set_23146 ca91cx42_master_set 4 23146 NULL
90723 +videobuf_read_stream_14956 videobuf_read_stream 3 14956 NULL
90724 +ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime_1589 ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 NULL
90725 +sisusbcon_clear_64329 sisusbcon_clear 4-3-5 64329 NULL
90726 +vxge_os_dma_malloc_46184 vxge_os_dma_malloc 2 46184 NULL
90727 +sfi_check_table_6772 sfi_check_table 1 6772 NULL
90728 +bits_to_user_47733 bits_to_user 2-3 47733 NULL
90729 +int_proc_write_39542 int_proc_write 3 39542 NULL
90730 +do_dccp_setsockopt_54377 do_dccp_setsockopt 5 54377 NULL nohasharray
90731 +intel_sdvo_write_cmd_54377 intel_sdvo_write_cmd 4 54377 &do_dccp_setsockopt_54377
90732 +read_file_ani_23161 read_file_ani 3 23161 NULL
90733 +carl9170_debugfs_read_47738 carl9170_debugfs_read 3 47738 NULL
90734 +iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL
90735 +ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL
90736 +pp_write_39554 pp_write 3 39554 NULL
90737 +ioremap_23172 ioremap 1-2 23172 NULL
90738 +mvumi_alloc_mem_resource_47750 mvumi_alloc_mem_resource 3 47750 NULL
90739 +hdlcdrv_register_6792 hdlcdrv_register 2 6792 NULL
90740 +usblp_write_23178 usblp_write 3 23178 NULL
90741 +sel_read_policy_55947 sel_read_policy 3 55947 NULL
90742 +xprt_rdma_allocate_31372 xprt_rdma_allocate 2 31372 NULL
90743 +vb2_vmalloc_get_userptr_31374 vb2_vmalloc_get_userptr 3 31374 NULL
90744 +tx_tx_done_data_read_6799 tx_tx_done_data_read 3 6799 NULL
90745 +datablob_format_39571 datablob_format 2 39571 NULL nohasharray
90746 +ieee80211_if_read_fwded_mcast_39571 ieee80211_if_read_fwded_mcast 3 39571 &datablob_format_39571
90747 +fix_unclean_leb_23188 fix_unclean_leb 3 23188 NULL
90748 +simple_read_from_buffer_55957 simple_read_from_buffer 2-5 55957 NULL
90749 +dlfb_ops_write_64150 dlfb_ops_write 3 64150 NULL
90750 +vmalloc_32_1135 vmalloc_32 1 1135 NULL
90751 +tx_tx_imm_resp_read_55964 tx_tx_imm_resp_read 3 55964 NULL
90752 +tx_tx_exch_pending_read_53018 tx_tx_exch_pending_read 3 53018 NULL
90753 +__team_options_register_63941 __team_options_register 3 63941 NULL
90754 +error_error_numll_frame_cts_start_read_47781 error_error_numll_frame_cts_start_read 3 47781 NULL
90755 +ip_ufo_append_data_12775 ip_ufo_append_data 6-7-8 12775 NULL
90756 +rvmalloc_46873 rvmalloc 1 46873 NULL
90757 +vmap_15025 vmap 2 15025 NULL
90758 +key_ifindex_read_31411 key_ifindex_read 3 31411 NULL
90759 +mcs7830_set_reg_31413 mcs7830_set_reg 3 31413 NULL
90760 +mon_bin_read_6841 mon_bin_read 3 6841 NULL
90761 +tty_buffer_request_room_23228 tty_buffer_request_room 2 23228 NULL
90762 +xlog_get_bp_23229 xlog_get_bp 2 23229 NULL
90763 +snd_cs4281_BA0_read_6847 snd_cs4281_BA0_read 5 6847 NULL
90764 +nfs_idmap_get_key_39616 nfs_idmap_get_key 2 39616 NULL
90765 +rxrpc_client_sendmsg_23236 rxrpc_client_sendmsg 5 23236 NULL
90766 +TSS_checkhmac1_31429 TSS_checkhmac1 5 31429 NULL
90767 +ttm_object_device_init_10321 ttm_object_device_init 2 10321 NULL
90768 +ping_recvmsg_25597 ping_recvmsg 4 25597 NULL
90769 +macvtap_recvmsg_63949 macvtap_recvmsg 4 63949 NULL
90770 +ceph_msgpool_init_34599 ceph_msgpool_init 4 34599 NULL
90771 +cld_pipe_downcall_15058 cld_pipe_downcall 3 15058 NULL
90772 +ssb_bus_pcmciabus_register_56020 ssb_bus_pcmciabus_register 3 56020 NULL
90773 +fm_send_cmd_39639 fm_send_cmd 5 39639 NULL
90774 +ip6gre_err_19869 ip6gre_err 5 19869 NULL
90775 +nvme_alloc_iod_56027 nvme_alloc_iod 1 56027 NULL
90776 +opera1_xilinx_rw_31453 opera1_xilinx_rw 5 31453 NULL
90777 +nfs4_write_cached_acl_15070 nfs4_write_cached_acl 4 15070 NULL
90778 +ntfs_copy_from_user_15072 ntfs_copy_from_user 3-5 15072 NULL
90779 +sctp_recvmsg_23265 sctp_recvmsg 4 23265 NULL
90780 +snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
90781 +get_new_cssid_51665 get_new_cssid 2 51665 NULL
90782 +raw_seticmpfilter_6888 raw_seticmpfilter 3 6888 NULL
90783 +prism2_info_hostscanresults_39657 prism2_info_hostscanresults 3 39657 NULL
90784 +ced_ioctl_36647 ced_ioctl 2 36647 NULL
90785 +dlmfs_file_write_6892 dlmfs_file_write 3 6892 NULL
90786 +__videobuf_alloc_vb_5665 __videobuf_alloc_vb 1 5665 NULL
90787 +kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
90788 +redrat3_transmit_ir_64244 redrat3_transmit_ir 3 64244 NULL
90789 +__alloc_extent_buffer_15093 __alloc_extent_buffer 3 15093 NULL
90790 +v4l_stk_read_39672 v4l_stk_read 3 39672 NULL
90791 +dccp_sendmsg_56058 dccp_sendmsg 4 56058 NULL
90792 +alg_setkey_31485 alg_setkey 3 31485 NULL
90793 +do_surface_dirty_sou_39678 do_surface_dirty_sou 7 39678 NULL
90794 +spi_show_regs_6911 spi_show_regs 3 6911 NULL nohasharray
90795 +proc_sessionid_read_6911 proc_sessionid_read 3 6911 &spi_show_regs_6911
90796 +qsfp_2_read_31491 qsfp_2_read 3 31491 NULL
90797 +vhci_read_47878 vhci_read 3 47878 NULL
90798 +__alloc_bootmem_31498 __alloc_bootmem 1 31498 NULL
90799 +ps_poll_ps_poll_utilization_read_39383 ps_poll_ps_poll_utilization_read 3 39383 NULL
90800 +i2cdev_write_23310 i2cdev_write 3 23310 NULL
90801 +keyctl_instantiate_key_common_47889 keyctl_instantiate_key_common 4 47889 NULL
90802 +kvm_read_hva_44847 kvm_read_hva 3 44847 NULL
90803 +ftrace_pid_write_39710 ftrace_pid_write 3 39710 NULL
90804 +event_id_read_64288 event_id_read 3 64288 NULL nohasharray
90805 +xfs_dir_cilookup_result_64288 xfs_dir_cilookup_result 3 64288 &event_id_read_64288
90806 +osd_req_read_sg_47905 osd_req_read_sg 5 47905 NULL
90807 +sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
90808 +pscsi_get_bio_56103 pscsi_get_bio 1 56103 NULL
90809 +timeout_read_47915 timeout_read 3 47915 NULL
90810 +hidraw_write_31536 hidraw_write 3 31536 NULL
90811 +error_error_bar_retry_read_64305 error_error_bar_retry_read 3 64305 NULL
90812 +page_readlink_23346 page_readlink 3 23346 NULL
90813 +videobuf_dma_init_kernel_6963 videobuf_dma_init_kernel 3 6963 NULL
90814 +comedi_write_47926 comedi_write 3 47926 NULL
90815 +usb_alloc_stream_buffers_56123 usb_alloc_stream_buffers 3 56123 NULL
90816 +dsp_write_46218 dsp_write 2 46218 NULL
90817 +kmem_zalloc_large_56128 kmem_zalloc_large 1 56128 NULL
90818 +usbvision_read_31555 usbvision_read 3 31555 NULL
90819 +pd_video_read_24510 pd_video_read 3 24510 NULL
90820 +crypto_authenc_esn_setkey_6985 crypto_authenc_esn_setkey 3 6985 NULL
90821 +sel_read_handle_status_56139 sel_read_handle_status 3 56139 NULL
90822 +request_key_async_6990 request_key_async 4 6990 NULL
90823 +ts_write_64336 ts_write 3 64336 NULL
90824 +handle_response_55951 handle_response 5 55951 NULL
90825 +usbtmc_write_64340 usbtmc_write 3 64340 NULL
90826 +tx_frag_tkip_called_read_31575 tx_frag_tkip_called_read 3 31575 NULL
90827 +r871x_set_wpa_ie_7000 r871x_set_wpa_ie 3 7000 NULL
90828 +iscsi_create_endpoint_15193 iscsi_create_endpoint 1 15193 NULL
90829 +osst_write_31581 osst_write 3 31581 NULL
90830 +tracing_cpumask_read_7010 tracing_cpumask_read 3 7010 NULL
90831 +rawv6_setsockopt_56165 rawv6_setsockopt 5 56165 NULL
90832 +wm8350_block_write_19727 wm8350_block_write 3 19727 NULL
90833 +diva_xdi_write_63975 diva_xdi_write 4 63975 NULL
90834 +llc_alloc_frame_64366 llc_alloc_frame 4 64366 NULL
90835 +iwl_dbgfs_ucode_tracing_read_47983 iwl_dbgfs_ucode_tracing_read 3 47983 NULL nohasharray
90836 +mempool_resize_47983 mempool_resize 2 47983 &iwl_dbgfs_ucode_tracing_read_47983
90837 +mangle_packet_18920 mangle_packet 7-9 18920 NULL
90838 +bfad_debugfs_write_regrd_15218 bfad_debugfs_write_regrd 3 15218 NULL
90839 +tx_frag_need_fragmentation_read_50153 tx_frag_need_fragmentation_read 3 50153 NULL
90840 +iscsi_change_queue_depth_23416 iscsi_change_queue_depth 2 23416 NULL
90841 +ib_umad_write_47993 ib_umad_write 3 47993 NULL
90842 +ilo_write_64378 ilo_write 3 64378 NULL
90843 +btrfs_map_block_64379 btrfs_map_block 3 64379 NULL
90844 +nilfs_alloc_seg_bio_64383 nilfs_alloc_seg_bio 3 64383 NULL
90845 +vzalloc_node_23424 vzalloc_node 1 23424 NULL
90846 +arvo_sysfs_read_31617 arvo_sysfs_read 6 31617 NULL
90847 +ipath_get_base_info_7043 ipath_get_base_info 3 7043 NULL
90848 +variax_alloc_sysex_buffer_15237 variax_alloc_sysex_buffer 3 15237 NULL
90849 +copy_from_user_17559 copy_from_user 3 17559 NULL
90850 +ffs_epfile_write_48014 ffs_epfile_write 3 48014 NULL
90851 +sys_migrate_pages_39825 sys_migrate_pages 2 39825 NULL
90852 +ir_lirc_transmit_ir_64403 ir_lirc_transmit_ir 3 64403 NULL
90853 +pidlist_allocate_64404 pidlist_allocate 1 64404 NULL
90854 +videobuf_read_one_31637 videobuf_read_one 3 31637 NULL
90855 +rx_hdr_overflow_read_64407 rx_hdr_overflow_read 3 64407 NULL
90856 +hidraw_send_report_23449 hidraw_send_report 3 23449 NULL
90857 +beiscsi_process_async_pdu_39834 beiscsi_process_async_pdu 7 39834 NULL
90858 +sctp_tsnmap_mark_35929 sctp_tsnmap_mark 2 35929 NULL
90859 +hci_sock_recvmsg_7072 hci_sock_recvmsg 4 7072 NULL
90860 +snd_card_create_64418 snd_card_create 4 64418 NULL nohasharray
90861 +keyctl_get_security_64418 keyctl_get_security 3 64418 &snd_card_create_64418
90862 +pod_alloc_sysex_buffer_31651 pod_alloc_sysex_buffer 3 31651 NULL
90863 +cp210x_get_config_56229 cp210x_get_config 4 56229 NULL
90864 +pppoe_sendmsg_48039 pppoe_sendmsg 4 48039 NULL
90865 +udl_prime_create_57159 udl_prime_create 2 57159 NULL
90866 +oom_adj_write_64428 oom_adj_write 3 64428 NULL
90867 +dn_nsp_send_disc_23469 dn_nsp_send_disc 2 23469 NULL
90868 +do_ipt_set_ctl_56238 do_ipt_set_ctl 4 56238 NULL
90869 +ping_sendmsg_3782 ping_sendmsg 4 3782 NULL
90870 +beacon_interval_read_7091 beacon_interval_read 3 7091 NULL
90871 +__lgread_31668 __lgread 4 31668 NULL
90872 +scrub_setup_recheck_block_56245 scrub_setup_recheck_block 4-3 56245 NULL
90873 +fd_copyin_56247 fd_copyin 3 56247 NULL
90874 +wpan_phy_alloc_48056 wpan_phy_alloc 1 48056 NULL
90875 +ax25_recvmsg_64441 ax25_recvmsg 4 64441 NULL
90876 +sys_connect_15291 sys_connect 3 15291 NULL nohasharray
90877 +xlate_dev_mem_ptr_15291 xlate_dev_mem_ptr 1 15291 &sys_connect_15291
90878 +linear_conf_23485 linear_conf 2 23485 NULL nohasharray
90879 +divasa_remap_pci_bar_23485 divasa_remap_pci_bar 3-4 23485 &linear_conf_23485
90880 +posix_acl_alloc_48063 posix_acl_alloc 1 48063 NULL
90881 +ieee80211_if_read_element_ttl_18869 ieee80211_if_read_element_ttl 3 18869 NULL
90882 +_usb_writeN_sync_31682 _usb_writeN_sync 4 31682 NULL
90883 +pipeline_enc_rx_stat_fifo_int_read_7107 pipeline_enc_rx_stat_fifo_int_read 3 7107 NULL
90884 +forced_ps_read_31685 forced_ps_read 3 31685 NULL
90885 +event_filter_read_23494 event_filter_read 3 23494 NULL
90886 +tpm_tis_init_15304 tpm_tis_init 2-3 15304 NULL
90887 +fcoe_ctlr_send_keep_alive_15308 fcoe_ctlr_send_keep_alive 3 15308 NULL
90888 +pipeline_tcp_tx_stat_fifo_int_read_32589 pipeline_tcp_tx_stat_fifo_int_read 3 32589 NULL
90889 +sys_setsockopt_35320 sys_setsockopt 5 35320 NULL
90890 +il4965_ucode_general_stats_read_56277 il4965_ucode_general_stats_read 3 56277 NULL
90891 +pkt_add_39897 pkt_add 3 39897 NULL
90892 +RESIZE_IF_NEEDED_56286 RESIZE_IF_NEEDED 2 56286 NULL
90893 +send_mpa_reject_7135 send_mpa_reject 3 7135 NULL
90894 +sctp_make_op_error_7057 sctp_make_op_error 5-6 7057 NULL
90895 +mmc_alloc_host_48097 mmc_alloc_host 1 48097 NULL
90896 +read_file_modal_eeprom_39909 read_file_modal_eeprom 3 39909 NULL
90897 +skb_copy_datagram_const_iovec_48102 skb_copy_datagram_const_iovec 4-2-5 48102 NULL
90898 +dvb_aplay_56296 dvb_aplay 3 56296 NULL
90899 +gen_pool_add_virt_39913 gen_pool_add_virt 4 39913 NULL
90900 +dw210x_op_rw_39915 dw210x_op_rw 6 39915 NULL
90901 +p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL
90902 +sctp_make_asconf_ack_31726 sctp_make_asconf_ack 3 31726 NULL
90903 +aes_encrypt_interrupt_read_39919 aes_encrypt_interrupt_read 3 39919 NULL
90904 +ima_show_measurements_count_23536 ima_show_measurements_count 3 23536 NULL
90905 +alloc_ring_15345 alloc_ring 2-4 15345 NULL
90906 +alloc_cpu_rmap_65363 alloc_cpu_rmap 1 65363 NULL
90907 +remove_uuid_64505 remove_uuid 4 64505 NULL
90908 +shmem_pwrite_slow_31741 shmem_pwrite_slow 3 31741 NULL
90909 +NCR_700_change_queue_depth_31742 NCR_700_change_queue_depth 2 31742 NULL
90910 +create_table_16213 create_table 2 16213 NULL
90911 +acpi_ut_create_string_object_15360 acpi_ut_create_string_object 1 15360 NULL
90912 +pipeline_pre_to_defrag_swi_read_56321 pipeline_pre_to_defrag_swi_read 3 56321 NULL
90913 +dbg_leb_change_23555 dbg_leb_change 4 23555 NULL
90914 +vmw_framebuffer_surface_dirty_48132 vmw_framebuffer_surface_dirty 6 48132 NULL
90915 +bcm_char_read_31750 bcm_char_read 3 31750 NULL
90916 +snd_seq_device_new_31753 snd_seq_device_new 4 31753 NULL
90917 +journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL
90918 +set_discoverable_48141 set_discoverable 4 48141 NULL
90919 +compat_sys_process_vm_readv_15374 compat_sys_process_vm_readv 3-5 15374 NULL
90920 +ses_send_diag_64527 ses_send_diag 4 64527 NULL
90921 +tcp_match_skb_to_sack_23568 tcp_match_skb_to_sack 4 23568 NULL
90922 +snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL
90923 +fq_codel_zalloc_15378 fq_codel_zalloc 1 15378 NULL
90924 +tty_prepare_flip_string_39955 tty_prepare_flip_string 3 39955 NULL
90925 +__tcp_push_pending_frames_48148 __tcp_push_pending_frames 2 48148 NULL
90926 +iwl_dbgfs_interrupt_read_23574 iwl_dbgfs_interrupt_read 3 23574 NULL
90927 +prctl_set_mm_64538 prctl_set_mm 3 64538 NULL
90928 +ipv6_recv_error_56347 ipv6_recv_error 3 56347 NULL
90929 +vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL
90930 +isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL
90931 +c4iw_id_table_alloc_48163 c4iw_id_table_alloc 3 48163 NULL
90932 +dma_push_rx_39973 dma_push_rx 2 39973 NULL
90933 +regmap_register_patch_21681 regmap_register_patch 3 21681 NULL
90934 +broadsheetfb_write_39976 broadsheetfb_write 3 39976 NULL
90935 +cfpkt_create_pfx_23594 cfpkt_create_pfx 1-2 23594 NULL
90936 +pipeline_csum_to_rx_xfer_swi_read_15403 pipeline_csum_to_rx_xfer_swi_read 3 15403 NULL
90937 +iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 4 56368 NULL
90938 +dev_read_56369 dev_read 3 56369 NULL
90939 +mthca_array_init_39987 mthca_array_init 2 39987 NULL
90940 +alloc_dummy_extent_buffer_56374 alloc_dummy_extent_buffer 2 56374 NULL
90941 +diva_os_alloc_message_buffer_64568 diva_os_alloc_message_buffer 1 64568 NULL
90942 +dgrp_dpa_read_1204 dgrp_dpa_read 3 1204 NULL
90943 +alloc_cc770dev_48186 alloc_cc770dev 1 48186 NULL
90944 +init_ipath_48187 init_ipath 1 48187 NULL
90945 +isr_rx_procs_read_31804 isr_rx_procs_read 3 31804 NULL
90946 +__videobuf_copy_to_user_15423 __videobuf_copy_to_user 4 15423 NULL
90947 +tracing_saved_cmdlines_read_21434 tracing_saved_cmdlines_read 3 21434 NULL
90948 +tx_tx_burst_programmed_read_20320 tx_tx_burst_programmed_read 3 20320 NULL
90949 +sys32_ipc_7238 sys32_ipc 3 7238 NULL
90950 +sta_last_signal_read_31818 sta_last_signal_read 3 31818 NULL
90951 +rtllib_alloc_txb_21687 rtllib_alloc_txb 1-2 21687 NULL
90952 +dvb_ca_en50221_io_write_43533 dvb_ca_en50221_io_write 3 43533 NULL
90953 +ddp_ppod_write_idata_25610 ddp_ppod_write_idata 5 25610 NULL
90954 +ocfs2_control_read_56405 ocfs2_control_read 3 56405 NULL
90955 +nf_nat_icmpv6_reply_translation_40023 nf_nat_icmpv6_reply_translation 5 40023 NULL nohasharray
90956 +ivtvfb_write_40023 ivtvfb_write 3 40023 &nf_nat_icmpv6_reply_translation_40023
90957 +hpsa_change_queue_depth_15449 hpsa_change_queue_depth 2 15449 NULL
90958 +tcp_write_xmit_64602 tcp_write_xmit 2 64602 NULL
90959 +use_pool_64607 use_pool 2 64607 NULL
90960 +__get_vm_area_caller_56416 __get_vm_area_caller 1 56416 NULL nohasharray
90961 +acpi_os_write_memory_56416 acpi_os_write_memory 1-3 56416 &__get_vm_area_caller_56416
90962 +store_msg_56417 store_msg 3 56417 NULL
90963 +__i2400mu_send_barker_23652 __i2400mu_send_barker 3 23652 NULL
90964 +nilfs_readpages_48229 nilfs_readpages 4 48229 NULL
90965 +datablob_hmac_append_40038 datablob_hmac_append 3 40038 NULL
90966 +read_file_recv_48232 read_file_recv 3 48232 NULL
90967 +xfs_handle_to_dentry_12135 xfs_handle_to_dentry 3 12135 NULL
90968 +set_tpl_pfs_27490 set_tpl_pfs 3 27490 NULL
90969 +fanotify_write_64623 fanotify_write 3 64623 NULL
90970 +batadv_add_packet_12136 batadv_add_packet 3 12136 NULL
90971 +rx_rate_rx_frames_per_rates_read_7282 rx_rate_rx_frames_per_rates_read 3 7282 NULL
90972 +fl_create_56435 fl_create 5 56435 NULL
90973 +gnttab_map_56439 gnttab_map 2 56439 NULL
90974 +nouveau_devinit_create__35348 nouveau_devinit_create_ 4 35348 NULL
90975 +nfsctl_transaction_read_48250 nfsctl_transaction_read 3 48250 NULL
90976 +event_rx_pool_read_25792 event_rx_pool_read 3 25792 NULL
90977 +cx231xx_init_isoc_56453 cx231xx_init_isoc 3-2 56453 NULL
90978 +drm_mode_crtc_set_gamma_size_31881 drm_mode_crtc_set_gamma_size 2 31881 NULL
90979 +set_connectable_56458 set_connectable 4 56458 NULL
90980 +a2mp_chan_alloc_skb_cb_27159 a2mp_chan_alloc_skb_cb 2 27159 NULL
90981 +nfc_hci_send_response_56462 nfc_hci_send_response 5 56462 NULL
90982 +add_port_54941 add_port 2 54941 NULL
90983 +osd_req_list_partition_objects_56464 osd_req_list_partition_objects 5 56464 NULL
90984 +cx18_read_23699 cx18_read 3 23699 NULL
90985 +tlbflush_read_file_64661 tlbflush_read_file 3 64661 NULL
90986 +ieee80211_if_read_last_beacon_31257 ieee80211_if_read_last_beacon 3 31257 NULL
90987 +efx_tsoh_get_buffer_64664 efx_tsoh_get_buffer 3 64664 NULL
90988 +rx_rx_out_of_mpdu_nodes_read_64668 rx_rx_out_of_mpdu_nodes_read 3 64668 NULL
90989 +ddb_output_write_31902 ddb_output_write 3 31902 NULL
90990 +send_set_info_48288 send_set_info 7 48288 NULL
90991 +sock_alloc_send_skb_23720 sock_alloc_send_skb 2 23720 NULL
90992 +wep_decrypt_fail_read_58567 wep_decrypt_fail_read 3 58567 NULL
90993 +set_disc_pwup_pfs_48300 set_disc_pwup_pfs 3 48300 NULL
90994 +lpfc_idiag_extacc_read_48301 lpfc_idiag_extacc_read 3 48301 NULL
90995 +p9_check_zc_errors_15534 p9_check_zc_errors 4 15534 NULL
90996 +new_dir_31919 new_dir 3 31919 NULL
90997 +kmem_alloc_31920 kmem_alloc 1 31920 NULL
90998 +timblogiw_read_48305 timblogiw_read 3 48305 NULL
90999 +sec_bulk_write_64691 sec_bulk_write 3 64691 NULL
91000 +mgmt_control_7349 mgmt_control 3 7349 NULL
91001 +hash_setkey_48310 hash_setkey 3 48310 NULL
91002 +ql_process_mac_rx_page_15543 ql_process_mac_rx_page 4 15543 NULL
91003 +hfcpci_empty_fifo_2427 hfcpci_empty_fifo 4 2427 NULL
91004 +ieee80211_if_read_bssid_35161 ieee80211_if_read_bssid 3 35161 NULL
91005 +sctp_setsockopt_delayed_ack_40129 sctp_setsockopt_delayed_ack 3 40129 NULL
91006 +cfg80211_connect_result_56515 cfg80211_connect_result 4-6 56515 NULL
91007 +rx_rx_dropped_frame_read_23748 rx_rx_dropped_frame_read 3 23748 NULL
91008 +iov_iter_copy_from_user_31942 iov_iter_copy_from_user 4 31942 NULL
91009 +ql_process_mac_rx_skb_6689 ql_process_mac_rx_skb 4 6689 NULL
91010 +ieee80211_if_read_dot11MeshHWMPactivePathTimeout_7368 ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 NULL
91011 +pwr_elp_enter_read_5324 pwr_elp_enter_read 3 5324 NULL
91012 +dapm_bias_read_file_64715 dapm_bias_read_file 3 64715 NULL
91013 +vb2_write_31948 vb2_write 3 31948 NULL
91014 +cfg80211_roamed_32632 cfg80211_roamed 5-7 32632 NULL
91015 +bnx2fc_process_unsol_compl_15576 bnx2fc_process_unsol_compl 2 15576 NULL
91016 +l1oip_socket_recv_56537 l1oip_socket_recv 6 56537 NULL
91017 +ip_options_get_56538 ip_options_get 4 56538 NULL
91018 +write_62671 write 3 62671 NULL
91019 +copy_from_user_toio_31966 copy_from_user_toio 3 31966 NULL
91020 +tm6000_i2c_send_regs_20250 tm6000_i2c_send_regs 5 20250 NULL
91021 +squashfs_read_inode_lookup_table_64739 squashfs_read_inode_lookup_table 4 64739 NULL
91022 +sl_change_mtu_7396 sl_change_mtu 2 7396 NULL
91023 +ceph_copy_page_vector_to_user_31270 ceph_copy_page_vector_to_user 3-4 31270 NULL
91024 +allocate_cnodes_5329 allocate_cnodes 1 5329 NULL
91025 +skb_add_data_48363 skb_add_data 3 48363 NULL
91026 +bio_map_kern_64751 bio_map_kern 3 64751 NULL
91027 +alloc_apertures_56561 alloc_apertures 1 56561 NULL
91028 +iscsi_complete_pdu_48372 iscsi_complete_pdu 4 48372 NULL
91029 +drm_property_create_blob_7414 drm_property_create_blob 2 7414 NULL
91030 +rx_path_reset_read_23801 rx_path_reset_read 3 23801 NULL
91031 +rs_sta_dbgfs_stats_table_read_56573 rs_sta_dbgfs_stats_table_read 3 56573 NULL
91032 +debug_debug2_read_30526 debug_debug2_read 3 30526 NULL
91033 +compat_fillonedir_15620 compat_fillonedir 3 15620 NULL
91034 +set_dis_tap_pfs_15621 set_dis_tap_pfs 3 15621 NULL
91035 +ipr_alloc_ucode_buffer_40199 ipr_alloc_ucode_buffer 1 40199 NULL
91036 +dsp_cmx_send_member_15625 dsp_cmx_send_member 2 15625 NULL
91037 +portcntrs_2_read_56586 portcntrs_2_read 3 56586 NULL
91038 +system_enable_read_25815 system_enable_read 3 25815 NULL
91039 +allocate_probes_40204 allocate_probes 1 40204 NULL
91040 +sd_alloc_ctl_entry_29708 sd_alloc_ctl_entry 1 29708 NULL
91041 +proc_loginuid_read_15631 proc_loginuid_read 3 15631 NULL
91042 +__earlyonly_bootmem_alloc_23824 __earlyonly_bootmem_alloc 2 23824 NULL
91043 +realloc_buffer_25816 realloc_buffer 2 25816 NULL
91044 +isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL
91045 +rx_filter_ibss_filter_read_50167 rx_filter_ibss_filter_read 3 50167 NULL
91046 +ip_options_get_alloc_7448 ip_options_get_alloc 1 7448 NULL
91047 +tomoyo_scan_bprm_15642 tomoyo_scan_bprm 2-4 15642 NULL nohasharray
91048 +pipeline_hs_tx_stat_fifo_int_read_15642 pipeline_hs_tx_stat_fifo_int_read 3 15642 &tomoyo_scan_bprm_15642
91049 +ieee80211_if_read_rc_rateidx_mask_5ghz_27183 ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 NULL
91050 +au0828_v4l2_read_40220 au0828_v4l2_read 3 40220 NULL
91051 +lbs_debugfs_write_48413 lbs_debugfs_write 3 48413 NULL
91052 +fs_path_add_15648 fs_path_add 3 15648 NULL
91053 +event_filter_write_56609 event_filter_write 3 56609 NULL
91054 +xfs_buf_read_map_40226 xfs_buf_read_map 3 40226 NULL
91055 +ms_rw_multi_sector_7459 ms_rw_multi_sector 3-4 7459 NULL
91056 +xsd_read_15653 xsd_read 3 15653 NULL
91057 +pwr_tx_without_ps_read_48423 pwr_tx_without_ps_read 3 48423 NULL
91058 +p54_init_common_23850 p54_init_common 1 23850 NULL
91059 +ubi_more_leb_change_data_63534 ubi_more_leb_change_data 4 63534 NULL
91060 +vmw_cursor_update_dmabuf_32045 vmw_cursor_update_dmabuf 3-4 32045 NULL
91061 +sys_sched_setaffinity_32046 sys_sched_setaffinity 2 32046 NULL
91062 +garp_request_join_7471 garp_request_join 4 7471 NULL
91063 +ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL
91064 +copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
91065 +unifi_read_14899 unifi_read 3 14899 NULL
91066 +il_dbgfs_missed_beacon_read_59956 il_dbgfs_missed_beacon_read 3 59956 NULL
91067 +compat_sys_msgrcv_7482 compat_sys_msgrcv 2 7482 NULL
91068 +do_kimage_alloc_64827 do_kimage_alloc 3 64827 NULL
91069 +brcmf_usb_dl_cmd_53130 brcmf_usb_dl_cmd 4 53130 NULL
91070 +proc_scsi_devinfo_write_32064 proc_scsi_devinfo_write 3 32064 NULL
91071 +pwr_missing_bcns_read_25824 pwr_missing_bcns_read 3 25824 NULL
91072 +ipv6_recv_rxpmtu_7142 ipv6_recv_rxpmtu 3 7142 NULL
91073 +ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL
91074 +uvc_debugfs_stats_read_56651 uvc_debugfs_stats_read 3 56651 NULL
91075 +ieee80211_if_read_channel_type_23884 ieee80211_if_read_channel_type 3 23884 NULL
91076 +tx_frag_mpdu_alloc_failed_read_41167 tx_frag_mpdu_alloc_failed_read 3 41167 NULL
91077 +tun_recvmsg_48463 tun_recvmsg 4 48463 NULL
91078 +uf_sme_queue_message_15697 uf_sme_queue_message 3 15697 NULL
91079 +gdth_search_isa_58595 gdth_search_isa 1 58595 NULL
91080 +sdhci_alloc_host_7509 sdhci_alloc_host 2 7509 NULL
91081 +_xfs_buf_get_pages_46811 _xfs_buf_get_pages 2 46811 NULL
91082 +rx_xfr_hint_trig_read_40283 rx_xfr_hint_trig_read 3 40283 NULL
91083 +iwch_reject_cr_23901 iwch_reject_cr 3 23901 NULL
91084 +altera_set_dr_pre_64862 altera_set_dr_pre 2 64862 NULL
91085 +bio_alloc_32095 bio_alloc 2 32095 NULL
91086 +shmem_setxattr_55867 shmem_setxattr 4 55867 NULL
91087 +rfcomm_sock_setsockopt_18254 rfcomm_sock_setsockopt 5 18254 NULL
91088 +ath6kl_fwlog_read_32101 ath6kl_fwlog_read 3 32101 NULL
91089 +add_new_gdb_meta_bg_23911 add_new_gdb_meta_bg 3 23911 NULL
91090 +r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL
91091 +snd_gus_dram_read_56686 snd_gus_dram_read 4 56686 NULL
91092 +ubi_io_write_data_40305 ubi_io_write_data 4-5 40305 NULL
91093 +send_control_msg_48498 send_control_msg 6 48498 NULL
91094 +ps_poll_upsd_timeouts_read_36755 ps_poll_upsd_timeouts_read 3 36755 NULL
91095 +mlx4_en_create_tx_ring_48501 mlx4_en_create_tx_ring 4 48501 NULL
91096 +ffs_epfile_io_64886 ffs_epfile_io 3 64886 NULL
91097 +request_key_async_with_auxdata_46624 request_key_async_with_auxdata 4 46624 NULL
91098 +diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL
91099 +dvb_ringbuffer_read_user_56702 dvb_ringbuffer_read_user 3 56702 NULL
91100 +uvc_alloc_entity_20836 uvc_alloc_entity 4-3 20836 NULL
91101 +batadv_tt_changes_fill_buff_40323 batadv_tt_changes_fill_buff 4 40323 NULL
91102 +__alloc_skb_23940 __alloc_skb 1 23940 NULL
91103 +sta_flags_read_56710 sta_flags_read 3 56710 NULL
91104 +ipv6_getsockopt_sticky_56711 ipv6_getsockopt_sticky 5 56711 NULL
91105 +HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
91106 +smk_read_mapped_7562 smk_read_mapped 3 7562 NULL
91107 +alloc_tx_32143 alloc_tx 2 32143 NULL
91108 +wiimote_hid_send_48528 wiimote_hid_send 3 48528 NULL
91109 +compat_do_arpt_set_ctl_12184 compat_do_arpt_set_ctl 4 12184 NULL
91110 +hsc_write_55875 hsc_write 3 55875 NULL
91111 +cifs_setxattr_23957 cifs_setxattr 4 23957 NULL
91112 +do_test_15766 do_test 1 15766 NULL
91113 +ieee80211_if_read_ave_beacon_64924 ieee80211_if_read_ave_beacon 3 64924 NULL
91114 +ip_recv_error_23109 ip_recv_error 3 23109 NULL
91115 +named_distribute_48544 named_distribute 4 48544 NULL
91116 +ip_generic_getfrag_12187 ip_generic_getfrag 3-4 12187 NULL
91117 +venus_link_32165 venus_link 5 32165 NULL
91118 +event_rx_mem_empty_read_40363 event_rx_mem_empty_read 3 40363 NULL
91119 +drm_agp_bind_pages_56748 drm_agp_bind_pages 3 56748 NULL
91120 +vmw_kms_present_38130 vmw_kms_present 9 38130 NULL
91121 +btrfsic_map_block_56751 btrfsic_map_block 2 56751 NULL
91122 +set_std_nic_pfs_15792 set_std_nic_pfs 3 15792 NULL
91123 +ubifs_wbuf_write_nolock_64946 ubifs_wbuf_write_nolock 3 64946 NULL
91124 +usblp_new_writeurb_22894 usblp_new_writeurb 2 22894 NULL
91125 +llcp_sock_recvmsg_13556 llcp_sock_recvmsg 4 13556 NULL
91126 +smk_read_direct_15803 smk_read_direct 3 15803 NULL
91127 +fwnet_incoming_packet_40380 fwnet_incoming_packet 3 40380 NULL
91128 +groups_alloc_7614 groups_alloc 1 7614 NULL
91129 +cxgb_alloc_mem_24007 cxgb_alloc_mem 1 24007 NULL
91130 +traceprobe_probes_write_64969 traceprobe_probes_write 3 64969 NULL
91131 +suspend_dtim_interval_read_64971 suspend_dtim_interval_read 3 64971 NULL
91132 +ext_sd_execute_read_data_48589 ext_sd_execute_read_data 9 48589 NULL
91133 +afs_proc_rootcell_write_15822 afs_proc_rootcell_write 3 15822 NULL
91134 +__carl9170_rx_56784 __carl9170_rx 3 56784 NULL
91135 +__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL
91136 +oprofilefs_ulong_from_user_57251 oprofilefs_ulong_from_user 3 57251 NULL
91137 +ioctl_private_iw_point_1273 ioctl_private_iw_point 7 1273 NULL
91138 +tcf_csum_ipv4_tcp_39713 tcf_csum_ipv4_tcp 4 39713 NULL
91139 +snapshot_write_28351 snapshot_write 3 28351 NULL
91140 +event_enable_read_7074 event_enable_read 3 7074 NULL
91141 +brcmf_sdbrcm_died_dump_15841 brcmf_sdbrcm_died_dump 3 15841 NULL
91142 +do_syslog_56807 do_syslog 3 56807 NULL
91143 +sl_realloc_bufs_64086 sl_realloc_bufs 2 64086 NULL
91144 +pskb_pull_65005 pskb_pull 2 65005 NULL
91145 +caif_seqpkt_recvmsg_32241 caif_seqpkt_recvmsg 4 32241 NULL
91146 +lbs_lowrssi_read_32242 lbs_lowrssi_read 3 32242 NULL
91147 +sisusbcon_scroll_31315 sisusbcon_scroll 5-2-3 31315 NULL
91148 +unifi_write_65012 unifi_write 3 65012 NULL
91149 +agp_generic_alloc_user_9470 agp_generic_alloc_user 1 9470 NULL
91150 +nfs_readdata_alloc_65015 nfs_readdata_alloc 2 65015 NULL
91151 +ubi_io_write_15870 ubi_io_write 5-4 15870 NULL nohasharray
91152 +media_entity_init_15870 media_entity_init 2-4 15870 &ubi_io_write_15870
91153 +mtdchar_write_56831 mtdchar_write 3 56831 NULL nohasharray
91154 +ntfs_rl_realloc_56831 ntfs_rl_realloc 3 56831 &mtdchar_write_56831
91155 +skb_copy_to_page_nocache_58624 skb_copy_to_page_nocache 6 58624 NULL
91156 +mid_get_vbt_data_r1_26170 mid_get_vbt_data_r1 2 26170 NULL
91157 +skb_copy_expand_7685 skb_copy_expand 2-3 7685 NULL nohasharray
91158 +acpi_ex_allocate_name_string_7685 acpi_ex_allocate_name_string 2-1 7685 &skb_copy_expand_7685
91159 +if_write_51756 if_write 3 51756 NULL
91160 +insert_dent_65034 insert_dent 7 65034 NULL
91161 +blkcipher_copy_iv_24075 blkcipher_copy_iv 3 24075 NULL
91162 +snd_rawmidi_kernel_write1_56847 snd_rawmidi_kernel_write1 4 56847 NULL
91163 +vb2_fop_read_24080 vb2_fop_read 3 24080 NULL
91164 +brcmf_sdcard_rwdata_65041 brcmf_sdcard_rwdata 5 65041 NULL
91165 +nfs_map_group_to_gid_15892 nfs_map_group_to_gid 3 15892 NULL
91166 +lc_create_48662 lc_create 3 48662 NULL
91167 +aes_encrypt_packets_read_48666 aes_encrypt_packets_read 3 48666 NULL
91168 +sm501_create_subdev_48668 sm501_create_subdev 3-4 48668 NULL nohasharray
91169 +sys_setgroups_48668 sys_setgroups 1 48668 &sm501_create_subdev_48668
91170 +ath9k_multi_regread_65056 ath9k_multi_regread 4 65056 NULL
91171 +brcmf_sdcard_send_buf_7713 brcmf_sdcard_send_buf 6 7713 NULL
91172 +l2cap_build_cmd_48676 l2cap_build_cmd 4 48676 NULL
91173 +batadv_hash_new_40491 batadv_hash_new 1 40491 NULL
91174 +pipeline_post_proc_swi_read_24108 pipeline_post_proc_swi_read 3 24108 NULL
91175 +request_key_auth_read_24109 request_key_auth_read 3 24109 NULL
91176 +bnx2fc_process_l2_frame_compl_65072 bnx2fc_process_l2_frame_compl 3 65072 NULL
91177 +__alloc_bootmem_node_high_65076 __alloc_bootmem_node_high 2 65076 NULL
91178 +persistent_ram_new_40501 persistent_ram_new 1-2 40501 NULL
91179 +ieee80211_send_auth_24121 ieee80211_send_auth 5 24121 NULL
91180 +altera_drscan_48698 altera_drscan 2 48698 NULL
91181 +tx_frag_in_process_called_read_1290 tx_frag_in_process_called_read 3 1290 NULL
91182 +set_bypass_pwup_pfs_7742 set_bypass_pwup_pfs 3 7742 NULL
91183 +kvm_set_irq_routing_48704 kvm_set_irq_routing 3 48704 NULL
91184 +power_read_15939 power_read 3 15939 NULL
91185 +recv_msg_48709 recv_msg 4 48709 NULL
91186 +befs_utf2nls_25628 befs_utf2nls 3 25628 NULL
91187 +ghash_async_setkey_60001 ghash_async_setkey 3 60001 NULL
91188 +TSS_checkhmac2_40520 TSS_checkhmac2 5-7 40520 NULL
91189 +btrfs_error_discard_extent_50444 btrfs_error_discard_extent 2 50444 NULL
91190 +irnet_ctrl_write_24139 irnet_ctrl_write 3 24139 NULL
91191 +nouveau_bar_create__32332 nouveau_bar_create_ 4 32332 NULL
91192 +process_vm_rw_pages_15954 process_vm_rw_pages 6-5 15954 NULL
91193 +revalidate_19043 revalidate 2 19043 NULL
91194 +t4_alloc_mem_32342 t4_alloc_mem 1 32342 NULL
91195 +aes_decrypt_fail_read_54815 aes_decrypt_fail_read 3 54815 NULL
91196 +trim_bitmaps_24158 trim_bitmaps 3 24158 NULL
91197 +alloc_candev_7776 alloc_candev 1-2 7776 NULL
91198 +check_header_56930 check_header 2 56930 NULL
91199 +ima_write_policy_40548 ima_write_policy 3 40548 NULL
91200 +journal_init_revoke_56933 journal_init_revoke 2 56933 NULL
91201 +__wa_xfer_setup_segs_56725 __wa_xfer_setup_segs 2 56725 NULL
91202 +ath6kl_usb_bmi_read_48745 ath6kl_usb_bmi_read 3 48745 NULL
91203 +sel_read_initcon_32362 sel_read_initcon 3 32362 NULL
91204 +ath6kl_regwrite_read_48747 ath6kl_regwrite_read 3 48747 NULL
91205 +frame_alloc_15981 frame_alloc 4 15981 NULL
91206 +esp_alloc_tmp_40558 esp_alloc_tmp 3-2 40558 NULL
91207 +nf_nat_seq_adjust_44989 nf_nat_seq_adjust 4 44989 NULL
91208 +diva_os_copy_from_user_7792 diva_os_copy_from_user 4 7792 NULL
91209 +adu_read_24177 adu_read 3 24177 NULL
91210 +alloc_fddidev_15382 alloc_fddidev 1 15382 NULL
91211 +send_mpa_reply_32372 send_mpa_reply 3 32372 NULL
91212 +alloc_vm_area_15989 alloc_vm_area 1 15989 NULL
91213 +variax_set_raw2_32374 variax_set_raw2 4 32374 NULL
91214 +vfd_write_14717 vfd_write 3 14717 NULL
91215 +carl9170_rx_copy_data_21656 carl9170_rx_copy_data 2 21656 NULL
91216 +usbtmc_read_32377 usbtmc_read 3 32377 NULL
91217 +qib_alloc_devdata_51819 qib_alloc_devdata 2 51819 NULL
91218 +l2cap_segment_sdu_48772 l2cap_segment_sdu 4 48772 NULL
91219 +dvb_generic_ioctl_21810 dvb_generic_ioctl 2 21810 NULL
91220 +cache_write_pipefs_48270 cache_write_pipefs 3 48270 NULL
91221 +rx_defrag_called_read_1897 rx_defrag_called_read 3 1897 NULL
91222 +xfs_iext_add_indirect_multi_32400 xfs_iext_add_indirect_multi 3 32400 NULL
91223 +viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
91224 +__cxio_init_resource_fifo_23447 __cxio_init_resource_fifo 3 23447 NULL
91225 +skge_rx_get_40598 skge_rx_get 3 40598 NULL
91226 +nouveau_device_create__56984 nouveau_device_create_ 6 56984 NULL
91227 +register_device_60015 register_device 2-3 60015 NULL
91228 +got_frame_16028 got_frame 2 16028 NULL
91229 +ssb_bus_register_65183 ssb_bus_register 3 65183 NULL
91230 +pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 1-2-3 24224 NULL
91231 +il3945_sta_dbgfs_stats_table_read_48802 il3945_sta_dbgfs_stats_table_read 3 48802 NULL
91232 +scsi_register_49094 scsi_register 2 49094 NULL
91233 +twa_change_queue_depth_48808 twa_change_queue_depth 2 48808 NULL
91234 +vlsi_alloc_ring_57003 vlsi_alloc_ring 3-4 57003 NULL
91235 +sel_read_bool_24236 sel_read_bool 3 24236 NULL
91236 +batadv_check_unicast_packet_10866 batadv_check_unicast_packet 2 10866 NULL
91237 +tcp_push_one_48816 tcp_push_one 2 48816 NULL
91238 +nfulnl_alloc_skb_65207 nfulnl_alloc_skb 2 65207 NULL
91239 +dccp_recvmsg_16056 dccp_recvmsg 4 16056 NULL
91240 +gfs2_glock_nq_m_20347 gfs2_glock_nq_m 1 20347 NULL
91241 +atomic_counters_read_48827 atomic_counters_read 3 48827 NULL
91242 +rx_rx_done_read_65217 rx_rx_done_read 3 65217 NULL
91243 +vc_do_resize_48842 vc_do_resize 3-4 48842 NULL
91244 +svc_pool_map_alloc_arrays_47181 svc_pool_map_alloc_arrays 2 47181 NULL
91245 +create_gpadl_header_19064 create_gpadl_header 2 19064 NULL
91246 +dvb_dmxdev_read_sec_7892 dvb_dmxdev_read_sec 4 7892 NULL
91247 +snd_opl4_mem_proc_read_63774 snd_opl4_mem_proc_read 5 63774 NULL
91248 +xfs_trans_get_efi_7898 xfs_trans_get_efi 2 7898 NULL
91249 +compat_sys_preadv64_24283 compat_sys_preadv64 3 24283 NULL
91250 +pipeline_rx_complete_stat_fifo_int_read_40671 pipeline_rx_complete_stat_fifo_int_read 3 40671 NULL
91251 +viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 NULL
91252 +wlc_phy_loadsampletable_nphy_64367 wlc_phy_loadsampletable_nphy 3 64367 NULL
91253 +__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL
91254 +cx2341x_ctrl_new_std_57061 cx2341x_ctrl_new_std 4 57061 NULL
91255 +isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
91256 +sca3000_read_data_57064 sca3000_read_data 4 57064 NULL
91257 +mid_get_vbt_data_r0_10876 mid_get_vbt_data_r0 2 10876 NULL
91258 +pcmcia_replace_cis_57066 pcmcia_replace_cis 3 57066 NULL
91259 +ide_driver_proc_write_32493 ide_driver_proc_write 3 32493 NULL
91260 +dma_tx_requested_read_16110 dma_tx_requested_read 3 16110 NULL nohasharray
91261 +isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 &dma_tx_requested_read_16110
91262 +sys_setgroups16_48882 sys_setgroups16 1 48882 NULL
91263 +kmem_zalloc_greedy_65268 kmem_zalloc_greedy 3-2 65268 NULL
91264 +nfc_hci_set_param_40697 nfc_hci_set_param 5 40697 NULL
91265 +f_hidg_write_7932 f_hidg_write 3 7932 NULL
91266 +ath6kl_add_bss_if_needed_24317 ath6kl_add_bss_if_needed 6 24317 NULL
91267 +kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL
91268 +mce_flush_rx_buffer_14976 mce_flush_rx_buffer 2 14976 NULL
91269 +mac_drv_rx_init_48898 mac_drv_rx_init 2 48898 NULL
91270 +sisusbcon_scroll_area_25899 sisusbcon_scroll_area 4-3 25899 NULL
91271 +xdi_copy_to_user_48900 xdi_copy_to_user 4 48900 NULL
91272 +compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL
91273 +fsm_init_16134 fsm_init 2 16134 NULL
91274 +ext_sd_execute_write_data_8175 ext_sd_execute_write_data 9 8175 NULL
91275 +tracing_set_trace_write_57096 tracing_set_trace_write 3 57096 NULL
91276 +disconnect_32521 disconnect 4 32521 NULL
91277 +__seq_open_private_40715 __seq_open_private 3 40715 NULL
91278 +tifm_alloc_adapter_10903 tifm_alloc_adapter 1 10903 NULL
91279 +ath6kl_wmi_add_wow_pattern_cmd_12842 ath6kl_wmi_add_wow_pattern_cmd 4 12842 NULL
91280 +mpi_set_buffer_65294 mpi_set_buffer 3 65294 NULL
91281 +redirected_tty_write_65297 redirected_tty_write 3 65297 NULL
91282 +ilo_read_32531 ilo_read 3 32531 NULL
91283 +ieee80211_if_read_estab_plinks_32533 ieee80211_if_read_estab_plinks 3 32533 NULL
91284 +smk_write_load_self_7958 smk_write_load_self 3 7958 NULL
91285 +pipeline_pipeline_fifo_full_read_34095 pipeline_pipeline_fifo_full_read 3 34095 NULL
91286 +sysfs_write_file_57116 sysfs_write_file 3 57116 NULL
91287 +gdth_isa_probe_one_48925 gdth_isa_probe_one 1 48925 NULL
91288 +kzalloc_node_24352 kzalloc_node 1 24352 NULL
91289 +nfc_hci_execute_cmd_async_65314 nfc_hci_execute_cmd_async 5 65314 NULL
91290 +msnd_fifo_alloc_23179 msnd_fifo_alloc 2 23179 NULL
91291 +format_devstat_counter_32550 format_devstat_counter 3 32550 NULL
91292 +nfcwilink_skb_alloc_16167 nfcwilink_skb_alloc 1 16167 NULL
91293 +xfs_iext_remove_direct_40744 xfs_iext_remove_direct 3 40744 NULL
91294 +remap_pci_mem_15966 remap_pci_mem 1-2 15966 NULL
91295 +qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL
91296 +cfi_read_pri_24366 cfi_read_pri 3 24366 NULL
91297 +btrfs_insert_delayed_dir_index_63720 btrfs_insert_delayed_dir_index 4 63720 NULL
91298 +aes_encrypt_fail_read_32562 aes_encrypt_fail_read 3 32562 NULL
91299 +card_send_command_40757 card_send_command 3 40757 NULL
91300 +sys_mbind_7990 sys_mbind 5 7990 NULL
91301 +dccp_setsockopt_service_65336 dccp_setsockopt_service 4 65336 NULL
91302 +pg_write_40766 pg_write 3 40766 NULL
91303 +event_heart_beat_read_48961 event_heart_beat_read 3 48961 NULL
91304 +uea_idma_write_64139 uea_idma_write 3 64139 NULL
91305 +brcmf_debugfs_sdio_counter_read_58369 brcmf_debugfs_sdio_counter_read 3 58369 NULL
91306 +carl9170_rx_stream_1334 carl9170_rx_stream 3 1334 NULL
91307 +nand_ecc_test_run_48966 nand_ecc_test_run 1 48966 NULL
91308 +ath_descdma_setup_12257 ath_descdma_setup 5 12257 NULL
91309 +alloc_sched_domains_47756 alloc_sched_domains 1 47756 NULL
91310 +dma_rx_requested_read_65354 dma_rx_requested_read 3 65354 NULL
91311 +batadv_orig_hash_del_if_48972 batadv_orig_hash_del_if 2 48972 NULL
91312 +tt3650_ci_msg_locked_8013 tt3650_ci_msg_locked 4 8013 NULL
91313 +pvr2_v4l2_ioctl_24398 pvr2_v4l2_ioctl 2 24398 NULL nohasharray
91314 +getxattr_24398 getxattr 4 24398 &pvr2_v4l2_ioctl_24398
91315 +stk_prepare_sio_buffers_57168 stk_prepare_sio_buffers 2 57168 NULL
91316 +vcs_read_8017 vcs_read 3 8017 NULL
91317 +read_file_beacon_32595 read_file_beacon 3 32595 NULL
91318 +gss_pipe_downcall_23182 gss_pipe_downcall 3 23182 NULL
91319 +rx_hw_stuck_read_57179 rx_hw_stuck_read 3 57179 NULL
91320 +iwl_dbgfs_wowlan_sram_read_540 iwl_dbgfs_wowlan_sram_read 3 540 NULL
91321 +_alloc_set_attr_list_48991 _alloc_set_attr_list 4 48991 NULL
91322 +sys_set_mempolicy_32608 sys_set_mempolicy 3 32608 NULL
91323 +atomic_read_file_16227 atomic_read_file 3 16227 NULL
91324 +vhost_add_used_and_signal_n_8038 vhost_add_used_and_signal_n 4 8038 NULL
91325 +copy_and_check_19089 copy_and_check 3 19089 NULL
91326 +b43_debugfs_read_24425 b43_debugfs_read 3 24425 NULL
91327 +netdev_alloc_skb_ip_align_40811 netdev_alloc_skb_ip_align 2 40811 NULL
91328 +i915_wedged_read_35474 i915_wedged_read 3 35474 NULL
91329 +ieee80211_rx_mgmt_beacon_24430 ieee80211_rx_mgmt_beacon 3 24430 NULL
91330 +iser_rcv_completion_8048 iser_rcv_completion 2 8048 NULL
91331 +trace_parser_get_init_31379 trace_parser_get_init 2 31379 NULL
91332 +ms_read_multiple_pages_8052 ms_read_multiple_pages 5-4 8052 NULL
91333 +__alloc_bootmem_nopanic_65397 __alloc_bootmem_nopanic 1 65397 NULL
91334 +trace_seq_to_user_65398 trace_seq_to_user 3 65398 NULL
91335 +ixgbe_alloc_q_vector_24439 ixgbe_alloc_q_vector 4-6 24439 NULL
91336 +smk_user_access_24440 smk_user_access 3 24440 NULL
91337 +xd_rw_49020 xd_rw 3-4 49020 NULL
91338 +dgrp_mon_read_8065 dgrp_mon_read 3 8065 NULL
91339 +tt3650_ci_msg_57219 tt3650_ci_msg 4 57219 NULL
91340 +mpi_alloc_limb_space_23190 mpi_alloc_limb_space 1 23190 NULL
91341 +kvmalloc_32646 kvmalloc 1 32646 NULL
91342 +alloc_targets_8074 alloc_targets 2 8074 NULL nohasharray
91343 +qla4xxx_post_ping_evt_work_8074 qla4xxx_post_ping_evt_work 4 8074 &alloc_targets_8074
91344 +evdev_do_ioctl_24459 evdev_do_ioctl 2 24459 NULL
91345 +drm_calloc_large_65421 drm_calloc_large 1-2 65421 NULL
91346 +set_disc_pfs_16270 set_disc_pfs 3 16270 NULL
91347 +skb_copy_and_csum_datagram_iovec_24466 skb_copy_and_csum_datagram_iovec 2 24466 NULL
91348 +__mxt_write_reg_57326 __mxt_write_reg 3 57326 NULL
91349 +caif_stream_sendmsg_9110 caif_stream_sendmsg 4 9110 NULL
91350 +drbd_setsockopt_16280 drbd_setsockopt 5 16280 NULL nohasharray
91351 +nand_bch_init_16280 nand_bch_init 3-2 16280 &drbd_setsockopt_16280
91352 +xpc_kzalloc_cacheline_aligned_65433 xpc_kzalloc_cacheline_aligned 1 65433 NULL
91353 +v9fs_file_read_40858 v9fs_file_read 3 40858 NULL
91354 +fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL
91355 +pn533_init_target_frame_65438 pn533_init_target_frame 3 65438 NULL
91356 +__mptctl_ioctl_15875 __mptctl_ioctl 2 15875 NULL
91357 +move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
91358 +i915_wedged_write_47771 i915_wedged_write 3 47771 NULL
91359 +usb_alloc_coherent_65444 usb_alloc_coherent 2 65444 NULL
91360 +aes_decrypt_interrupt_read_19910 aes_decrypt_interrupt_read 3 19910 NULL
91361 +l2cap_bredr_sig_cmd_49065 l2cap_bredr_sig_cmd 3 49065 NULL
91362 +tipc_buf_acquire_60437 tipc_buf_acquire 1 60437 NULL
91363 +alloc_flex_gd_57259 alloc_flex_gd 1 57259 NULL
91364 +venus_lookup_8121 venus_lookup 4 8121 NULL
91365 +compat_writev_60063 compat_writev 3 60063 NULL
91366 +io_mapping_create_wc_1354 io_mapping_create_wc 1-2 1354 NULL
91367 +jfs_readpages_32702 jfs_readpages 4 32702 NULL
91368 +read_file_queue_40895 read_file_queue 3 40895 NULL
91369 +request_key_with_auxdata_24515 request_key_with_auxdata 4 24515 NULL
91370 +waiters_read_40902 waiters_read 3 40902 NULL
91371 +pstore_file_read_57288 pstore_file_read 3 57288 NULL
91372 +vmalloc_node_58700 vmalloc_node 1 58700 NULL
91373 +xfs_buf_get_map_24522 xfs_buf_get_map 3 24522 NULL
91374 +ath_rx_edma_init_65483 ath_rx_edma_init 2 65483 NULL
91375 +vmw_cursor_update_image_16332 vmw_cursor_update_image 3-4 16332 NULL
91376 +compat_do_readv_writev_49102 compat_do_readv_writev 4 49102 NULL
91377 +dpcm_state_read_file_65489 dpcm_state_read_file 3 65489 NULL
91378 +vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
91379 +named_prepare_buf_24532 named_prepare_buf 2 24532 NULL
91380 +hdpvr_read_9273 hdpvr_read 3 9273 NULL
91381 +alloc_dr_65495 alloc_dr 2 65495 NULL
91382 +do_mpage_readpage_24536 do_mpage_readpage 3 24536 NULL
91383 +rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL
91384 +ftdi_elan_write_57309 ftdi_elan_write 3 57309 NULL
91385 +megasas_change_queue_depth_32747 megasas_change_queue_depth 2 32747 NULL
91386 +total_ps_buffered_read_16365 total_ps_buffered_read 3 16365 NULL
91387 +rbd_add_16366 rbd_add 3 16366 NULL
91388 +stats_read_ul_32751 stats_read_ul 3 32751 NULL
91389 +pt_read_49136 pt_read 3 49136 NULL
91390 +tsi148_alloc_resource_24563 tsi148_alloc_resource 2 24563 NULL
91391 +snd_vx_create_40948 snd_vx_create 4 40948 NULL
91392 +iscsi_tcp_conn_setup_16376 iscsi_tcp_conn_setup 2 16376 NULL
91393 +check_mirror_57342 check_mirror 1-2 57342 NULL nohasharray
91394 +usblp_read_57342 usblp_read 3 57342 &check_mirror_57342
91395 +atyfb_setup_generic_49151 atyfb_setup_generic 3 49151 NULL
91396 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
91397 new file mode 100644
91398 index 0000000..d52f2ee
91399 --- /dev/null
91400 +++ b/tools/gcc/size_overflow_plugin.c
91401 @@ -0,0 +1,1941 @@
91402 +/*
91403 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
91404 + * Licensed under the GPL v2, or (at your option) v3
91405 + *
91406 + * Homepage:
91407 + * http://www.grsecurity.net/~ephox/overflow_plugin/
91408 + *
91409 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
91410 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
91411 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
91412 + *
91413 + * Usage:
91414 + * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -Wno-missing-field-initializers -o size_overflow_plugin.so size_overflow_plugin.c
91415 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
91416 + */
91417 +
91418 +#include "gcc-plugin.h"
91419 +#include "config.h"
91420 +#include "system.h"
91421 +#include "coretypes.h"
91422 +#include "tree.h"
91423 +#include "tree-pass.h"
91424 +#include "intl.h"
91425 +#include "plugin-version.h"
91426 +#include "tm.h"
91427 +#include "toplev.h"
91428 +#include "function.h"
91429 +#include "tree-flow.h"
91430 +#include "plugin.h"
91431 +#include "gimple.h"
91432 +#include "c-common.h"
91433 +#include "diagnostic.h"
91434 +#include "cfgloop.h"
91435 +
91436 +#if BUILDING_GCC_VERSION >= 4007
91437 +#include "c-tree.h"
91438 +#else
91439 +#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
91440 +#endif
91441 +
91442 +#if BUILDING_GCC_VERSION >= 4008
91443 +#define TODO_dump_func 0
91444 +#endif
91445 +
91446 +struct size_overflow_hash {
91447 + const struct size_overflow_hash * const next;
91448 + const char * const name;
91449 + const unsigned int param;
91450 +};
91451 +
91452 +#include "size_overflow_hash.h"
91453 +
91454 +enum marked {
91455 + MARKED_NO, MARKED_YES, MARKED_NOT_INTENTIONAL
91456 +};
91457 +
91458 +static unsigned int call_count = 0;
91459 +
91460 +#define __unused __attribute__((__unused__))
91461 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
91462 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
91463 +#define BEFORE_STMT true
91464 +#define AFTER_STMT false
91465 +#define CREATE_NEW_VAR NULL_TREE
91466 +#define CODES_LIMIT 32
91467 +#define MAX_PARAM 32
91468 +#define MY_STMT GF_PLF_1
91469 +#define NO_CAST_CHECK GF_PLF_2
91470 +
91471 +#if BUILDING_GCC_VERSION == 4005
91472 +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
91473 +#endif
91474 +
91475 +int plugin_is_GPL_compatible;
91476 +void debug_gimple_stmt(gimple gs);
91477 +
91478 +static tree expand(struct pointer_set_t *visited, tree lhs);
91479 +static bool pre_expand(struct pointer_set_t *visited, const_tree lhs);
91480 +static tree report_size_overflow_decl;
91481 +static const_tree const_char_ptr_type_node;
91482 +static unsigned int handle_function(void);
91483 +static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
91484 +static tree get_size_overflow_type(gimple stmt, const_tree node);
91485 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
91486 +
91487 +static struct plugin_info size_overflow_plugin_info = {
91488 + .version = "20130109beta",
91489 + .help = "no-size-overflow\tturn off size overflow checking\n",
91490 +};
91491 +
91492 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
91493 +{
91494 + unsigned int arg_count;
91495 + enum tree_code code = TREE_CODE(*node);
91496 +
91497 + switch (code) {
91498 + case FUNCTION_DECL:
91499 + arg_count = type_num_arguments(TREE_TYPE(*node));
91500 + break;
91501 + case FUNCTION_TYPE:
91502 + case METHOD_TYPE:
91503 + arg_count = type_num_arguments(*node);
91504 + break;
91505 + default:
91506 + *no_add_attrs = true;
91507 + error("%s: %qE attribute only applies to functions", __func__, name);
91508 + return NULL_TREE;
91509 + }
91510 +
91511 + for (; args; args = TREE_CHAIN(args)) {
91512 + tree position = TREE_VALUE(args);
91513 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
91514 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
91515 + *no_add_attrs = true;
91516 + }
91517 + }
91518 + return NULL_TREE;
91519 +}
91520 +
91521 +static const char* get_asm_name(tree node)
91522 +{
91523 + return IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(node));
91524 +}
91525 +
91526 +static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
91527 +{
91528 + unsigned int arg_count, arg_num;
91529 + enum tree_code code = TREE_CODE(*node);
91530 +
91531 + switch (code) {
91532 + case FUNCTION_DECL:
91533 + arg_count = type_num_arguments(TREE_TYPE(*node));
91534 + break;
91535 + case FUNCTION_TYPE:
91536 + case METHOD_TYPE:
91537 + arg_count = type_num_arguments(*node);
91538 + break;
91539 + case FIELD_DECL:
91540 + arg_num = TREE_INT_CST_LOW(TREE_VALUE(args));
91541 + if (arg_num != 0) {
91542 + *no_add_attrs = true;
91543 + error("%s: %qE attribute parameter can only be 0 in structure fields", __func__, name);
91544 + }
91545 + return NULL_TREE;
91546 + default:
91547 + *no_add_attrs = true;
91548 + error("%qE attribute only applies to functions", name);
91549 + return NULL_TREE;
91550 + }
91551 +
91552 + for (; args; args = TREE_CHAIN(args)) {
91553 + tree position = TREE_VALUE(args);
91554 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) > arg_count ) {
91555 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
91556 + *no_add_attrs = true;
91557 + }
91558 + }
91559 + return NULL_TREE;
91560 +}
91561 +
91562 +static struct attribute_spec size_overflow_attr = {
91563 + .name = "size_overflow",
91564 + .min_length = 1,
91565 + .max_length = -1,
91566 + .decl_required = true,
91567 + .type_required = false,
91568 + .function_type_required = false,
91569 + .handler = handle_size_overflow_attribute,
91570 +#if BUILDING_GCC_VERSION >= 4007
91571 + .affects_type_identity = false
91572 +#endif
91573 +};
91574 +
91575 +static struct attribute_spec intentional_overflow_attr = {
91576 + .name = "intentional_overflow",
91577 + .min_length = 1,
91578 + .max_length = -1,
91579 + .decl_required = true,
91580 + .type_required = false,
91581 + .function_type_required = false,
91582 + .handler = handle_intentional_overflow_attribute,
91583 +#if BUILDING_GCC_VERSION >= 4007
91584 + .affects_type_identity = false
91585 +#endif
91586 +};
91587 +
91588 +static void register_attributes(void __unused *event_data, void __unused *data)
91589 +{
91590 + register_attribute(&size_overflow_attr);
91591 + register_attribute(&intentional_overflow_attr);
91592 +}
91593 +
91594 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
91595 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
91596 +{
91597 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
91598 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
91599 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
91600 +
91601 + unsigned int m = 0x57559429;
91602 + unsigned int n = 0x5052acdb;
91603 + const unsigned int *key4 = (const unsigned int *)key;
91604 + unsigned int h = len;
91605 + unsigned int k = len + seed + n;
91606 + unsigned long long p;
91607 +
91608 + while (len >= 8) {
91609 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
91610 + len -= 8;
91611 + }
91612 + if (len >= 4) {
91613 + cwmixb(key4[0]) key4 += 1;
91614 + len -= 4;
91615 + }
91616 + if (len)
91617 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
91618 + cwmixb(h ^ (k + n));
91619 + return k ^ h;
91620 +
91621 +#undef cwfold
91622 +#undef cwmixa
91623 +#undef cwmixb
91624 +}
91625 +
91626 +static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed)
91627 +{
91628 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
91629 + unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff;
91630 + return fn ^ codes;
91631 +}
91632 +
91633 +static inline tree get_original_function_decl(tree fndecl)
91634 +{
91635 + if (DECL_ABSTRACT_ORIGIN(fndecl))
91636 + return DECL_ABSTRACT_ORIGIN(fndecl);
91637 + return fndecl;
91638 +}
91639 +
91640 +static inline gimple get_def_stmt(const_tree node)
91641 +{
91642 + gcc_assert(node != NULL_TREE);
91643 + gcc_assert(TREE_CODE(node) == SSA_NAME);
91644 + return SSA_NAME_DEF_STMT(node);
91645 +}
91646 +
91647 +static unsigned char get_tree_code(const_tree type)
91648 +{
91649 + switch (TREE_CODE(type)) {
91650 + case ARRAY_TYPE:
91651 + return 0;
91652 + case BOOLEAN_TYPE:
91653 + return 1;
91654 + case ENUMERAL_TYPE:
91655 + return 2;
91656 + case FUNCTION_TYPE:
91657 + return 3;
91658 + case INTEGER_TYPE:
91659 + return 4;
91660 + case POINTER_TYPE:
91661 + return 5;
91662 + case RECORD_TYPE:
91663 + return 6;
91664 + case UNION_TYPE:
91665 + return 7;
91666 + case VOID_TYPE:
91667 + return 8;
91668 + case REAL_TYPE:
91669 + return 9;
91670 + case VECTOR_TYPE:
91671 + return 10;
91672 + case REFERENCE_TYPE:
91673 + return 11;
91674 + case OFFSET_TYPE:
91675 + return 12;
91676 + case COMPLEX_TYPE:
91677 + return 13;
91678 + default:
91679 + debug_tree((tree)type);
91680 + gcc_unreachable();
91681 + }
91682 +}
91683 +
91684 +static size_t add_type_codes(const_tree type, unsigned char *tree_codes, size_t len)
91685 +{
91686 + gcc_assert(type != NULL_TREE);
91687 +
91688 + while (type && len < CODES_LIMIT) {
91689 + tree_codes[len] = get_tree_code(type);
91690 + len++;
91691 + type = TREE_TYPE(type);
91692 + }
91693 + return len;
91694 +}
91695 +
91696 +static unsigned int get_function_decl(const_tree fndecl, unsigned char *tree_codes)
91697 +{
91698 + const_tree arg, result, arg_field, type = TREE_TYPE(fndecl);
91699 + enum tree_code code = TREE_CODE(type);
91700 + size_t len = 0;
91701 +
91702 + gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
91703 +
91704 + arg = TYPE_ARG_TYPES(type);
91705 + // skip builtins __builtin_constant_p
91706 + if (!arg && DECL_BUILT_IN(fndecl))
91707 + return 0;
91708 +
91709 + if (TREE_CODE_CLASS(code) == tcc_type)
91710 + result = type;
91711 + else
91712 + result = DECL_RESULT(fndecl);
91713 +
91714 + gcc_assert(result != NULL_TREE);
91715 + len = add_type_codes(TREE_TYPE(result), tree_codes, len);
91716 +
91717 + if (arg == NULL_TREE) {
91718 + gcc_assert(CODE_CONTAINS_STRUCT(TREE_CODE(fndecl), TS_DECL_NON_COMMON));
91719 + arg_field = DECL_ARGUMENT_FLD(fndecl);
91720 + if (arg_field == NULL_TREE)
91721 + return 0;
91722 + arg = TREE_TYPE(arg_field);
91723 + len = add_type_codes(arg, tree_codes, len);
91724 + gcc_assert(len != 0);
91725 + return len;
91726 + }
91727 +
91728 + gcc_assert(arg != NULL_TREE && TREE_CODE(arg) == TREE_LIST);
91729 + while (arg && len < CODES_LIMIT) {
91730 + len = add_type_codes(TREE_VALUE(arg), tree_codes, len);
91731 + arg = TREE_CHAIN(arg);
91732 + }
91733 +
91734 + gcc_assert(len != 0);
91735 + return len;
91736 +}
91737 +
91738 +static const struct size_overflow_hash *get_function_hash(tree fndecl)
91739 +{
91740 + unsigned int hash;
91741 + const struct size_overflow_hash *entry;
91742 + unsigned char tree_codes[CODES_LIMIT];
91743 + size_t len;
91744 + const char *func_name = get_asm_name(fndecl);
91745 +
91746 + len = get_function_decl(fndecl, tree_codes);
91747 + if (len == 0)
91748 + return NULL;
91749 +
91750 + hash = get_hash_num(func_name, (const char*) tree_codes, len, 0);
91751 +
91752 + entry = size_overflow_hash[hash];
91753 + while (entry) {
91754 + if (!strcmp(entry->name, func_name))
91755 + return entry;
91756 + entry = entry->next;
91757 + }
91758 +
91759 + return NULL;
91760 +}
91761 +
91762 +static void check_arg_type(const_tree arg)
91763 +{
91764 + const_tree type = TREE_TYPE(arg);
91765 + enum tree_code code = TREE_CODE(type);
91766 +
91767 + if (code == BOOLEAN_TYPE)
91768 + return;
91769 +
91770 + gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
91771 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
91772 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
91773 +}
91774 +
91775 +static unsigned int find_arg_number(const_tree arg, tree func)
91776 +{
91777 + tree var;
91778 + unsigned int argnum = 1;
91779 +
91780 + if (TREE_CODE(arg) == SSA_NAME)
91781 + arg = SSA_NAME_VAR(arg);
91782 +
91783 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
91784 + if (strcmp(NAME(arg), NAME(var))) {
91785 + argnum++;
91786 + continue;
91787 + }
91788 + check_arg_type(var);
91789 + return argnum;
91790 + }
91791 + gcc_unreachable();
91792 +}
91793 +
91794 +static tree create_new_var(tree type)
91795 +{
91796 + tree new_var = create_tmp_var(type, "cicus");
91797 +
91798 +#if BUILDING_GCC_VERSION <= 4007
91799 + add_referenced_var(new_var);
91800 + mark_sym_for_renaming(new_var);
91801 +#endif
91802 + return new_var;
91803 +}
91804 +
91805 +static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
91806 +{
91807 + gimple assign;
91808 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
91809 + tree type = TREE_TYPE(rhs1);
91810 + tree lhs = create_new_var(type);
91811 +
91812 + gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
91813 + assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
91814 + gimple_set_lhs(assign, make_ssa_name(lhs, assign));
91815 +
91816 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
91817 + update_stmt(assign);
91818 + gimple_set_plf(assign, MY_STMT, true);
91819 + return assign;
91820 +}
91821 +
91822 +static bool is_bool(const_tree node)
91823 +{
91824 + const_tree type;
91825 +
91826 + if (node == NULL_TREE)
91827 + return false;
91828 +
91829 + type = TREE_TYPE(node);
91830 + if (!INTEGRAL_TYPE_P(type))
91831 + return false;
91832 + if (TREE_CODE(type) == BOOLEAN_TYPE)
91833 + return true;
91834 + if (TYPE_PRECISION(type) == 1)
91835 + return true;
91836 + return false;
91837 +}
91838 +
91839 +static tree cast_a_tree(tree type, tree var)
91840 +{
91841 + gcc_assert(type != NULL_TREE);
91842 + gcc_assert(var != NULL_TREE);
91843 + gcc_assert(fold_convertible_p(type, var));
91844 +
91845 + return fold_convert(type, var);
91846 +}
91847 +
91848 +static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before)
91849 +{
91850 + gimple assign;
91851 +
91852 + gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
91853 + if (gsi_end_p(*gsi) && before == AFTER_STMT)
91854 + gcc_unreachable();
91855 +
91856 + if (lhs == CREATE_NEW_VAR)
91857 + lhs = create_new_var(dst_type);
91858 +
91859 + assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
91860 +
91861 + if (!gsi_end_p(*gsi)) {
91862 + location_t loc = gimple_location(gsi_stmt(*gsi));
91863 + gimple_set_location(assign, loc);
91864 + }
91865 +
91866 + gimple_set_lhs(assign, make_ssa_name(lhs, assign));
91867 +
91868 + if (before)
91869 + gsi_insert_before(gsi, assign, GSI_NEW_STMT);
91870 + else
91871 + gsi_insert_after(gsi, assign, GSI_NEW_STMT);
91872 + update_stmt(assign);
91873 + gimple_set_plf(assign, MY_STMT, true);
91874 +
91875 + return assign;
91876 +}
91877 +
91878 +static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
91879 +{
91880 + gimple assign;
91881 + gimple_stmt_iterator gsi;
91882 +
91883 + if (rhs == NULL_TREE)
91884 + return NULL_TREE;
91885 +
91886 + if (types_compatible_p(TREE_TYPE(rhs), size_overflow_type) && gimple_plf(stmt, MY_STMT))
91887 + return rhs;
91888 +
91889 + gsi = gsi_for_stmt(stmt);
91890 + assign = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before);
91891 + gimple_set_plf(assign, MY_STMT, true);
91892 + return gimple_get_lhs(assign);
91893 +}
91894 +
91895 +static tree cast_to_TI_type(gimple stmt, tree node)
91896 +{
91897 + gimple_stmt_iterator gsi;
91898 + gimple cast_stmt;
91899 + tree type = TREE_TYPE(node);
91900 +
91901 + if (types_compatible_p(type, intTI_type_node))
91902 + return node;
91903 +
91904 + gsi = gsi_for_stmt(stmt);
91905 + cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
91906 + gimple_set_plf(cast_stmt, MY_STMT, true);
91907 + return gimple_get_lhs(cast_stmt);
91908 +}
91909 +
91910 +static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
91911 +{
91912 + tree lhs;
91913 + gimple_stmt_iterator gsi;
91914 +
91915 + if (rhs1 == NULL_TREE) {
91916 + debug_gimple_stmt(oldstmt);
91917 + error("%s: rhs1 is NULL_TREE", __func__);
91918 + gcc_unreachable();
91919 + }
91920 +
91921 + switch (gimple_code(oldstmt)) {
91922 + case GIMPLE_ASM:
91923 + lhs = rhs1;
91924 + break;
91925 + case GIMPLE_CALL:
91926 + lhs = gimple_call_lhs(oldstmt);
91927 + break;
91928 + case GIMPLE_ASSIGN:
91929 + lhs = gimple_get_lhs(oldstmt);
91930 + break;
91931 + default:
91932 + debug_gimple_stmt(oldstmt);
91933 + gcc_unreachable();
91934 + }
91935 +
91936 + gsi = gsi_for_stmt(oldstmt);
91937 + pointer_set_insert(visited, oldstmt);
91938 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
91939 + basic_block next_bb, cur_bb;
91940 + const_edge e;
91941 +
91942 + gcc_assert(before == false);
91943 + gcc_assert(stmt_can_throw_internal(oldstmt));
91944 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
91945 + gcc_assert(!gsi_end_p(gsi));
91946 +
91947 + cur_bb = gimple_bb(oldstmt);
91948 + next_bb = cur_bb->next_bb;
91949 + e = find_edge(cur_bb, next_bb);
91950 + gcc_assert(e != NULL);
91951 + gcc_assert(e->flags & EDGE_FALLTHRU);
91952 +
91953 + gsi = gsi_after_labels(next_bb);
91954 + gcc_assert(!gsi_end_p(gsi));
91955 +
91956 + before = true;
91957 + oldstmt = gsi_stmt(gsi);
91958 + }
91959 +
91960 + return cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
91961 +}
91962 +
91963 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
91964 +{
91965 + gimple stmt;
91966 + gimple_stmt_iterator gsi;
91967 + tree size_overflow_type, new_var, lhs = gimple_get_lhs(oldstmt);
91968 +
91969 + if (gimple_plf(oldstmt, MY_STMT))
91970 + return lhs;
91971 +
91972 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
91973 + rhs1 = gimple_assign_rhs1(oldstmt);
91974 + rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
91975 + }
91976 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
91977 + rhs2 = gimple_assign_rhs2(oldstmt);
91978 + rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
91979 + }
91980 +
91981 + stmt = gimple_copy(oldstmt);
91982 + gimple_set_location(stmt, gimple_location(oldstmt));
91983 + gimple_set_plf(stmt, MY_STMT, true);
91984 +
91985 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
91986 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
91987 +
91988 + size_overflow_type = get_size_overflow_type(oldstmt, node);
91989 +
91990 + if (is_bool(lhs))
91991 + new_var = SSA_NAME_VAR(lhs);
91992 + else
91993 + new_var = create_new_var(size_overflow_type);
91994 + new_var = make_ssa_name(new_var, stmt);
91995 + gimple_set_lhs(stmt, new_var);
91996 +
91997 + if (rhs1 != NULL_TREE)
91998 + gimple_assign_set_rhs1(stmt, rhs1);
91999 +
92000 + if (rhs2 != NULL_TREE)
92001 + gimple_assign_set_rhs2(stmt, rhs2);
92002 +#if BUILDING_GCC_VERSION >= 4007
92003 + if (rhs3 != NULL_TREE)
92004 + gimple_assign_set_rhs3(stmt, rhs3);
92005 +#endif
92006 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
92007 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
92008 +
92009 + gsi = gsi_for_stmt(oldstmt);
92010 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
92011 + update_stmt(stmt);
92012 + pointer_set_insert(visited, oldstmt);
92013 + return gimple_get_lhs(stmt);
92014 +}
92015 +
92016 +static gimple overflow_create_phi_node(gimple oldstmt, tree result)
92017 +{
92018 + basic_block bb;
92019 + gimple phi;
92020 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
92021 + gimple_seq seq;
92022 +
92023 + bb = gsi_bb(gsi);
92024 +
92025 + phi = create_phi_node(result, bb);
92026 + seq = phi_nodes(bb);
92027 + gsi = gsi_last(seq);
92028 + gsi_remove(&gsi, false);
92029 +
92030 + gsi = gsi_for_stmt(oldstmt);
92031 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
92032 + gimple_set_bb(phi, bb);
92033 + gimple_set_plf(phi, MY_STMT, true);
92034 + return phi;
92035 +}
92036 +
92037 +static basic_block create_a_first_bb(void)
92038 +{
92039 + basic_block first_bb;
92040 +
92041 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
92042 + gcc_assert(dom_info_available_p(CDI_DOMINATORS));
92043 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
92044 + return first_bb;
92045 +}
92046 +
92047 +static tree cast_old_phi_arg(gimple oldstmt, tree size_overflow_type, tree arg, tree new_var, unsigned int i)
92048 +{
92049 + basic_block bb;
92050 + const_gimple newstmt;
92051 + gimple_stmt_iterator gsi;
92052 + bool before = BEFORE_STMT;
92053 +
92054 + if (TREE_CODE(arg) == SSA_NAME && gimple_code(get_def_stmt(arg)) != GIMPLE_NOP) {
92055 + gsi = gsi_for_stmt(get_def_stmt(arg));
92056 + newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, AFTER_STMT);
92057 + return gimple_get_lhs(newstmt);
92058 + }
92059 +
92060 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
92061 + gsi = gsi_after_labels(bb);
92062 + if (bb->index == 0) {
92063 + bb = create_a_first_bb();
92064 + gsi = gsi_start_bb(bb);
92065 + }
92066 + newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, before);
92067 + return gimple_get_lhs(newstmt);
92068 +}
92069 +
92070 +static const_gimple handle_new_phi_arg(const_tree arg, tree new_var, tree new_rhs)
92071 +{
92072 + gimple newstmt;
92073 + gimple_stmt_iterator gsi;
92074 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
92075 + gimple def_newstmt = get_def_stmt(new_rhs);
92076 +
92077 + gsi_insert = gsi_insert_after;
92078 + gsi = gsi_for_stmt(def_newstmt);
92079 +
92080 + switch (gimple_code(get_def_stmt(arg))) {
92081 + case GIMPLE_PHI:
92082 + newstmt = gimple_build_assign(new_var, new_rhs);
92083 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
92084 + gsi_insert = gsi_insert_before;
92085 + break;
92086 + case GIMPLE_ASM:
92087 + case GIMPLE_CALL:
92088 + newstmt = gimple_build_assign(new_var, new_rhs);
92089 + break;
92090 + case GIMPLE_ASSIGN:
92091 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
92092 + break;
92093 + default:
92094 + /* unknown gimple_code (handle_build_new_phi_arg) */
92095 + gcc_unreachable();
92096 + }
92097 +
92098 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
92099 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
92100 + gimple_set_plf(newstmt, MY_STMT, true);
92101 + update_stmt(newstmt);
92102 + return newstmt;
92103 +}
92104 +
92105 +static tree build_new_phi_arg(struct pointer_set_t *visited, tree size_overflow_type, tree arg, tree new_var)
92106 +{
92107 + const_gimple newstmt;
92108 + gimple def_stmt;
92109 + tree new_rhs;
92110 +
92111 + new_rhs = expand(visited, arg);
92112 + if (new_rhs == NULL_TREE)
92113 + return NULL_TREE;
92114 +
92115 + def_stmt = get_def_stmt(new_rhs);
92116 + if (gimple_code(def_stmt) == GIMPLE_NOP)
92117 + return NULL_TREE;
92118 + new_rhs = cast_to_new_size_overflow_type(def_stmt, new_rhs, size_overflow_type, AFTER_STMT);
92119 +
92120 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
92121 + return gimple_get_lhs(newstmt);
92122 +}
92123 +
92124 +static tree build_new_phi(struct pointer_set_t *visited, tree orig_result)
92125 +{
92126 + gimple phi, oldstmt = get_def_stmt(orig_result);
92127 + tree new_result, size_overflow_type;
92128 + unsigned int i;
92129 + unsigned int n = gimple_phi_num_args(oldstmt);
92130 +
92131 + size_overflow_type = get_size_overflow_type(oldstmt, orig_result);
92132 +
92133 + new_result = create_new_var(size_overflow_type);
92134 +
92135 + pointer_set_insert(visited, oldstmt);
92136 + phi = overflow_create_phi_node(oldstmt, new_result);
92137 + for (i = 0; i < n; i++) {
92138 + tree arg, lhs;
92139 +
92140 + arg = gimple_phi_arg_def(oldstmt, i);
92141 + if (is_gimple_constant(arg))
92142 + arg = cast_a_tree(size_overflow_type, arg);
92143 + lhs = build_new_phi_arg(visited, size_overflow_type, arg, new_result);
92144 + if (lhs == NULL_TREE)
92145 + lhs = cast_old_phi_arg(oldstmt, size_overflow_type, arg, new_result, i);
92146 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
92147 + }
92148 +
92149 + update_stmt(phi);
92150 + return gimple_phi_result(phi);
92151 +}
92152 +
92153 +static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
92154 +{
92155 + const_gimple assign;
92156 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
92157 + tree origtype = TREE_TYPE(orig_rhs);
92158 +
92159 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
92160 +
92161 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
92162 + return gimple_get_lhs(assign);
92163 +}
92164 +
92165 +static void change_rhs1(gimple stmt, tree new_rhs1)
92166 +{
92167 + tree assign_rhs;
92168 + const_tree rhs = gimple_assign_rhs1(stmt);
92169 +
92170 + assign_rhs = change_assign_rhs(stmt, rhs, new_rhs1);
92171 + gimple_assign_set_rhs1(stmt, assign_rhs);
92172 + update_stmt(stmt);
92173 +}
92174 +
92175 +static bool check_mode_type(const_gimple stmt)
92176 +{
92177 + const_tree lhs = gimple_get_lhs(stmt);
92178 + const_tree lhs_type = TREE_TYPE(lhs);
92179 + const_tree rhs_type = TREE_TYPE(gimple_assign_rhs1(stmt));
92180 + enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
92181 + enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
92182 +
92183 + if (rhs_mode == lhs_mode && TYPE_UNSIGNED(rhs_type) == TYPE_UNSIGNED(lhs_type))
92184 + return false;
92185 +
92186 + if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type)))
92187 + return false;
92188 +
92189 + // skip lhs check on signed SI -> HI cast or signed SI -> QI cast
92190 + if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
92191 + return false;
92192 +
92193 + return true;
92194 +}
92195 +
92196 +static bool check_undefined_integer_operation(const_gimple stmt)
92197 +{
92198 + const_gimple def_stmt;
92199 + const_tree lhs = gimple_get_lhs(stmt);
92200 + const_tree rhs1 = gimple_assign_rhs1(stmt);
92201 + const_tree rhs1_type = TREE_TYPE(rhs1);
92202 + const_tree lhs_type = TREE_TYPE(lhs);
92203 +
92204 + if (TYPE_MODE(rhs1_type) != TYPE_MODE(lhs_type) || TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
92205 + return false;
92206 +
92207 + def_stmt = get_def_stmt(rhs1);
92208 + if (gimple_code(def_stmt) != GIMPLE_ASSIGN)
92209 + return false;
92210 +
92211 + if (gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
92212 + return false;
92213 + return true;
92214 +}
92215 +
92216 +static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
92217 +{
92218 + const_tree rhs1, lhs, rhs1_type, lhs_type;
92219 + enum machine_mode lhs_mode, rhs_mode;
92220 + gimple def_stmt = get_def_stmt(no_const_rhs);
92221 +
92222 + if (!gimple_assign_cast_p(def_stmt))
92223 + return false;
92224 +
92225 + rhs1 = gimple_assign_rhs1(def_stmt);
92226 + lhs = gimple_get_lhs(def_stmt);
92227 + rhs1_type = TREE_TYPE(rhs1);
92228 + lhs_type = TREE_TYPE(lhs);
92229 + rhs_mode = TYPE_MODE(rhs1_type);
92230 + lhs_mode = TYPE_MODE(lhs_type);
92231 + if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
92232 + return false;
92233 +
92234 + return true;
92235 +}
92236 +
92237 +static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt)
92238 +{
92239 + tree rhs1 = gimple_assign_rhs1(stmt);
92240 + tree lhs = gimple_get_lhs(stmt);
92241 + const_tree rhs1_type = TREE_TYPE(rhs1);
92242 + const_tree lhs_type = TREE_TYPE(lhs);
92243 +
92244 + if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
92245 + return create_assign(visited, stmt, lhs, AFTER_STMT);
92246 +
92247 + return create_assign(visited, stmt, rhs1, AFTER_STMT);
92248 +}
92249 +
92250 +static tree handle_unary_rhs(struct pointer_set_t *visited, gimple stmt)
92251 +{
92252 + tree size_overflow_type, lhs = gimple_get_lhs(stmt);
92253 + tree new_rhs1 = NULL_TREE;
92254 + tree rhs1 = gimple_assign_rhs1(stmt);
92255 + const_tree rhs1_type = TREE_TYPE(rhs1);
92256 + const_tree lhs_type = TREE_TYPE(lhs);
92257 +
92258 + if (gimple_plf(stmt, MY_STMT))
92259 + return lhs;
92260 +
92261 + if (TREE_CODE(rhs1_type) == POINTER_TYPE)
92262 + return create_assign(visited, stmt, lhs, AFTER_STMT);
92263 +
92264 + new_rhs1 = expand(visited, rhs1);
92265 +
92266 + if (new_rhs1 == NULL_TREE)
92267 + return create_cast_assign(visited, stmt);
92268 +
92269 + if (gimple_plf(stmt, NO_CAST_CHECK))
92270 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
92271 +
92272 + if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
92273 + size_overflow_type = get_size_overflow_type(stmt, rhs1);
92274 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
92275 + check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
92276 + return create_assign(visited, stmt, lhs, AFTER_STMT);
92277 + }
92278 +
92279 + if (!gimple_assign_cast_p(stmt) || check_undefined_integer_operation(stmt))
92280 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
92281 +
92282 + if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type))
92283 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
92284 +
92285 + size_overflow_type = get_size_overflow_type(stmt, rhs1);
92286 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
92287 +
92288 + check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
92289 +
92290 + change_rhs1(stmt, new_rhs1);
92291 +
92292 + if (!check_mode_type(stmt))
92293 + return create_assign(visited, stmt, lhs, AFTER_STMT);
92294 +
92295 + size_overflow_type = get_size_overflow_type(stmt, lhs);
92296 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
92297 +
92298 + check_size_overflow(stmt, size_overflow_type, new_rhs1, lhs, BEFORE_STMT);
92299 +
92300 + return create_assign(visited, stmt, lhs, AFTER_STMT);
92301 +}
92302 +
92303 +static tree handle_unary_ops(struct pointer_set_t *visited, gimple stmt)
92304 +{
92305 + tree rhs1, lhs = gimple_get_lhs(stmt);
92306 + gimple def_stmt = get_def_stmt(lhs);
92307 +
92308 + gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
92309 + rhs1 = gimple_assign_rhs1(def_stmt);
92310 +
92311 + if (is_gimple_constant(rhs1))
92312 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
92313 +
92314 + gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
92315 + switch (TREE_CODE(rhs1)) {
92316 + case SSA_NAME:
92317 + return handle_unary_rhs(visited, def_stmt);
92318 + case ARRAY_REF:
92319 + case BIT_FIELD_REF:
92320 + case ADDR_EXPR:
92321 + case COMPONENT_REF:
92322 + case INDIRECT_REF:
92323 +#if BUILDING_GCC_VERSION >= 4006
92324 + case MEM_REF:
92325 +#endif
92326 + case TARGET_MEM_REF:
92327 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
92328 + case PARM_DECL:
92329 + case VAR_DECL:
92330 + return create_assign(visited, stmt, lhs, AFTER_STMT);
92331 +
92332 + default:
92333 + debug_gimple_stmt(def_stmt);
92334 + debug_tree(rhs1);
92335 + gcc_unreachable();
92336 + }
92337 +}
92338 +
92339 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
92340 +{
92341 + gimple cond_stmt;
92342 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
92343 +
92344 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
92345 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
92346 + update_stmt(cond_stmt);
92347 +}
92348 +
92349 +static tree create_string_param(tree string)
92350 +{
92351 + tree i_type, a_type;
92352 + const int length = TREE_STRING_LENGTH(string);
92353 +
92354 + gcc_assert(length > 0);
92355 +
92356 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
92357 + a_type = build_array_type(char_type_node, i_type);
92358 +
92359 + TREE_TYPE(string) = a_type;
92360 + TREE_CONSTANT(string) = 1;
92361 + TREE_READONLY(string) = 1;
92362 +
92363 + return build1(ADDR_EXPR, ptr_type_node, string);
92364 +}
92365 +
92366 +static void insert_cond_result(basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
92367 +{
92368 + gimple func_stmt;
92369 + const_gimple def_stmt;
92370 + const_tree loc_line;
92371 + tree loc_file, ssa_name, current_func;
92372 + expanded_location xloc;
92373 + char *ssa_name_buf;
92374 + int len;
92375 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
92376 +
92377 + def_stmt = get_def_stmt(arg);
92378 + xloc = expand_location(gimple_location(def_stmt));
92379 +
92380 + if (!gimple_has_location(def_stmt)) {
92381 + xloc = expand_location(gimple_location(stmt));
92382 + if (!gimple_has_location(stmt))
92383 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
92384 + }
92385 +
92386 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
92387 +
92388 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
92389 + loc_file = create_string_param(loc_file);
92390 +
92391 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
92392 + current_func = create_string_param(current_func);
92393 +
92394 + gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
92395 + call_count++;
92396 + len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
92397 + gcc_assert(len > 0);
92398 + ssa_name = build_string(len + 1, ssa_name_buf);
92399 + free(ssa_name_buf);
92400 + ssa_name = create_string_param(ssa_name);
92401 +
92402 + // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
92403 + func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
92404 +
92405 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
92406 +}
92407 +
92408 +static void __unused print_the_code_insertions(const_gimple stmt)
92409 +{
92410 + location_t loc = gimple_location(stmt);
92411 +
92412 + inform(loc, "Integer size_overflow check applied here.");
92413 +}
92414 +
92415 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
92416 +{
92417 + basic_block cond_bb, join_bb, bb_true;
92418 + edge e;
92419 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
92420 +
92421 + cond_bb = gimple_bb(stmt);
92422 + if (before)
92423 + gsi_prev(&gsi);
92424 + if (gsi_end_p(gsi))
92425 + e = split_block_after_labels(cond_bb);
92426 + else
92427 + e = split_block(cond_bb, gsi_stmt(gsi));
92428 + cond_bb = e->src;
92429 + join_bb = e->dest;
92430 + e->flags = EDGE_FALSE_VALUE;
92431 + e->probability = REG_BR_PROB_BASE;
92432 +
92433 + bb_true = create_empty_bb(cond_bb);
92434 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
92435 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
92436 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
92437 +
92438 + gcc_assert(dom_info_available_p(CDI_DOMINATORS));
92439 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
92440 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
92441 +
92442 + if (current_loops != NULL) {
92443 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
92444 + add_bb_to_loop(bb_true, cond_bb->loop_father);
92445 + }
92446 +
92447 + insert_cond(cond_bb, arg, cond_code, type_value);
92448 + insert_cond_result(bb_true, stmt, arg, min);
92449 +
92450 +// print_the_code_insertions(stmt);
92451 +}
92452 +
92453 +static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
92454 +{
92455 + const_tree rhs_type = TREE_TYPE(rhs);
92456 + tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
92457 +
92458 + gcc_assert(rhs_type != NULL_TREE);
92459 + if (TREE_CODE(rhs_type) == POINTER_TYPE)
92460 + return;
92461 +
92462 + gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
92463 +
92464 + type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
92465 + // typemax (-1) < typemin (0)
92466 + if (TREE_OVERFLOW(type_max))
92467 + return;
92468 +
92469 + type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
92470 +
92471 + cast_rhs_type = TREE_TYPE(cast_rhs);
92472 + type_max_type = TREE_TYPE(type_max);
92473 + type_min_type = TREE_TYPE(type_min);
92474 + gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
92475 + gcc_assert(types_compatible_p(type_max_type, type_min_type));
92476 +
92477 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
92478 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
92479 +}
92480 +
92481 +static tree get_size_overflow_type_for_intentional_overflow(gimple def_stmt, tree change_rhs)
92482 +{
92483 + gimple change_rhs_def_stmt;
92484 + tree lhs = gimple_get_lhs(def_stmt);
92485 + tree lhs_type = TREE_TYPE(lhs);
92486 + tree rhs1_type = TREE_TYPE(gimple_assign_rhs1(def_stmt));
92487 + tree rhs2_type = TREE_TYPE(gimple_assign_rhs2(def_stmt));
92488 +
92489 + if (change_rhs == NULL_TREE)
92490 + return get_size_overflow_type(def_stmt, lhs);
92491 +
92492 + change_rhs_def_stmt = get_def_stmt(change_rhs);
92493 +
92494 + if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == tcc_comparison)
92495 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
92496 +
92497 + if (gimple_assign_rhs_code(def_stmt) == LSHIFT_EXPR)
92498 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
92499 +
92500 + if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR)
92501 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
92502 +
92503 + if (!types_compatible_p(lhs_type, rhs1_type) || !types_compatible_p(rhs1_type, rhs2_type)) {
92504 + debug_gimple_stmt(def_stmt);
92505 + gcc_unreachable();
92506 + }
92507 +
92508 + return get_size_overflow_type(def_stmt, lhs);
92509 +}
92510 +
92511 +static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
92512 +{
92513 + if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
92514 + return false;
92515 + if (!is_gimple_constant(rhs))
92516 + return false;
92517 + return true;
92518 +}
92519 +
92520 +static bool is_subtraction_special(const_gimple stmt)
92521 +{
92522 + gimple rhs1_def_stmt, rhs2_def_stmt;
92523 + const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
92524 + enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
92525 + const_tree rhs1 = gimple_assign_rhs1(stmt);
92526 + const_tree rhs2 = gimple_assign_rhs2(stmt);
92527 +
92528 + if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
92529 + return false;
92530 +
92531 + gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
92532 +
92533 + if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
92534 + return false;
92535 +
92536 + rhs1_def_stmt = get_def_stmt(rhs1);
92537 + rhs2_def_stmt = get_def_stmt(rhs2);
92538 + if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
92539 + return false;
92540 +
92541 + rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
92542 + rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
92543 + rhs1_def_stmt_lhs = gimple_get_lhs(rhs1_def_stmt);
92544 + rhs2_def_stmt_lhs = gimple_get_lhs(rhs2_def_stmt);
92545 + rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
92546 + rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
92547 + rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
92548 + rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
92549 + if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
92550 + return false;
92551 + if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
92552 + return false;
92553 +
92554 + gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
92555 + gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
92556 + return true;
92557 +}
92558 +
92559 +static tree get_def_stmt_rhs(const_tree var)
92560 +{
92561 + tree rhs1, def_stmt_rhs1;
92562 + gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
92563 +
92564 + def_stmt = get_def_stmt(var);
92565 + gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && gimple_plf(def_stmt, MY_STMT) && gimple_assign_cast_p(def_stmt));
92566 +
92567 + rhs1 = gimple_assign_rhs1(def_stmt);
92568 + rhs1_def_stmt = get_def_stmt(rhs1);
92569 + if (!gimple_assign_cast_p(rhs1_def_stmt))
92570 + return rhs1;
92571 +
92572 + def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
92573 + def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
92574 +
92575 + switch (gimple_code(def_stmt_rhs1_def_stmt)) {
92576 + case GIMPLE_CALL:
92577 + case GIMPLE_NOP:
92578 + case GIMPLE_ASM:
92579 + return def_stmt_rhs1;
92580 + case GIMPLE_ASSIGN:
92581 + return rhs1;
92582 + default:
92583 + debug_gimple_stmt(def_stmt_rhs1_def_stmt);
92584 + gcc_unreachable();
92585 + }
92586 +}
92587 +
92588 +static tree handle_integer_truncation(struct pointer_set_t *visited, const_tree lhs)
92589 +{
92590 + tree new_rhs1, new_rhs2;
92591 + tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
92592 + gimple assign, stmt = get_def_stmt(lhs);
92593 + tree rhs1 = gimple_assign_rhs1(stmt);
92594 + tree rhs2 = gimple_assign_rhs2(stmt);
92595 +
92596 + if (!is_subtraction_special(stmt))
92597 + return NULL_TREE;
92598 +
92599 + new_rhs1 = expand(visited, rhs1);
92600 + new_rhs2 = expand(visited, rhs2);
92601 +
92602 + new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
92603 + new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
92604 +
92605 + if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
92606 + new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
92607 + new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
92608 + }
92609 +
92610 + assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
92611 + new_lhs = gimple_get_lhs(assign);
92612 + check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
92613 +
92614 + return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
92615 +}
92616 +
92617 +static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
92618 +{
92619 + const_gimple def_stmt;
92620 +
92621 + if (TREE_CODE(rhs) != SSA_NAME)
92622 + return false;
92623 +
92624 + if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
92625 + return false;
92626 +
92627 + def_stmt = get_def_stmt(rhs);
92628 + if (gimple_code(def_stmt) != GIMPLE_ASSIGN || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
92629 + return false;
92630 +
92631 + return true;
92632 +}
92633 +
92634 +static tree handle_intentional_overflow(struct pointer_set_t *visited, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs1, tree new_rhs2)
92635 +{
92636 + tree new_rhs, size_overflow_type, orig_rhs;
92637 + void (*gimple_assign_set_rhs)(gimple, tree);
92638 + tree rhs1 = gimple_assign_rhs1(stmt);
92639 + tree rhs2 = gimple_assign_rhs2(stmt);
92640 + tree lhs = gimple_get_lhs(stmt);
92641 +
92642 + if (change_rhs == NULL_TREE)
92643 + return create_assign(visited, stmt, lhs, AFTER_STMT);
92644 +
92645 + if (new_rhs2 == NULL_TREE) {
92646 + size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs1);
92647 + new_rhs2 = cast_a_tree(size_overflow_type, rhs2);
92648 + orig_rhs = rhs1;
92649 + gimple_assign_set_rhs = &gimple_assign_set_rhs1;
92650 + } else {
92651 + size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs2);
92652 + new_rhs1 = cast_a_tree(size_overflow_type, rhs1);
92653 + orig_rhs = rhs2;
92654 + gimple_assign_set_rhs = &gimple_assign_set_rhs2;
92655 + }
92656 +
92657 + change_rhs = cast_to_new_size_overflow_type(stmt, change_rhs, size_overflow_type, BEFORE_STMT);
92658 +
92659 + if (check_overflow)
92660 + check_size_overflow(stmt, size_overflow_type, change_rhs, orig_rhs, BEFORE_STMT);
92661 +
92662 + new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
92663 + gimple_assign_set_rhs(stmt, new_rhs);
92664 + update_stmt(stmt);
92665 +
92666 + return create_assign(visited, stmt, lhs, AFTER_STMT);
92667 +}
92668 +
92669 +static tree handle_binary_ops(struct pointer_set_t *visited, tree lhs)
92670 +{
92671 + tree rhs1, rhs2, new_lhs;
92672 + gimple def_stmt = get_def_stmt(lhs);
92673 + tree new_rhs1 = NULL_TREE;
92674 + tree new_rhs2 = NULL_TREE;
92675 +
92676 + rhs1 = gimple_assign_rhs1(def_stmt);
92677 + rhs2 = gimple_assign_rhs2(def_stmt);
92678 +
92679 + /* no DImode/TImode division in the 32/64 bit kernel */
92680 + switch (gimple_assign_rhs_code(def_stmt)) {
92681 + case RDIV_EXPR:
92682 + case TRUNC_DIV_EXPR:
92683 + case CEIL_DIV_EXPR:
92684 + case FLOOR_DIV_EXPR:
92685 + case ROUND_DIV_EXPR:
92686 + case TRUNC_MOD_EXPR:
92687 + case CEIL_MOD_EXPR:
92688 + case FLOOR_MOD_EXPR:
92689 + case ROUND_MOD_EXPR:
92690 + case EXACT_DIV_EXPR:
92691 + case POINTER_PLUS_EXPR:
92692 + case BIT_AND_EXPR:
92693 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
92694 + default:
92695 + break;
92696 + }
92697 +
92698 + new_lhs = handle_integer_truncation(visited, lhs);
92699 + if (new_lhs != NULL_TREE)
92700 + return new_lhs;
92701 +
92702 + if (TREE_CODE(rhs1) == SSA_NAME)
92703 + new_rhs1 = expand(visited, rhs1);
92704 + if (TREE_CODE(rhs2) == SSA_NAME)
92705 + new_rhs2 = expand(visited, rhs2);
92706 +
92707 + if (is_a_neg_overflow(def_stmt, rhs2))
92708 + return handle_intentional_overflow(visited, true, def_stmt, new_rhs1, new_rhs1, NULL_TREE);
92709 + if (is_a_neg_overflow(def_stmt, rhs1))
92710 + return handle_intentional_overflow(visited, true, def_stmt, new_rhs2, NULL_TREE, new_rhs2);
92711 +
92712 + if (is_a_constant_overflow(def_stmt, rhs2))
92713 + return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, new_rhs1, NULL_TREE);
92714 + if (is_a_constant_overflow(def_stmt, rhs1))
92715 + return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, NULL_TREE, new_rhs2);
92716 +
92717 + return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
92718 +}
92719 +
92720 +#if BUILDING_GCC_VERSION >= 4007
92721 +static tree get_new_rhs(struct pointer_set_t *visited, tree size_overflow_type, tree rhs)
92722 +{
92723 + if (is_gimple_constant(rhs))
92724 + return cast_a_tree(size_overflow_type, rhs);
92725 + if (TREE_CODE(rhs) != SSA_NAME)
92726 + return NULL_TREE;
92727 + return expand(visited, rhs);
92728 +}
92729 +
92730 +static tree handle_ternary_ops(struct pointer_set_t *visited, tree lhs)
92731 +{
92732 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
92733 + gimple def_stmt = get_def_stmt(lhs);
92734 +
92735 + size_overflow_type = get_size_overflow_type(def_stmt, lhs);
92736 +
92737 + rhs1 = gimple_assign_rhs1(def_stmt);
92738 + rhs2 = gimple_assign_rhs2(def_stmt);
92739 + rhs3 = gimple_assign_rhs3(def_stmt);
92740 + new_rhs1 = get_new_rhs(visited, size_overflow_type, rhs1);
92741 + new_rhs2 = get_new_rhs(visited, size_overflow_type, rhs2);
92742 + new_rhs3 = get_new_rhs(visited, size_overflow_type, rhs3);
92743 +
92744 + return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
92745 +}
92746 +#endif
92747 +
92748 +static tree get_size_overflow_type(gimple stmt, const_tree node)
92749 +{
92750 + const_tree type;
92751 + tree new_type;
92752 +
92753 + gcc_assert(node != NULL_TREE);
92754 +
92755 + type = TREE_TYPE(node);
92756 +
92757 + if (gimple_plf(stmt, MY_STMT))
92758 + return TREE_TYPE(node);
92759 +
92760 + switch (TYPE_MODE(type)) {
92761 + case QImode:
92762 + new_type = intHI_type_node;
92763 + break;
92764 + case HImode:
92765 + new_type = intSI_type_node;
92766 + break;
92767 + case SImode:
92768 + new_type = intDI_type_node;
92769 + break;
92770 + case DImode:
92771 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
92772 + new_type = intDI_type_node;
92773 + else
92774 + new_type = intTI_type_node;
92775 + break;
92776 + default:
92777 + debug_tree((tree)node);
92778 + error("%s: unsupported gcc configuration.", __func__);
92779 + gcc_unreachable();
92780 + }
92781 +
92782 + if (TYPE_QUALS(type) != 0)
92783 + return build_qualified_type(new_type, TYPE_QUALS(type));
92784 + return new_type;
92785 +}
92786 +
92787 +static tree expand_visited(gimple def_stmt)
92788 +{
92789 + const_gimple next_stmt;
92790 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
92791 +
92792 + gsi_next(&gsi);
92793 + next_stmt = gsi_stmt(gsi);
92794 +
92795 + gcc_assert(gimple_plf((gimple)next_stmt, MY_STMT));
92796 +
92797 + switch (gimple_code(next_stmt)) {
92798 + case GIMPLE_ASSIGN:
92799 + return gimple_get_lhs(next_stmt);
92800 + case GIMPLE_PHI:
92801 + return gimple_phi_result(next_stmt);
92802 + case GIMPLE_CALL:
92803 + return gimple_call_lhs(next_stmt);
92804 + default:
92805 + return NULL_TREE;
92806 + }
92807 +}
92808 +
92809 +static tree expand(struct pointer_set_t *visited, tree lhs)
92810 +{
92811 + gimple def_stmt;
92812 + enum tree_code code = TREE_CODE(TREE_TYPE(lhs));
92813 +
92814 + if (is_gimple_constant(lhs))
92815 + return NULL_TREE;
92816 +
92817 + if (TREE_CODE(lhs) == ADDR_EXPR)
92818 + return NULL_TREE;
92819 +
92820 + if (code == REAL_TYPE)
92821 + return NULL_TREE;
92822 +
92823 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
92824 +
92825 + def_stmt = get_def_stmt(lhs);
92826 +
92827 + if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
92828 + return NULL_TREE;
92829 +
92830 + if (gimple_plf(def_stmt, MY_STMT))
92831 + return lhs;
92832 +
92833 + if (pointer_set_contains(visited, def_stmt))
92834 + return expand_visited(def_stmt);
92835 +
92836 + switch (gimple_code(def_stmt)) {
92837 + case GIMPLE_PHI:
92838 + return build_new_phi(visited, lhs);
92839 + case GIMPLE_CALL:
92840 + case GIMPLE_ASM:
92841 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
92842 + case GIMPLE_ASSIGN:
92843 + switch (gimple_num_ops(def_stmt)) {
92844 + case 2:
92845 + return handle_unary_ops(visited, def_stmt);
92846 + case 3:
92847 + return handle_binary_ops(visited, lhs);
92848 +#if BUILDING_GCC_VERSION >= 4007
92849 + case 4:
92850 + return handle_ternary_ops(visited, lhs);
92851 +#endif
92852 + }
92853 + default:
92854 + debug_gimple_stmt(def_stmt);
92855 + error("%s: unknown gimple code", __func__);
92856 + gcc_unreachable();
92857 + }
92858 +}
92859 +
92860 +static void change_function_arg(gimple stmt, const_tree origarg, unsigned int argnum, tree newarg)
92861 +{
92862 + const_gimple assign;
92863 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
92864 + tree origtype = TREE_TYPE(origarg);
92865 +
92866 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
92867 +
92868 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
92869 +
92870 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
92871 + update_stmt(stmt);
92872 +}
92873 +
92874 +static bool get_function_arg(unsigned int* argnum, const_tree fndecl)
92875 +{
92876 + const char *origid;
92877 + tree arg;
92878 + const_tree origarg;
92879 +
92880 + if (!DECL_ABSTRACT_ORIGIN(fndecl))
92881 + return true;
92882 +
92883 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
92884 + while (origarg && *argnum) {
92885 + (*argnum)--;
92886 + origarg = TREE_CHAIN(origarg);
92887 + }
92888 +
92889 + gcc_assert(*argnum == 0);
92890 +
92891 + gcc_assert(origarg != NULL_TREE);
92892 + origid = NAME(origarg);
92893 + *argnum = 0;
92894 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
92895 + if (!strcmp(origid, NAME(arg)))
92896 + return true;
92897 + (*argnum)++;
92898 + }
92899 + return false;
92900 +}
92901 +
92902 +static bool skip_types(const_tree var)
92903 +{
92904 + const_tree type;
92905 +
92906 + switch (TREE_CODE(var)) {
92907 + case ADDR_EXPR:
92908 +#if BUILDING_GCC_VERSION >= 4006
92909 + case MEM_REF:
92910 +#endif
92911 + case ARRAY_REF:
92912 + case BIT_FIELD_REF:
92913 + case INDIRECT_REF:
92914 + case TARGET_MEM_REF:
92915 + case VAR_DECL:
92916 + return true;
92917 + default:
92918 + break;
92919 + }
92920 +
92921 + type = TREE_TYPE(TREE_TYPE(var));
92922 + if (!type)
92923 + return false;
92924 + switch (TREE_CODE(type)) {
92925 + case RECORD_TYPE:
92926 + return true;
92927 + default:
92928 + break;
92929 + }
92930 +
92931 + return false;
92932 +}
92933 +
92934 +static bool walk_phi(struct pointer_set_t *visited, const_tree result)
92935 +{
92936 + gimple phi = get_def_stmt(result);
92937 + unsigned int i, n = gimple_phi_num_args(phi);
92938 +
92939 + if (!phi)
92940 + return false;
92941 +
92942 + pointer_set_insert(visited, phi);
92943 + for (i = 0; i < n; i++) {
92944 + const_tree arg = gimple_phi_arg_def(phi, i);
92945 + if (pre_expand(visited, arg))
92946 + return true;
92947 + }
92948 + return false;
92949 +}
92950 +
92951 +static bool walk_unary_ops(struct pointer_set_t *visited, const_tree lhs)
92952 +{
92953 + gimple def_stmt = get_def_stmt(lhs);
92954 + const_tree rhs;
92955 +
92956 + if (!def_stmt)
92957 + return false;
92958 +
92959 + rhs = gimple_assign_rhs1(def_stmt);
92960 + if (pre_expand(visited, rhs))
92961 + return true;
92962 + return false;
92963 +}
92964 +
92965 +static bool walk_binary_ops(struct pointer_set_t *visited, const_tree lhs)
92966 +{
92967 + bool rhs1_found, rhs2_found;
92968 + gimple def_stmt = get_def_stmt(lhs);
92969 + const_tree rhs1, rhs2;
92970 +
92971 + if (!def_stmt)
92972 + return false;
92973 +
92974 + rhs1 = gimple_assign_rhs1(def_stmt);
92975 + rhs2 = gimple_assign_rhs2(def_stmt);
92976 + rhs1_found = pre_expand(visited, rhs1);
92977 + rhs2_found = pre_expand(visited, rhs2);
92978 +
92979 + return rhs1_found || rhs2_found;
92980 +}
92981 +
92982 +static const_tree search_field_decl(const_tree comp_ref)
92983 +{
92984 + const_tree field = NULL_TREE;
92985 + unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
92986 +
92987 + for (i = 0; i < len; i++) {
92988 + field = TREE_OPERAND(comp_ref, i);
92989 + if (TREE_CODE(field) == FIELD_DECL)
92990 + break;
92991 + }
92992 + gcc_assert(TREE_CODE(field) == FIELD_DECL);
92993 + return field;
92994 +}
92995 +
92996 +static enum marked mark_status(const_tree fndecl, unsigned int argnum)
92997 +{
92998 + const_tree attr, p;
92999 +
93000 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(fndecl));
93001 + if (!attr || !TREE_VALUE(attr))
93002 + return MARKED_NO;
93003 +
93004 + p = TREE_VALUE(attr);
93005 + if (!TREE_INT_CST_LOW(TREE_VALUE(p)))
93006 + return MARKED_NOT_INTENTIONAL;
93007 +
93008 + do {
93009 + if (argnum == TREE_INT_CST_LOW(TREE_VALUE(p)))
93010 + return MARKED_YES;
93011 + p = TREE_CHAIN(p);
93012 + } while (p);
93013 +
93014 + return MARKED_NO;
93015 +}
93016 +
93017 +static void print_missing_msg(tree func, unsigned int argnum)
93018 +{
93019 + unsigned int new_hash;
93020 + size_t len;
93021 + unsigned char tree_codes[CODES_LIMIT];
93022 + location_t loc = DECL_SOURCE_LOCATION(func);
93023 + const char *curfunc = get_asm_name(func);
93024 +
93025 + len = get_function_decl(func, tree_codes);
93026 + new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0);
93027 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, new_hash);
93028 +}
93029 +
93030 +static unsigned int search_missing_attribute(const_tree arg)
93031 +{
93032 + const_tree type = TREE_TYPE(arg);
93033 + tree func = get_original_function_decl(current_function_decl);
93034 + unsigned int argnum;
93035 + const struct size_overflow_hash *hash;
93036 +
93037 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
93038 +
93039 + if (TREE_CODE(type) == POINTER_TYPE)
93040 + return 0;
93041 +
93042 + argnum = find_arg_number(arg, func);
93043 + if (argnum == 0)
93044 + return 0;
93045 +
93046 + if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func)))
93047 + return argnum;
93048 +
93049 + hash = get_function_hash(func);
93050 + if (!hash || !(hash->param & (1U << argnum))) {
93051 + print_missing_msg(func, argnum);
93052 + return 0;
93053 + }
93054 + return argnum;
93055 +}
93056 +
93057 +static bool is_already_marked(const_tree lhs)
93058 +{
93059 + unsigned int argnum;
93060 + const_tree fndecl;
93061 +
93062 + argnum = search_missing_attribute(lhs);
93063 + fndecl = get_original_function_decl(current_function_decl);
93064 + if (argnum && mark_status(fndecl, argnum) == MARKED_YES)
93065 + return true;
93066 + return false;
93067 +}
93068 +
93069 +static bool pre_expand(struct pointer_set_t *visited, const_tree lhs)
93070 +{
93071 + const_gimple def_stmt;
93072 +
93073 + if (is_gimple_constant(lhs))
93074 + return false;
93075 +
93076 + if (skip_types(lhs))
93077 + return false;
93078 +
93079 + // skip char type (FIXME: only kernel)
93080 + if (TYPE_MODE(TREE_TYPE(lhs)) == QImode)
93081 + return false;
93082 +
93083 + if (TREE_CODE(lhs) == PARM_DECL)
93084 + return is_already_marked(lhs);
93085 +
93086 + if (TREE_CODE(lhs) == COMPONENT_REF) {
93087 + const_tree field, attr;
93088 +
93089 + field = search_field_decl(lhs);
93090 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(field));
93091 + if (!attr || !TREE_VALUE(attr))
93092 + return false;
93093 + return true;
93094 + }
93095 +
93096 + def_stmt = get_def_stmt(lhs);
93097 +
93098 + if (!def_stmt)
93099 + return false;
93100 +
93101 + if (pointer_set_contains(visited, def_stmt))
93102 + return false;
93103 +
93104 + switch (gimple_code(def_stmt)) {
93105 + case GIMPLE_NOP:
93106 + if (TREE_CODE(SSA_NAME_VAR(lhs)) == PARM_DECL)
93107 + return is_already_marked(lhs);
93108 + return false;
93109 + case GIMPLE_PHI:
93110 + return walk_phi(visited, lhs);
93111 + case GIMPLE_CALL:
93112 + case GIMPLE_ASM:
93113 + return false;
93114 + case GIMPLE_ASSIGN:
93115 + switch (gimple_num_ops(def_stmt)) {
93116 + case 2:
93117 + return walk_unary_ops(visited, lhs);
93118 + case 3:
93119 + return walk_binary_ops(visited, lhs);
93120 + }
93121 + default:
93122 + debug_gimple_stmt((gimple)def_stmt);
93123 + error("%s: unknown gimple code", __func__);
93124 + gcc_unreachable();
93125 + }
93126 +}
93127 +
93128 +static bool search_attributes(tree fndecl, const_tree arg, unsigned int argnum)
93129 +{
93130 + struct pointer_set_t *visited;
93131 + bool is_found;
93132 + enum marked is_marked;
93133 + location_t loc;
93134 +
93135 + visited = pointer_set_create();
93136 + is_found = pre_expand(visited, arg);
93137 + pointer_set_destroy(visited);
93138 +
93139 + is_marked = mark_status(fndecl, argnum + 1);
93140 + if ((is_found && is_marked == MARKED_YES) || is_marked == MARKED_NOT_INTENTIONAL)
93141 + return true;
93142 +
93143 + if (is_found) {
93144 + loc = DECL_SOURCE_LOCATION(fndecl);
93145 + inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", get_asm_name(fndecl), argnum + 1);
93146 + return true;
93147 + }
93148 + return false;
93149 +}
93150 +
93151 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
93152 +{
93153 + struct pointer_set_t *visited;
93154 + tree arg, newarg;
93155 + bool match;
93156 +
93157 + match = get_function_arg(&argnum, fndecl);
93158 + if (!match)
93159 + return;
93160 + gcc_assert(gimple_call_num_args(stmt) > argnum);
93161 + arg = gimple_call_arg(stmt, argnum);
93162 + if (arg == NULL_TREE)
93163 + return;
93164 +
93165 + if (is_gimple_constant(arg))
93166 + return;
93167 +
93168 + if (search_attributes(fndecl, arg, argnum))
93169 + return;
93170 +
93171 + if (TREE_CODE(arg) != SSA_NAME)
93172 + return;
93173 +
93174 + check_arg_type(arg);
93175 +
93176 + visited = pointer_set_create();
93177 + newarg = expand(visited, arg);
93178 + pointer_set_destroy(visited);
93179 +
93180 + if (newarg == NULL_TREE)
93181 + return;
93182 +
93183 + change_function_arg(stmt, arg, argnum, newarg);
93184 +
93185 + check_size_overflow(stmt, TREE_TYPE(newarg), newarg, arg, BEFORE_STMT);
93186 +}
93187 +
93188 +static void handle_function_by_attribute(gimple stmt, const_tree attr, tree fndecl)
93189 +{
93190 + tree p = TREE_VALUE(attr);
93191 + do {
93192 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
93193 + p = TREE_CHAIN(p);
93194 + } while (p);
93195 +}
93196 +
93197 +static void handle_function_by_hash(gimple stmt, tree fndecl)
93198 +{
93199 + tree orig_fndecl;
93200 + unsigned int num;
93201 + const struct size_overflow_hash *hash;
93202 +
93203 + orig_fndecl = get_original_function_decl(fndecl);
93204 + if (C_DECL_IMPLICIT(orig_fndecl))
93205 + return;
93206 + hash = get_function_hash(orig_fndecl);
93207 + if (!hash)
93208 + return;
93209 +
93210 + for (num = 1; num <= MAX_PARAM; num++)
93211 + if (hash->param & (1U << num))
93212 + handle_function_arg(stmt, fndecl, num - 1);
93213 +}
93214 +
93215 +static void set_plf_false(void)
93216 +{
93217 + basic_block bb;
93218 +
93219 + FOR_ALL_BB(bb) {
93220 + gimple_stmt_iterator si;
93221 +
93222 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
93223 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
93224 + for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
93225 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
93226 + }
93227 +}
93228 +
93229 +static unsigned int handle_function(void)
93230 +{
93231 + basic_block next, bb = ENTRY_BLOCK_PTR->next_bb;
93232 +
93233 + set_plf_false();
93234 +
93235 + do {
93236 + gimple_stmt_iterator gsi;
93237 + next = bb->next_bb;
93238 +
93239 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
93240 + tree fndecl, attr;
93241 + gimple stmt = gsi_stmt(gsi);
93242 +
93243 + if (!(is_gimple_call(stmt)))
93244 + continue;
93245 + fndecl = gimple_call_fndecl(stmt);
93246 + if (fndecl == NULL_TREE)
93247 + continue;
93248 + if (gimple_call_num_args(stmt) == 0)
93249 + continue;
93250 + attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
93251 + if (!attr || !TREE_VALUE(attr))
93252 + handle_function_by_hash(stmt, fndecl);
93253 + else
93254 + handle_function_by_attribute(stmt, attr, fndecl);
93255 + gsi = gsi_for_stmt(stmt);
93256 + next = gimple_bb(stmt)->next_bb;
93257 + }
93258 + bb = next;
93259 + } while (bb);
93260 + return 0;
93261 +}
93262 +
93263 +static struct gimple_opt_pass size_overflow_pass = {
93264 + .pass = {
93265 + .type = GIMPLE_PASS,
93266 + .name = "size_overflow",
93267 +#if BUILDING_GCC_VERSION >= 4008
93268 + .optinfo_flags = OPTGROUP_NONE,
93269 +#endif
93270 + .gate = NULL,
93271 + .execute = handle_function,
93272 + .sub = NULL,
93273 + .next = NULL,
93274 + .static_pass_number = 0,
93275 + .tv_id = TV_NONE,
93276 + .properties_required = PROP_cfg,
93277 + .properties_provided = 0,
93278 + .properties_destroyed = 0,
93279 + .todo_flags_start = 0,
93280 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
93281 + }
93282 +};
93283 +
93284 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
93285 +{
93286 + tree fntype;
93287 +
93288 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
93289 +
93290 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
93291 + fntype = build_function_type_list(void_type_node,
93292 + const_char_ptr_type_node,
93293 + unsigned_type_node,
93294 + const_char_ptr_type_node,
93295 + const_char_ptr_type_node,
93296 + NULL_TREE);
93297 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
93298 +
93299 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
93300 + TREE_PUBLIC(report_size_overflow_decl) = 1;
93301 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
93302 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
93303 + TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
93304 +}
93305 +
93306 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
93307 +{
93308 + int i;
93309 + const char * const plugin_name = plugin_info->base_name;
93310 + const int argc = plugin_info->argc;
93311 + const struct plugin_argument * const argv = plugin_info->argv;
93312 + bool enable = true;
93313 +
93314 + struct register_pass_info size_overflow_pass_info = {
93315 + .pass = &size_overflow_pass.pass,
93316 + .reference_pass_name = "ssa",
93317 + .ref_pass_instance_number = 1,
93318 + .pos_op = PASS_POS_INSERT_AFTER
93319 + };
93320 +
93321 + if (!plugin_default_version_check(version, &gcc_version)) {
93322 + error(G_("incompatible gcc/plugin versions"));
93323 + return 1;
93324 + }
93325 +
93326 + for (i = 0; i < argc; ++i) {
93327 + if (!strcmp(argv[i].key, "no-size-overflow")) {
93328 + enable = false;
93329 + continue;
93330 + }
93331 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
93332 + }
93333 +
93334 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
93335 + if (enable) {
93336 + register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
93337 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
93338 + }
93339 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
93340 +
93341 + return 0;
93342 +}
93343 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
93344 new file mode 100644
93345 index 0000000..ac2901e
93346 --- /dev/null
93347 +++ b/tools/gcc/stackleak_plugin.c
93348 @@ -0,0 +1,327 @@
93349 +/*
93350 + * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
93351 + * Licensed under the GPL v2
93352 + *
93353 + * Note: the choice of the license means that the compilation process is
93354 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
93355 + * but for the kernel it doesn't matter since it doesn't link against
93356 + * any of the gcc libraries
93357 + *
93358 + * gcc plugin to help implement various PaX features
93359 + *
93360 + * - track lowest stack pointer
93361 + *
93362 + * TODO:
93363 + * - initialize all local variables
93364 + *
93365 + * BUGS:
93366 + * - none known
93367 + */
93368 +#include "gcc-plugin.h"
93369 +#include "config.h"
93370 +#include "system.h"
93371 +#include "coretypes.h"
93372 +#include "tree.h"
93373 +#include "tree-pass.h"
93374 +#include "flags.h"
93375 +#include "intl.h"
93376 +#include "toplev.h"
93377 +#include "plugin.h"
93378 +//#include "expr.h" where are you...
93379 +#include "diagnostic.h"
93380 +#include "plugin-version.h"
93381 +#include "tm.h"
93382 +#include "function.h"
93383 +#include "basic-block.h"
93384 +#include "gimple.h"
93385 +#include "rtl.h"
93386 +#include "emit-rtl.h"
93387 +
93388 +#if BUILDING_GCC_VERSION >= 4008
93389 +#define TODO_dump_func 0
93390 +#endif
93391 +
93392 +extern void print_gimple_stmt(FILE *, gimple, int, int);
93393 +
93394 +int plugin_is_GPL_compatible;
93395 +
93396 +static int track_frame_size = -1;
93397 +static const char track_function[] = "pax_track_stack";
93398 +static const char check_function[] = "pax_check_alloca";
93399 +static bool init_locals;
93400 +
93401 +static struct plugin_info stackleak_plugin_info = {
93402 + .version = "201302112000",
93403 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
93404 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
93405 +};
93406 +
93407 +static bool gate_stackleak_track_stack(void);
93408 +static unsigned int execute_stackleak_tree_instrument(void);
93409 +static unsigned int execute_stackleak_final(void);
93410 +
93411 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
93412 + .pass = {
93413 + .type = GIMPLE_PASS,
93414 + .name = "stackleak_tree_instrument",
93415 +#if BUILDING_GCC_VERSION >= 4008
93416 + .optinfo_flags = OPTGROUP_NONE,
93417 +#endif
93418 + .gate = gate_stackleak_track_stack,
93419 + .execute = execute_stackleak_tree_instrument,
93420 + .sub = NULL,
93421 + .next = NULL,
93422 + .static_pass_number = 0,
93423 + .tv_id = TV_NONE,
93424 + .properties_required = PROP_gimple_leh | PROP_cfg,
93425 + .properties_provided = 0,
93426 + .properties_destroyed = 0,
93427 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
93428 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
93429 + }
93430 +};
93431 +
93432 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
93433 + .pass = {
93434 + .type = RTL_PASS,
93435 + .name = "stackleak_final",
93436 +#if BUILDING_GCC_VERSION >= 4008
93437 + .optinfo_flags = OPTGROUP_NONE,
93438 +#endif
93439 + .gate = gate_stackleak_track_stack,
93440 + .execute = execute_stackleak_final,
93441 + .sub = NULL,
93442 + .next = NULL,
93443 + .static_pass_number = 0,
93444 + .tv_id = TV_NONE,
93445 + .properties_required = 0,
93446 + .properties_provided = 0,
93447 + .properties_destroyed = 0,
93448 + .todo_flags_start = 0,
93449 + .todo_flags_finish = TODO_dump_func
93450 + }
93451 +};
93452 +
93453 +static bool gate_stackleak_track_stack(void)
93454 +{
93455 + return track_frame_size >= 0;
93456 +}
93457 +
93458 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
93459 +{
93460 + gimple check_alloca;
93461 + tree fntype, fndecl, alloca_size;
93462 +
93463 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
93464 + fndecl = build_fn_decl(check_function, fntype);
93465 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
93466 +
93467 + // insert call to void pax_check_alloca(unsigned long size)
93468 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
93469 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
93470 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
93471 +}
93472 +
93473 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
93474 +{
93475 + gimple track_stack;
93476 + tree fntype, fndecl;
93477 +
93478 + fntype = build_function_type_list(void_type_node, NULL_TREE);
93479 + fndecl = build_fn_decl(track_function, fntype);
93480 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
93481 +
93482 + // insert call to void pax_track_stack(void)
93483 + track_stack = gimple_build_call(fndecl, 0);
93484 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
93485 +}
93486 +
93487 +#if BUILDING_GCC_VERSION == 4005
93488 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
93489 +{
93490 + tree fndecl;
93491 +
93492 + if (!is_gimple_call(stmt))
93493 + return false;
93494 + fndecl = gimple_call_fndecl(stmt);
93495 + if (!fndecl)
93496 + return false;
93497 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
93498 + return false;
93499 +// print_node(stderr, "pax", fndecl, 4);
93500 + return DECL_FUNCTION_CODE(fndecl) == code;
93501 +}
93502 +#endif
93503 +
93504 +static bool is_alloca(gimple stmt)
93505 +{
93506 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
93507 + return true;
93508 +
93509 +#if BUILDING_GCC_VERSION >= 4007
93510 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
93511 + return true;
93512 +#endif
93513 +
93514 + return false;
93515 +}
93516 +
93517 +static unsigned int execute_stackleak_tree_instrument(void)
93518 +{
93519 + basic_block bb, entry_bb;
93520 + bool prologue_instrumented = false, is_leaf = true;
93521 +
93522 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
93523 +
93524 + // 1. loop through BBs and GIMPLE statements
93525 + FOR_EACH_BB(bb) {
93526 + gimple_stmt_iterator gsi;
93527 +
93528 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
93529 + gimple stmt;
93530 +
93531 + stmt = gsi_stmt(gsi);
93532 +
93533 + if (is_gimple_call(stmt))
93534 + is_leaf = false;
93535 +
93536 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
93537 + if (!is_alloca(stmt))
93538 + continue;
93539 +
93540 + // 2. insert stack overflow check before each __builtin_alloca call
93541 + stackleak_check_alloca(&gsi);
93542 +
93543 + // 3. insert track call after each __builtin_alloca call
93544 + stackleak_add_instrumentation(&gsi);
93545 + if (bb == entry_bb)
93546 + prologue_instrumented = true;
93547 + }
93548 + }
93549 +
93550 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
93551 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
93552 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
93553 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
93554 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
93555 + return 0;
93556 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
93557 + return 0;
93558 +
93559 + // 4. insert track call at the beginning
93560 + if (!prologue_instrumented) {
93561 + gimple_stmt_iterator gsi;
93562 +
93563 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
93564 + if (dom_info_available_p(CDI_DOMINATORS))
93565 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
93566 + gsi = gsi_start_bb(bb);
93567 + stackleak_add_instrumentation(&gsi);
93568 + }
93569 +
93570 + return 0;
93571 +}
93572 +
93573 +static unsigned int execute_stackleak_final(void)
93574 +{
93575 + rtx insn, next;
93576 +
93577 + if (cfun->calls_alloca)
93578 + return 0;
93579 +
93580 + // keep calls only if function frame is big enough
93581 + if (get_frame_size() >= track_frame_size)
93582 + return 0;
93583 +
93584 + // 1. find pax_track_stack calls
93585 + for (insn = get_insns(); insn; insn = next) {
93586 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
93587 + rtx body;
93588 +
93589 + next = NEXT_INSN(insn);
93590 + if (!CALL_P(insn))
93591 + continue;
93592 + body = PATTERN(insn);
93593 + if (GET_CODE(body) != CALL)
93594 + continue;
93595 + body = XEXP(body, 0);
93596 + if (GET_CODE(body) != MEM)
93597 + continue;
93598 + body = XEXP(body, 0);
93599 + if (GET_CODE(body) != SYMBOL_REF)
93600 + continue;
93601 + if (strcmp(XSTR(body, 0), track_function))
93602 + continue;
93603 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
93604 + // 2. delete call
93605 + delete_insn_and_edges(insn);
93606 +#if BUILDING_GCC_VERSION >= 4007
93607 + if (GET_CODE(next) == NOTE && NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
93608 + insn = next;
93609 + next = NEXT_INSN(insn);
93610 + delete_insn_and_edges(insn);
93611 + }
93612 +#endif
93613 + }
93614 +
93615 +// print_simple_rtl(stderr, get_insns());
93616 +// print_rtl(stderr, get_insns());
93617 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
93618 +
93619 + return 0;
93620 +}
93621 +
93622 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
93623 +{
93624 + const char * const plugin_name = plugin_info->base_name;
93625 + const int argc = plugin_info->argc;
93626 + const struct plugin_argument * const argv = plugin_info->argv;
93627 + int i;
93628 + struct register_pass_info stackleak_tree_instrument_pass_info = {
93629 + .pass = &stackleak_tree_instrument_pass.pass,
93630 +// .reference_pass_name = "tree_profile",
93631 + .reference_pass_name = "optimized",
93632 + .ref_pass_instance_number = 1,
93633 + .pos_op = PASS_POS_INSERT_BEFORE
93634 + };
93635 + struct register_pass_info stackleak_final_pass_info = {
93636 + .pass = &stackleak_final_rtl_opt_pass.pass,
93637 + .reference_pass_name = "final",
93638 + .ref_pass_instance_number = 1,
93639 + .pos_op = PASS_POS_INSERT_BEFORE
93640 + };
93641 +
93642 + if (!plugin_default_version_check(version, &gcc_version)) {
93643 + error(G_("incompatible gcc/plugin versions"));
93644 + return 1;
93645 + }
93646 +
93647 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
93648 +
93649 + for (i = 0; i < argc; ++i) {
93650 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
93651 + if (!argv[i].value) {
93652 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
93653 + continue;
93654 + }
93655 + track_frame_size = atoi(argv[i].value);
93656 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
93657 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
93658 + continue;
93659 + }
93660 + if (!strcmp(argv[i].key, "initialize-locals")) {
93661 + if (argv[i].value) {
93662 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
93663 + continue;
93664 + }
93665 + init_locals = true;
93666 + continue;
93667 + }
93668 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
93669 + }
93670 +
93671 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
93672 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
93673 +
93674 + return 0;
93675 +}
93676 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
93677 index 6789d78..4afd019e 100644
93678 --- a/tools/perf/util/include/asm/alternative-asm.h
93679 +++ b/tools/perf/util/include/asm/alternative-asm.h
93680 @@ -5,4 +5,7 @@
93681
93682 #define altinstruction_entry #
93683
93684 + .macro pax_force_retaddr rip=0, reload=0
93685 + .endm
93686 +
93687 #endif
93688 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
93689 index 1cd693a..f4a7b20 100644
93690 --- a/virt/kvm/kvm_main.c
93691 +++ b/virt/kvm/kvm_main.c
93692 @@ -75,12 +75,17 @@ LIST_HEAD(vm_list);
93693
93694 static cpumask_var_t cpus_hardware_enabled;
93695 static int kvm_usage_count = 0;
93696 -static atomic_t hardware_enable_failed;
93697 +static atomic_unchecked_t hardware_enable_failed;
93698
93699 struct kmem_cache *kvm_vcpu_cache;
93700 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
93701
93702 -static __read_mostly struct preempt_ops kvm_preempt_ops;
93703 +static void kvm_sched_in(struct preempt_notifier *pn, int cpu);
93704 +static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next);
93705 +static struct preempt_ops kvm_preempt_ops = {
93706 + .sched_in = kvm_sched_in,
93707 + .sched_out = kvm_sched_out,
93708 +};
93709
93710 struct dentry *kvm_debugfs_dir;
93711
93712 @@ -731,7 +736,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
93713 /* We can read the guest memory with __xxx_user() later on. */
93714 if (user_alloc &&
93715 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
93716 - !access_ok(VERIFY_WRITE,
93717 + !__access_ok(VERIFY_WRITE,
93718 (void __user *)(unsigned long)mem->userspace_addr,
93719 mem->memory_size)))
93720 goto out;
93721 @@ -1783,7 +1788,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
93722 return 0;
93723 }
93724
93725 -static struct file_operations kvm_vcpu_fops = {
93726 +static file_operations_no_const kvm_vcpu_fops __read_only = {
93727 .release = kvm_vcpu_release,
93728 .unlocked_ioctl = kvm_vcpu_ioctl,
93729 #ifdef CONFIG_COMPAT
93730 @@ -2304,7 +2309,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
93731 return 0;
93732 }
93733
93734 -static struct file_operations kvm_vm_fops = {
93735 +static file_operations_no_const kvm_vm_fops __read_only = {
93736 .release = kvm_vm_release,
93737 .unlocked_ioctl = kvm_vm_ioctl,
93738 #ifdef CONFIG_COMPAT
93739 @@ -2402,7 +2407,7 @@ out:
93740 return r;
93741 }
93742
93743 -static struct file_operations kvm_chardev_ops = {
93744 +static file_operations_no_const kvm_chardev_ops __read_only = {
93745 .unlocked_ioctl = kvm_dev_ioctl,
93746 .compat_ioctl = kvm_dev_ioctl,
93747 .llseek = noop_llseek,
93748 @@ -2428,7 +2433,7 @@ static void hardware_enable_nolock(void *junk)
93749
93750 if (r) {
93751 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
93752 - atomic_inc(&hardware_enable_failed);
93753 + atomic_inc_unchecked(&hardware_enable_failed);
93754 printk(KERN_INFO "kvm: enabling virtualization on "
93755 "CPU%d failed\n", cpu);
93756 }
93757 @@ -2482,10 +2487,10 @@ static int hardware_enable_all(void)
93758
93759 kvm_usage_count++;
93760 if (kvm_usage_count == 1) {
93761 - atomic_set(&hardware_enable_failed, 0);
93762 + atomic_set_unchecked(&hardware_enable_failed, 0);
93763 on_each_cpu(hardware_enable_nolock, NULL, 1);
93764
93765 - if (atomic_read(&hardware_enable_failed)) {
93766 + if (atomic_read_unchecked(&hardware_enable_failed)) {
93767 hardware_disable_all_nolock();
93768 r = -EBUSY;
93769 }
93770 @@ -2843,7 +2848,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
93771 kvm_arch_vcpu_put(vcpu);
93772 }
93773
93774 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
93775 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
93776 struct module *module)
93777 {
93778 int r;
93779 @@ -2879,7 +2884,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
93780 if (!vcpu_align)
93781 vcpu_align = __alignof__(struct kvm_vcpu);
93782 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
93783 - 0, NULL);
93784 + SLAB_USERCOPY, NULL);
93785 if (!kvm_vcpu_cache) {
93786 r = -ENOMEM;
93787 goto out_free_3;
93788 @@ -2889,9 +2894,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
93789 if (r)
93790 goto out_free;
93791
93792 + pax_open_kernel();
93793 kvm_chardev_ops.owner = module;
93794 kvm_vm_fops.owner = module;
93795 kvm_vcpu_fops.owner = module;
93796 + pax_close_kernel();
93797
93798 r = misc_register(&kvm_dev);
93799 if (r) {
93800 @@ -2901,9 +2908,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
93801
93802 register_syscore_ops(&kvm_syscore_ops);
93803
93804 - kvm_preempt_ops.sched_in = kvm_sched_in;
93805 - kvm_preempt_ops.sched_out = kvm_sched_out;
93806 -
93807 r = kvm_init_debug();
93808 if (r) {
93809 printk(KERN_ERR "kvm: create debugfs files failed\n");