]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9.1-3.7.1-201301021808.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.7.1-201301021808.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index 74c25c8..deadba2 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38 -.*
39 +.[^g]*
40 +.gen*
41 .*.d
42 .mm
43 53c700_d.h
44 @@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48 +PERF*
49 SCCS
50 System.map*
51 TAGS
52 @@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56 +ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60 @@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64 +builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70 +clut_vga16.c
71 +common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78 +config.c
79 config.mak
80 config.mak.autogen
81 +config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85 @@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89 +dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93 +exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97 @@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101 +gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108 +hash
109 +hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113 @@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117 -kconfig
118 +kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125 -linux
126 +lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130 @@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134 -media
135 mconf
136 +mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143 +mkpiggy
144 mkprep
145 mkregtable
146 mktables
147 @@ -186,6 +205,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151 +parse-events*
152 +pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156 @@ -195,6 +216,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160 +pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164 @@ -204,7 +226,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168 +realmode.lds
169 +realmode.relocs
170 recordmcount
171 +regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175 @@ -214,8 +239,11 @@ series
176 setup
177 setup.bin
178 setup.elf
179 +size_overflow_hash.h
180 sImage
181 +slabinfo
182 sm_tbl*
183 +sortextable
184 split-include
185 syscalltab.h
186 tables.c
187 @@ -225,6 +253,7 @@ tftpboot.img
188 timeconst.h
189 times.h*
190 trix_boot.h
191 +user_constants.h
192 utsrelease.h*
193 vdso-syms.lds
194 vdso.lds
195 @@ -236,13 +265,17 @@ vdso32.lds
196 vdso32.so.dbg
197 vdso64.lds
198 vdso64.so.dbg
199 +vdsox32.lds
200 +vdsox32-syms.lds
201 version.h*
202 vmImage
203 vmlinux
204 vmlinux-*
205 vmlinux.aout
206 vmlinux.bin.all
207 +vmlinux.bin.bz2
208 vmlinux.lds
209 +vmlinux.relocs
210 vmlinuz
211 voffset.h
212 vsyscall.lds
213 @@ -250,9 +283,11 @@ vsyscall_32.lds
214 wanxlfw.inc
215 uImage
216 unifdef
217 +utsrelease.h
218 wakeup.bin
219 wakeup.elf
220 wakeup.lds
221 zImage*
222 zconf.hash.c
223 +zconf.lex.c
224 zoffset.h
225 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
226 index 9776f06..18b1856 100644
227 --- a/Documentation/kernel-parameters.txt
228 +++ b/Documentation/kernel-parameters.txt
229 @@ -905,6 +905,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
230 gpt [EFI] Forces disk with valid GPT signature but
231 invalid Protective MBR to be treated as GPT.
232
233 + grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
234 + ignore grsecurity's /proc restrictions
235 +
236 hashdist= [KNL,NUMA] Large hashes allocated during boot
237 are distributed across NUMA nodes. Defaults on
238 for 64-bit NUMA, off otherwise.
239 @@ -2082,6 +2085,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
240 the specified number of seconds. This is to be used if
241 your oopses keep scrolling off the screen.
242
243 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
244 + virtualization environments that don't cope well with the
245 + expand down segment used by UDEREF on X86-32 or the frequent
246 + page table updates on X86-64.
247 +
248 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
249 +
250 pcbit= [HW,ISDN]
251
252 pcd. [PARIDE]
253 diff --git a/Makefile b/Makefile
254 index fbf84a4..339f6de 100644
255 --- a/Makefile
256 +++ b/Makefile
257 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
258
259 HOSTCC = gcc
260 HOSTCXX = g++
261 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
262 -HOSTCXXFLAGS = -O2
263 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
264 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
265 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
266
267 # Decide whether to build built-in, modular, or both.
268 # Normally, just do built-in.
269 @@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
270 # Rules shared between *config targets and build targets
271
272 # Basic helpers built in scripts/
273 -PHONY += scripts_basic
274 -scripts_basic:
275 +PHONY += scripts_basic gcc-plugins
276 +scripts_basic: gcc-plugins
277 $(Q)$(MAKE) $(build)=scripts/basic
278 $(Q)rm -f .tmp_quiet_recordmcount
279
280 @@ -575,6 +576,60 @@ else
281 KBUILD_CFLAGS += -O2
282 endif
283
284 +ifndef DISABLE_PAX_PLUGINS
285 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
286 +ifneq ($(PLUGINCC),)
287 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
288 +ifndef CONFIG_UML
289 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
290 +endif
291 +endif
292 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
293 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
294 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
295 +endif
296 +ifdef CONFIG_KALLOCSTAT_PLUGIN
297 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
298 +endif
299 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
300 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
301 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
302 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
303 +endif
304 +ifdef CONFIG_CHECKER_PLUGIN
305 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
306 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
307 +endif
308 +endif
309 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
310 +ifdef CONFIG_PAX_SIZE_OVERFLOW
311 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
312 +endif
313 +ifdef CONFIG_PAX_LATENT_ENTROPY
314 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
315 +endif
316 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
317 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
318 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
319 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
320 +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
321 +ifeq ($(KBUILD_EXTMOD),)
322 +gcc-plugins:
323 + $(Q)$(MAKE) $(build)=tools/gcc
324 +else
325 +gcc-plugins: ;
326 +endif
327 +else
328 +gcc-plugins:
329 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
330 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
331 +else
332 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
333 +endif
334 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
335 +endif
336 +endif
337 +
338 include $(srctree)/arch/$(SRCARCH)/Makefile
339
340 ifdef CONFIG_READABLE_ASM
341 @@ -731,7 +786,7 @@ export mod_sign_cmd
342
343
344 ifeq ($(KBUILD_EXTMOD),)
345 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
346 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
347
348 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
349 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
350 @@ -778,6 +833,8 @@ endif
351
352 # The actual objects are generated when descending,
353 # make sure no implicit rule kicks in
354 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
355 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
356 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
357
358 # Handle descending into subdirectories listed in $(vmlinux-dirs)
359 @@ -787,7 +844,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
360 # Error messages still appears in the original language
361
362 PHONY += $(vmlinux-dirs)
363 -$(vmlinux-dirs): prepare scripts
364 +$(vmlinux-dirs): gcc-plugins prepare scripts
365 $(Q)$(MAKE) $(build)=$@
366
367 # Store (new) KERNELRELASE string in include/config/kernel.release
368 @@ -831,6 +888,7 @@ prepare0: archprepare FORCE
369 $(Q)$(MAKE) $(build)=.
370
371 # All the preparing..
372 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
373 prepare: prepare0
374
375 # Generate some files
376 @@ -938,6 +996,8 @@ all: modules
377 # using awk while concatenating to the final file.
378
379 PHONY += modules
380 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
381 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
382 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
383 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
384 @$(kecho) ' Building modules, stage 2.';
385 @@ -953,7 +1013,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
386
387 # Target to prepare building external modules
388 PHONY += modules_prepare
389 -modules_prepare: prepare scripts
390 +modules_prepare: gcc-plugins prepare scripts
391
392 # Target to install modules
393 PHONY += modules_install
394 @@ -1013,7 +1073,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
395 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
396 signing_key.priv signing_key.x509 x509.genkey \
397 extra_certificates signing_key.x509.keyid \
398 - signing_key.x509.signer
399 + signing_key.x509.signer tools/gcc/size_overflow_hash.h
400
401 # clean - Delete most, but leave enough to build external modules
402 #
403 @@ -1050,6 +1110,7 @@ distclean: mrproper
404 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
405 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
406 -o -name '.*.rej' \
407 + -o -name '.*.rej' -o -name '*.so' \
408 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
409 -type f -print | xargs rm -f
410
411 @@ -1210,6 +1271,8 @@ PHONY += $(module-dirs) modules
412 $(module-dirs): crmodverdir $(objtree)/Module.symvers
413 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
414
415 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
416 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
417 modules: $(module-dirs)
418 @$(kecho) ' Building modules, stage 2.';
419 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
420 @@ -1347,17 +1410,21 @@ else
421 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
422 endif
423
424 -%.s: %.c prepare scripts FORCE
425 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
426 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
427 +%.s: %.c gcc-plugins prepare scripts FORCE
428 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
429 %.i: %.c prepare scripts FORCE
430 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
431 -%.o: %.c prepare scripts FORCE
432 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
433 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
434 +%.o: %.c gcc-plugins prepare scripts FORCE
435 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
436 %.lst: %.c prepare scripts FORCE
437 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
438 -%.s: %.S prepare scripts FORCE
439 +%.s: %.S gcc-plugins prepare scripts FORCE
440 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
441 -%.o: %.S prepare scripts FORCE
442 +%.o: %.S gcc-plugins prepare scripts FORCE
443 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
444 %.symtypes: %.c prepare scripts FORCE
445 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
446 @@ -1367,11 +1434,15 @@ endif
447 $(cmd_crmodverdir)
448 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
449 $(build)=$(build-dir)
450 -%/: prepare scripts FORCE
451 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
452 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
453 +%/: gcc-plugins prepare scripts FORCE
454 $(cmd_crmodverdir)
455 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
456 $(build)=$(build-dir)
457 -%.ko: prepare scripts FORCE
458 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
459 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
460 +%.ko: gcc-plugins prepare scripts FORCE
461 $(cmd_crmodverdir)
462 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
463 $(build)=$(build-dir) $(@:.ko=.o)
464 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
465 index c2cbe4f..f7264b4 100644
466 --- a/arch/alpha/include/asm/atomic.h
467 +++ b/arch/alpha/include/asm/atomic.h
468 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
469 #define atomic_dec(v) atomic_sub(1,(v))
470 #define atomic64_dec(v) atomic64_sub(1,(v))
471
472 +#define atomic64_read_unchecked(v) atomic64_read(v)
473 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
474 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
475 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
476 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
477 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
478 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
479 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
480 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
481 +
482 #define smp_mb__before_atomic_dec() smp_mb()
483 #define smp_mb__after_atomic_dec() smp_mb()
484 #define smp_mb__before_atomic_inc() smp_mb()
485 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
486 index ad368a9..fbe0f25 100644
487 --- a/arch/alpha/include/asm/cache.h
488 +++ b/arch/alpha/include/asm/cache.h
489 @@ -4,19 +4,19 @@
490 #ifndef __ARCH_ALPHA_CACHE_H
491 #define __ARCH_ALPHA_CACHE_H
492
493 +#include <linux/const.h>
494
495 /* Bytes per L1 (data) cache line. */
496 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
497 -# define L1_CACHE_BYTES 64
498 # define L1_CACHE_SHIFT 6
499 #else
500 /* Both EV4 and EV5 are write-through, read-allocate,
501 direct-mapped, physical.
502 */
503 -# define L1_CACHE_BYTES 32
504 # define L1_CACHE_SHIFT 5
505 #endif
506
507 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
508 #define SMP_CACHE_BYTES L1_CACHE_BYTES
509
510 #endif
511 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
512 index 968d999..d36b2df 100644
513 --- a/arch/alpha/include/asm/elf.h
514 +++ b/arch/alpha/include/asm/elf.h
515 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
516
517 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
518
519 +#ifdef CONFIG_PAX_ASLR
520 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
521 +
522 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
523 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
524 +#endif
525 +
526 /* $0 is set by ld.so to a pointer to a function which might be
527 registered using atexit. This provides a mean for the dynamic
528 linker to call DT_FINI functions for shared libraries that have
529 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
530 index bc2a0da..8ad11ee 100644
531 --- a/arch/alpha/include/asm/pgalloc.h
532 +++ b/arch/alpha/include/asm/pgalloc.h
533 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
534 pgd_set(pgd, pmd);
535 }
536
537 +static inline void
538 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
539 +{
540 + pgd_populate(mm, pgd, pmd);
541 +}
542 +
543 extern pgd_t *pgd_alloc(struct mm_struct *mm);
544
545 static inline void
546 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
547 index 81a4342..348b927 100644
548 --- a/arch/alpha/include/asm/pgtable.h
549 +++ b/arch/alpha/include/asm/pgtable.h
550 @@ -102,6 +102,17 @@ struct vm_area_struct;
551 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
552 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
553 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
554 +
555 +#ifdef CONFIG_PAX_PAGEEXEC
556 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
557 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
558 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
559 +#else
560 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
561 +# define PAGE_COPY_NOEXEC PAGE_COPY
562 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
563 +#endif
564 +
565 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
566
567 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
568 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
569 index 2fd00b7..cfd5069 100644
570 --- a/arch/alpha/kernel/module.c
571 +++ b/arch/alpha/kernel/module.c
572 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
573
574 /* The small sections were sorted to the end of the segment.
575 The following should definitely cover them. */
576 - gp = (u64)me->module_core + me->core_size - 0x8000;
577 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
578 got = sechdrs[me->arch.gotsecindex].sh_addr;
579
580 for (i = 0; i < n; i++) {
581 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
582 index 14db93e..65de923 100644
583 --- a/arch/alpha/kernel/osf_sys.c
584 +++ b/arch/alpha/kernel/osf_sys.c
585 @@ -1304,7 +1304,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
586 /* At this point: (!vma || addr < vma->vm_end). */
587 if (limit - len < addr)
588 return -ENOMEM;
589 - if (!vma || addr + len <= vma->vm_start)
590 + if (check_heap_stack_gap(vma, addr, len))
591 return addr;
592 addr = vma->vm_end;
593 vma = vma->vm_next;
594 @@ -1340,6 +1340,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
595 merely specific addresses, but regions of memory -- perhaps
596 this feature should be incorporated into all ports? */
597
598 +#ifdef CONFIG_PAX_RANDMMAP
599 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
600 +#endif
601 +
602 if (addr) {
603 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
604 if (addr != (unsigned long) -ENOMEM)
605 @@ -1347,8 +1351,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
606 }
607
608 /* Next, try allocating at TASK_UNMAPPED_BASE. */
609 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
610 - len, limit);
611 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
612 +
613 if (addr != (unsigned long) -ENOMEM)
614 return addr;
615
616 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
617 index 0c4132d..88f0d53 100644
618 --- a/arch/alpha/mm/fault.c
619 +++ b/arch/alpha/mm/fault.c
620 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
621 __reload_thread(pcb);
622 }
623
624 +#ifdef CONFIG_PAX_PAGEEXEC
625 +/*
626 + * PaX: decide what to do with offenders (regs->pc = fault address)
627 + *
628 + * returns 1 when task should be killed
629 + * 2 when patched PLT trampoline was detected
630 + * 3 when unpatched PLT trampoline was detected
631 + */
632 +static int pax_handle_fetch_fault(struct pt_regs *regs)
633 +{
634 +
635 +#ifdef CONFIG_PAX_EMUPLT
636 + int err;
637 +
638 + do { /* PaX: patched PLT emulation #1 */
639 + unsigned int ldah, ldq, jmp;
640 +
641 + err = get_user(ldah, (unsigned int *)regs->pc);
642 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
643 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
644 +
645 + if (err)
646 + break;
647 +
648 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
649 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
650 + jmp == 0x6BFB0000U)
651 + {
652 + unsigned long r27, addr;
653 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
654 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
655 +
656 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
657 + err = get_user(r27, (unsigned long *)addr);
658 + if (err)
659 + break;
660 +
661 + regs->r27 = r27;
662 + regs->pc = r27;
663 + return 2;
664 + }
665 + } while (0);
666 +
667 + do { /* PaX: patched PLT emulation #2 */
668 + unsigned int ldah, lda, br;
669 +
670 + err = get_user(ldah, (unsigned int *)regs->pc);
671 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
672 + err |= get_user(br, (unsigned int *)(regs->pc+8));
673 +
674 + if (err)
675 + break;
676 +
677 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
678 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
679 + (br & 0xFFE00000U) == 0xC3E00000U)
680 + {
681 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
682 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
683 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
684 +
685 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
686 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
687 + return 2;
688 + }
689 + } while (0);
690 +
691 + do { /* PaX: unpatched PLT emulation */
692 + unsigned int br;
693 +
694 + err = get_user(br, (unsigned int *)regs->pc);
695 +
696 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
697 + unsigned int br2, ldq, nop, jmp;
698 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
699 +
700 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
701 + err = get_user(br2, (unsigned int *)addr);
702 + err |= get_user(ldq, (unsigned int *)(addr+4));
703 + err |= get_user(nop, (unsigned int *)(addr+8));
704 + err |= get_user(jmp, (unsigned int *)(addr+12));
705 + err |= get_user(resolver, (unsigned long *)(addr+16));
706 +
707 + if (err)
708 + break;
709 +
710 + if (br2 == 0xC3600000U &&
711 + ldq == 0xA77B000CU &&
712 + nop == 0x47FF041FU &&
713 + jmp == 0x6B7B0000U)
714 + {
715 + regs->r28 = regs->pc+4;
716 + regs->r27 = addr+16;
717 + regs->pc = resolver;
718 + return 3;
719 + }
720 + }
721 + } while (0);
722 +#endif
723 +
724 + return 1;
725 +}
726 +
727 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
728 +{
729 + unsigned long i;
730 +
731 + printk(KERN_ERR "PAX: bytes at PC: ");
732 + for (i = 0; i < 5; i++) {
733 + unsigned int c;
734 + if (get_user(c, (unsigned int *)pc+i))
735 + printk(KERN_CONT "???????? ");
736 + else
737 + printk(KERN_CONT "%08x ", c);
738 + }
739 + printk("\n");
740 +}
741 +#endif
742
743 /*
744 * This routine handles page faults. It determines the address,
745 @@ -133,8 +251,29 @@ retry:
746 good_area:
747 si_code = SEGV_ACCERR;
748 if (cause < 0) {
749 - if (!(vma->vm_flags & VM_EXEC))
750 + if (!(vma->vm_flags & VM_EXEC)) {
751 +
752 +#ifdef CONFIG_PAX_PAGEEXEC
753 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
754 + goto bad_area;
755 +
756 + up_read(&mm->mmap_sem);
757 + switch (pax_handle_fetch_fault(regs)) {
758 +
759 +#ifdef CONFIG_PAX_EMUPLT
760 + case 2:
761 + case 3:
762 + return;
763 +#endif
764 +
765 + }
766 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
767 + do_group_exit(SIGKILL);
768 +#else
769 goto bad_area;
770 +#endif
771 +
772 + }
773 } else if (!cause) {
774 /* Allow reads even for write-only mappings */
775 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
776 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
777 index c79f61f..9ac0642 100644
778 --- a/arch/arm/include/asm/atomic.h
779 +++ b/arch/arm/include/asm/atomic.h
780 @@ -17,17 +17,35 @@
781 #include <asm/barrier.h>
782 #include <asm/cmpxchg.h>
783
784 +#ifdef CONFIG_GENERIC_ATOMIC64
785 +#include <asm-generic/atomic64.h>
786 +#endif
787 +
788 #define ATOMIC_INIT(i) { (i) }
789
790 #ifdef __KERNEL__
791
792 +#define _ASM_EXTABLE(from, to) \
793 +" .pushsection __ex_table,\"a\"\n"\
794 +" .align 3\n" \
795 +" .long " #from ", " #to"\n" \
796 +" .popsection"
797 +
798 /*
799 * On ARM, ordinary assignment (str instruction) doesn't clear the local
800 * strex/ldrex monitor on some implementations. The reason we can use it for
801 * atomic_set() is the clrex or dummy strex done on every exception return.
802 */
803 #define atomic_read(v) (*(volatile int *)&(v)->counter)
804 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
805 +{
806 + return v->counter;
807 +}
808 #define atomic_set(v,i) (((v)->counter) = (i))
809 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
810 +{
811 + v->counter = i;
812 +}
813
814 #if __LINUX_ARM_ARCH__ >= 6
815
816 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
817 int result;
818
819 __asm__ __volatile__("@ atomic_add\n"
820 +"1: ldrex %1, [%3]\n"
821 +" adds %0, %1, %4\n"
822 +
823 +#ifdef CONFIG_PAX_REFCOUNT
824 +" bvc 3f\n"
825 +"2: bkpt 0xf103\n"
826 +"3:\n"
827 +#endif
828 +
829 +" strex %1, %0, [%3]\n"
830 +" teq %1, #0\n"
831 +" bne 1b"
832 +
833 +#ifdef CONFIG_PAX_REFCOUNT
834 +"\n4:\n"
835 + _ASM_EXTABLE(2b, 4b)
836 +#endif
837 +
838 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
839 + : "r" (&v->counter), "Ir" (i)
840 + : "cc");
841 +}
842 +
843 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
844 +{
845 + unsigned long tmp;
846 + int result;
847 +
848 + __asm__ __volatile__("@ atomic_add_unchecked\n"
849 "1: ldrex %0, [%3]\n"
850 " add %0, %0, %4\n"
851 " strex %1, %0, [%3]\n"
852 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
853 smp_mb();
854
855 __asm__ __volatile__("@ atomic_add_return\n"
856 +"1: ldrex %1, [%3]\n"
857 +" adds %0, %1, %4\n"
858 +
859 +#ifdef CONFIG_PAX_REFCOUNT
860 +" bvc 3f\n"
861 +" mov %0, %1\n"
862 +"2: bkpt 0xf103\n"
863 +"3:\n"
864 +#endif
865 +
866 +" strex %1, %0, [%3]\n"
867 +" teq %1, #0\n"
868 +" bne 1b"
869 +
870 +#ifdef CONFIG_PAX_REFCOUNT
871 +"\n4:\n"
872 + _ASM_EXTABLE(2b, 4b)
873 +#endif
874 +
875 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
876 + : "r" (&v->counter), "Ir" (i)
877 + : "cc");
878 +
879 + smp_mb();
880 +
881 + return result;
882 +}
883 +
884 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
885 +{
886 + unsigned long tmp;
887 + int result;
888 +
889 + smp_mb();
890 +
891 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
892 "1: ldrex %0, [%3]\n"
893 " add %0, %0, %4\n"
894 " strex %1, %0, [%3]\n"
895 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
896 int result;
897
898 __asm__ __volatile__("@ atomic_sub\n"
899 +"1: ldrex %1, [%3]\n"
900 +" subs %0, %1, %4\n"
901 +
902 +#ifdef CONFIG_PAX_REFCOUNT
903 +" bvc 3f\n"
904 +"2: bkpt 0xf103\n"
905 +"3:\n"
906 +#endif
907 +
908 +" strex %1, %0, [%3]\n"
909 +" teq %1, #0\n"
910 +" bne 1b"
911 +
912 +#ifdef CONFIG_PAX_REFCOUNT
913 +"\n4:\n"
914 + _ASM_EXTABLE(2b, 4b)
915 +#endif
916 +
917 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
918 + : "r" (&v->counter), "Ir" (i)
919 + : "cc");
920 +}
921 +
922 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
923 +{
924 + unsigned long tmp;
925 + int result;
926 +
927 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
928 "1: ldrex %0, [%3]\n"
929 " sub %0, %0, %4\n"
930 " strex %1, %0, [%3]\n"
931 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
932 smp_mb();
933
934 __asm__ __volatile__("@ atomic_sub_return\n"
935 -"1: ldrex %0, [%3]\n"
936 -" sub %0, %0, %4\n"
937 +"1: ldrex %1, [%3]\n"
938 +" subs %0, %1, %4\n"
939 +
940 +#ifdef CONFIG_PAX_REFCOUNT
941 +" bvc 3f\n"
942 +" mov %0, %1\n"
943 +"2: bkpt 0xf103\n"
944 +"3:\n"
945 +#endif
946 +
947 " strex %1, %0, [%3]\n"
948 " teq %1, #0\n"
949 " bne 1b"
950 +
951 +#ifdef CONFIG_PAX_REFCOUNT
952 +"\n4:\n"
953 + _ASM_EXTABLE(2b, 4b)
954 +#endif
955 +
956 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
957 : "r" (&v->counter), "Ir" (i)
958 : "cc");
959 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
960 return oldval;
961 }
962
963 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
964 +{
965 + unsigned long oldval, res;
966 +
967 + smp_mb();
968 +
969 + do {
970 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
971 + "ldrex %1, [%3]\n"
972 + "mov %0, #0\n"
973 + "teq %1, %4\n"
974 + "strexeq %0, %5, [%3]\n"
975 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
976 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
977 + : "cc");
978 + } while (res);
979 +
980 + smp_mb();
981 +
982 + return oldval;
983 +}
984 +
985 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
986 {
987 unsigned long tmp, tmp2;
988 @@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
989
990 return val;
991 }
992 +
993 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
994 +{
995 + return atomic_add_return(i, v);
996 +}
997 +
998 #define atomic_add(i, v) (void) atomic_add_return(i, v)
999 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1000 +{
1001 + (void) atomic_add_return(i, v);
1002 +}
1003
1004 static inline int atomic_sub_return(int i, atomic_t *v)
1005 {
1006 @@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1007 return val;
1008 }
1009 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1010 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1011 +{
1012 + (void) atomic_sub_return(i, v);
1013 +}
1014
1015 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1016 {
1017 @@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1018 return ret;
1019 }
1020
1021 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1022 +{
1023 + return atomic_cmpxchg(v, old, new);
1024 +}
1025 +
1026 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1027 {
1028 unsigned long flags;
1029 @@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1030 #endif /* __LINUX_ARM_ARCH__ */
1031
1032 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1033 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1034 +{
1035 + return xchg(&v->counter, new);
1036 +}
1037
1038 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1039 {
1040 @@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1041 }
1042
1043 #define atomic_inc(v) atomic_add(1, v)
1044 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1045 +{
1046 + atomic_add_unchecked(1, v);
1047 +}
1048 #define atomic_dec(v) atomic_sub(1, v)
1049 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1050 +{
1051 + atomic_sub_unchecked(1, v);
1052 +}
1053
1054 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1055 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1056 +{
1057 + return atomic_add_return_unchecked(1, v) == 0;
1058 +}
1059 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1060 #define atomic_inc_return(v) (atomic_add_return(1, v))
1061 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1062 +{
1063 + return atomic_add_return_unchecked(1, v);
1064 +}
1065 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1066 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1067
1068 @@ -241,6 +428,14 @@ typedef struct {
1069 u64 __aligned(8) counter;
1070 } atomic64_t;
1071
1072 +#ifdef CONFIG_PAX_REFCOUNT
1073 +typedef struct {
1074 + u64 __aligned(8) counter;
1075 +} atomic64_unchecked_t;
1076 +#else
1077 +typedef atomic64_t atomic64_unchecked_t;
1078 +#endif
1079 +
1080 #define ATOMIC64_INIT(i) { (i) }
1081
1082 static inline u64 atomic64_read(const atomic64_t *v)
1083 @@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1084 return result;
1085 }
1086
1087 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1088 +{
1089 + u64 result;
1090 +
1091 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1092 +" ldrexd %0, %H0, [%1]"
1093 + : "=&r" (result)
1094 + : "r" (&v->counter), "Qo" (v->counter)
1095 + );
1096 +
1097 + return result;
1098 +}
1099 +
1100 static inline void atomic64_set(atomic64_t *v, u64 i)
1101 {
1102 u64 tmp;
1103 @@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1104 : "cc");
1105 }
1106
1107 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1108 +{
1109 + u64 tmp;
1110 +
1111 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1112 +"1: ldrexd %0, %H0, [%2]\n"
1113 +" strexd %0, %3, %H3, [%2]\n"
1114 +" teq %0, #0\n"
1115 +" bne 1b"
1116 + : "=&r" (tmp), "=Qo" (v->counter)
1117 + : "r" (&v->counter), "r" (i)
1118 + : "cc");
1119 +}
1120 +
1121 static inline void atomic64_add(u64 i, atomic64_t *v)
1122 {
1123 u64 result;
1124 @@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1125 __asm__ __volatile__("@ atomic64_add\n"
1126 "1: ldrexd %0, %H0, [%3]\n"
1127 " adds %0, %0, %4\n"
1128 +" adcs %H0, %H0, %H4\n"
1129 +
1130 +#ifdef CONFIG_PAX_REFCOUNT
1131 +" bvc 3f\n"
1132 +"2: bkpt 0xf103\n"
1133 +"3:\n"
1134 +#endif
1135 +
1136 +" strexd %1, %0, %H0, [%3]\n"
1137 +" teq %1, #0\n"
1138 +" bne 1b"
1139 +
1140 +#ifdef CONFIG_PAX_REFCOUNT
1141 +"\n4:\n"
1142 + _ASM_EXTABLE(2b, 4b)
1143 +#endif
1144 +
1145 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1146 + : "r" (&v->counter), "r" (i)
1147 + : "cc");
1148 +}
1149 +
1150 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1151 +{
1152 + u64 result;
1153 + unsigned long tmp;
1154 +
1155 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1156 +"1: ldrexd %0, %H0, [%3]\n"
1157 +" adds %0, %0, %4\n"
1158 " adc %H0, %H0, %H4\n"
1159 " strexd %1, %0, %H0, [%3]\n"
1160 " teq %1, #0\n"
1161 @@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1162
1163 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1164 {
1165 - u64 result;
1166 - unsigned long tmp;
1167 + u64 result, tmp;
1168
1169 smp_mb();
1170
1171 __asm__ __volatile__("@ atomic64_add_return\n"
1172 +"1: ldrexd %1, %H1, [%3]\n"
1173 +" adds %0, %1, %4\n"
1174 +" adcs %H0, %H1, %H4\n"
1175 +
1176 +#ifdef CONFIG_PAX_REFCOUNT
1177 +" bvc 3f\n"
1178 +" mov %0, %1\n"
1179 +" mov %H0, %H1\n"
1180 +"2: bkpt 0xf103\n"
1181 +"3:\n"
1182 +#endif
1183 +
1184 +" strexd %1, %0, %H0, [%3]\n"
1185 +" teq %1, #0\n"
1186 +" bne 1b"
1187 +
1188 +#ifdef CONFIG_PAX_REFCOUNT
1189 +"\n4:\n"
1190 + _ASM_EXTABLE(2b, 4b)
1191 +#endif
1192 +
1193 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1194 + : "r" (&v->counter), "r" (i)
1195 + : "cc");
1196 +
1197 + smp_mb();
1198 +
1199 + return result;
1200 +}
1201 +
1202 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1203 +{
1204 + u64 result;
1205 + unsigned long tmp;
1206 +
1207 + smp_mb();
1208 +
1209 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1210 "1: ldrexd %0, %H0, [%3]\n"
1211 " adds %0, %0, %4\n"
1212 " adc %H0, %H0, %H4\n"
1213 @@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1214 __asm__ __volatile__("@ atomic64_sub\n"
1215 "1: ldrexd %0, %H0, [%3]\n"
1216 " subs %0, %0, %4\n"
1217 +" sbcs %H0, %H0, %H4\n"
1218 +
1219 +#ifdef CONFIG_PAX_REFCOUNT
1220 +" bvc 3f\n"
1221 +"2: bkpt 0xf103\n"
1222 +"3:\n"
1223 +#endif
1224 +
1225 +" strexd %1, %0, %H0, [%3]\n"
1226 +" teq %1, #0\n"
1227 +" bne 1b"
1228 +
1229 +#ifdef CONFIG_PAX_REFCOUNT
1230 +"\n4:\n"
1231 + _ASM_EXTABLE(2b, 4b)
1232 +#endif
1233 +
1234 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1235 + : "r" (&v->counter), "r" (i)
1236 + : "cc");
1237 +}
1238 +
1239 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1240 +{
1241 + u64 result;
1242 + unsigned long tmp;
1243 +
1244 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1245 +"1: ldrexd %0, %H0, [%3]\n"
1246 +" subs %0, %0, %4\n"
1247 " sbc %H0, %H0, %H4\n"
1248 " strexd %1, %0, %H0, [%3]\n"
1249 " teq %1, #0\n"
1250 @@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1251
1252 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1253 {
1254 - u64 result;
1255 - unsigned long tmp;
1256 + u64 result, tmp;
1257
1258 smp_mb();
1259
1260 __asm__ __volatile__("@ atomic64_sub_return\n"
1261 -"1: ldrexd %0, %H0, [%3]\n"
1262 -" subs %0, %0, %4\n"
1263 -" sbc %H0, %H0, %H4\n"
1264 +"1: ldrexd %1, %H1, [%3]\n"
1265 +" subs %0, %1, %4\n"
1266 +" sbcs %H0, %H1, %H4\n"
1267 +
1268 +#ifdef CONFIG_PAX_REFCOUNT
1269 +" bvc 3f\n"
1270 +" mov %0, %1\n"
1271 +" mov %H0, %H1\n"
1272 +"2: bkpt 0xf103\n"
1273 +"3:\n"
1274 +#endif
1275 +
1276 " strexd %1, %0, %H0, [%3]\n"
1277 " teq %1, #0\n"
1278 " bne 1b"
1279 +
1280 +#ifdef CONFIG_PAX_REFCOUNT
1281 +"\n4:\n"
1282 + _ASM_EXTABLE(2b, 4b)
1283 +#endif
1284 +
1285 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1286 : "r" (&v->counter), "r" (i)
1287 : "cc");
1288 @@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1289 return oldval;
1290 }
1291
1292 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1293 +{
1294 + u64 oldval;
1295 + unsigned long res;
1296 +
1297 + smp_mb();
1298 +
1299 + do {
1300 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1301 + "ldrexd %1, %H1, [%3]\n"
1302 + "mov %0, #0\n"
1303 + "teq %1, %4\n"
1304 + "teqeq %H1, %H4\n"
1305 + "strexdeq %0, %5, %H5, [%3]"
1306 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1307 + : "r" (&ptr->counter), "r" (old), "r" (new)
1308 + : "cc");
1309 + } while (res);
1310 +
1311 + smp_mb();
1312 +
1313 + return oldval;
1314 +}
1315 +
1316 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1317 {
1318 u64 result;
1319 @@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1320
1321 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1322 {
1323 - u64 result;
1324 - unsigned long tmp;
1325 + u64 result, tmp;
1326
1327 smp_mb();
1328
1329 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1330 -"1: ldrexd %0, %H0, [%3]\n"
1331 -" subs %0, %0, #1\n"
1332 -" sbc %H0, %H0, #0\n"
1333 +"1: ldrexd %1, %H1, [%3]\n"
1334 +" subs %0, %1, #1\n"
1335 +" sbcs %H0, %H1, #0\n"
1336 +
1337 +#ifdef CONFIG_PAX_REFCOUNT
1338 +" bvc 3f\n"
1339 +" mov %0, %1\n"
1340 +" mov %H0, %H1\n"
1341 +"2: bkpt 0xf103\n"
1342 +"3:\n"
1343 +#endif
1344 +
1345 " teq %H0, #0\n"
1346 -" bmi 2f\n"
1347 +" bmi 4f\n"
1348 " strexd %1, %0, %H0, [%3]\n"
1349 " teq %1, #0\n"
1350 " bne 1b\n"
1351 -"2:"
1352 +"4:\n"
1353 +
1354 +#ifdef CONFIG_PAX_REFCOUNT
1355 + _ASM_EXTABLE(2b, 4b)
1356 +#endif
1357 +
1358 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1359 : "r" (&v->counter)
1360 : "cc");
1361 @@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1362 " teq %0, %5\n"
1363 " teqeq %H0, %H5\n"
1364 " moveq %1, #0\n"
1365 -" beq 2f\n"
1366 +" beq 4f\n"
1367 " adds %0, %0, %6\n"
1368 -" adc %H0, %H0, %H6\n"
1369 +" adcs %H0, %H0, %H6\n"
1370 +
1371 +#ifdef CONFIG_PAX_REFCOUNT
1372 +" bvc 3f\n"
1373 +"2: bkpt 0xf103\n"
1374 +"3:\n"
1375 +#endif
1376 +
1377 " strexd %2, %0, %H0, [%4]\n"
1378 " teq %2, #0\n"
1379 " bne 1b\n"
1380 -"2:"
1381 +"4:\n"
1382 +
1383 +#ifdef CONFIG_PAX_REFCOUNT
1384 + _ASM_EXTABLE(2b, 4b)
1385 +#endif
1386 +
1387 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1388 : "r" (&v->counter), "r" (u), "r" (a)
1389 : "cc");
1390 @@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1391
1392 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1393 #define atomic64_inc(v) atomic64_add(1LL, (v))
1394 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1395 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1396 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1397 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1398 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1399 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1400 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1401 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1402 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1403 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1404 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1405 index 75fe66b..ba3dee4 100644
1406 --- a/arch/arm/include/asm/cache.h
1407 +++ b/arch/arm/include/asm/cache.h
1408 @@ -4,8 +4,10 @@
1409 #ifndef __ASMARM_CACHE_H
1410 #define __ASMARM_CACHE_H
1411
1412 +#include <linux/const.h>
1413 +
1414 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1415 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1416 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1417
1418 /*
1419 * Memory returned by kmalloc() may be used for DMA, so we must make
1420 @@ -24,5 +26,6 @@
1421 #endif
1422
1423 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1424 +#define __read_only __attribute__ ((__section__(".data..read_only")))
1425
1426 #endif
1427 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1428 index e1489c5..d418304 100644
1429 --- a/arch/arm/include/asm/cacheflush.h
1430 +++ b/arch/arm/include/asm/cacheflush.h
1431 @@ -116,7 +116,7 @@ struct cpu_cache_fns {
1432 void (*dma_unmap_area)(const void *, size_t, int);
1433
1434 void (*dma_flush_range)(const void *, const void *);
1435 -};
1436 +} __no_const;
1437
1438 /*
1439 * Select the calling method
1440 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1441 index 7eb18c1..e38b6d2 100644
1442 --- a/arch/arm/include/asm/cmpxchg.h
1443 +++ b/arch/arm/include/asm/cmpxchg.h
1444 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1445
1446 #define xchg(ptr,x) \
1447 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1448 +#define xchg_unchecked(ptr,x) \
1449 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1450
1451 #include <asm-generic/cmpxchg-local.h>
1452
1453 diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
1454 index ab98fdd..6b19938 100644
1455 --- a/arch/arm/include/asm/delay.h
1456 +++ b/arch/arm/include/asm/delay.h
1457 @@ -24,9 +24,9 @@ extern struct arm_delay_ops {
1458 void (*delay)(unsigned long);
1459 void (*const_udelay)(unsigned long);
1460 void (*udelay)(unsigned long);
1461 -} arm_delay_ops;
1462 +} *arm_delay_ops;
1463
1464 -#define __delay(n) arm_delay_ops.delay(n)
1465 +#define __delay(n) arm_delay_ops->delay(n)
1466
1467 /*
1468 * This function intentionally does not exist; if you see references to
1469 @@ -47,8 +47,8 @@ extern void __bad_udelay(void);
1470 * first constant multiplications gets optimized away if the delay is
1471 * a constant)
1472 */
1473 -#define __udelay(n) arm_delay_ops.udelay(n)
1474 -#define __const_udelay(n) arm_delay_ops.const_udelay(n)
1475 +#define __udelay(n) arm_delay_ops->udelay(n)
1476 +#define __const_udelay(n) arm_delay_ops->const_udelay(n)
1477
1478 #define udelay(n) \
1479 (__builtin_constant_p(n) ? \
1480 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1481 index 38050b1..9d90e8b 100644
1482 --- a/arch/arm/include/asm/elf.h
1483 +++ b/arch/arm/include/asm/elf.h
1484 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1485 the loader. We need to make sure that it is out of the way of the program
1486 that it will "exec", and that there is sufficient room for the brk. */
1487
1488 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1489 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1490 +
1491 +#ifdef CONFIG_PAX_ASLR
1492 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1493 +
1494 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1495 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1496 +#endif
1497
1498 /* When the program starts, a1 contains a pointer to a function to be
1499 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1500 @@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1501 extern void elf_set_personality(const struct elf32_hdr *);
1502 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1503
1504 -struct mm_struct;
1505 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1506 -#define arch_randomize_brk arch_randomize_brk
1507 -
1508 #endif
1509 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1510 index 83eb2f7..ed77159 100644
1511 --- a/arch/arm/include/asm/kmap_types.h
1512 +++ b/arch/arm/include/asm/kmap_types.h
1513 @@ -4,6 +4,6 @@
1514 /*
1515 * This is the "bare minimum". AIO seems to require this.
1516 */
1517 -#define KM_TYPE_NR 16
1518 +#define KM_TYPE_NR 17
1519
1520 #endif
1521 diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1522 index 195ac2f..2272f0d 100644
1523 --- a/arch/arm/include/asm/mach/map.h
1524 +++ b/arch/arm/include/asm/mach/map.h
1525 @@ -34,6 +34,9 @@ struct map_desc {
1526 #define MT_MEMORY_ITCM 13
1527 #define MT_MEMORY_SO 14
1528 #define MT_MEMORY_DMA_READY 15
1529 +#define MT_MEMORY_R 16
1530 +#define MT_MEMORY_RW 17
1531 +#define MT_MEMORY_RX 18
1532
1533 #ifdef CONFIG_MMU
1534 extern void iotable_init(struct map_desc *, int);
1535 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1536 index 53426c6..c7baff3 100644
1537 --- a/arch/arm/include/asm/outercache.h
1538 +++ b/arch/arm/include/asm/outercache.h
1539 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1540 #endif
1541 void (*set_debug)(unsigned long);
1542 void (*resume)(void);
1543 -};
1544 +} __no_const;
1545
1546 #ifdef CONFIG_OUTER_CACHE
1547
1548 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1549 index 812a494..71fc0b6 100644
1550 --- a/arch/arm/include/asm/page.h
1551 +++ b/arch/arm/include/asm/page.h
1552 @@ -114,7 +114,7 @@ struct cpu_user_fns {
1553 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1554 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1555 unsigned long vaddr, struct vm_area_struct *vma);
1556 -};
1557 +} __no_const;
1558
1559 #ifdef MULTI_USER
1560 extern struct cpu_user_fns cpu_user;
1561 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1562 index 943504f..1ad2de8 100644
1563 --- a/arch/arm/include/asm/pgalloc.h
1564 +++ b/arch/arm/include/asm/pgalloc.h
1565 @@ -22,7 +22,7 @@
1566
1567 #ifdef CONFIG_MMU
1568
1569 -#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
1570 +#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_PXNTABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
1571 #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
1572
1573 #ifdef CONFIG_ARM_LPAE
1574 @@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1575 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1576 }
1577
1578 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1579 +{
1580 + pud_populate(mm, pud, pmd);
1581 +}
1582 +
1583 #else /* !CONFIG_ARM_LPAE */
1584
1585 /*
1586 @@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1587 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1588 #define pmd_free(mm, pmd) do { } while (0)
1589 #define pud_populate(mm,pmd,pte) BUG()
1590 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1591
1592 #endif /* CONFIG_ARM_LPAE */
1593
1594 @@ -126,6 +132,16 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1595 __free_page(pte);
1596 }
1597
1598 +static inline void __pmd_update(pmd_t *pmdp, pmdval_t prot)
1599 +{
1600 + pmdval_t pmdval = pmd_val(*pmdp) | prot;
1601 + pmdp[0] = __pmd(pmdval);
1602 +#ifndef CONFIG_ARM_LPAE
1603 + pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
1604 +#endif
1605 + flush_pmd_entry(pmdp);
1606 +}
1607 +
1608 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1609 pmdval_t prot)
1610 {
1611 diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1612 index 5cfba15..d437dc2 100644
1613 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1614 +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1615 @@ -20,12 +20,15 @@
1616 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1617 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1618 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1619 +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* PXN */
1620 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1621 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1622 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1623 +
1624 /*
1625 * - section
1626 */
1627 +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0)
1628 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1629 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1630 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1631 @@ -37,6 +40,7 @@
1632 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1633 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1634 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1635 +#define PMD_SECT_AP_RDONLY (_AT(pmdval_t, 0))
1636
1637 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1638 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1639 diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1640 index d795282..d82ff13 100644
1641 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1642 +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1643 @@ -32,6 +32,7 @@
1644 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1645 #define PMD_BIT4 (_AT(pmdval_t, 0))
1646 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1647 +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1648
1649 /*
1650 * - section
1651 @@ -41,9 +42,11 @@
1652 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1653 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1654 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1655 +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1656 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
1657 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
1658 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
1659 +#define PMD_SECT_AP_RDONLY (_AT(pmdval_t, 1) << 7)
1660 #define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
1661
1662 /*
1663 @@ -66,6 +69,7 @@
1664 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1665 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1666 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1667 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1668 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1669
1670 /*
1671 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1672 index b249035..4ab204b 100644
1673 --- a/arch/arm/include/asm/pgtable-3level.h
1674 +++ b/arch/arm/include/asm/pgtable-3level.h
1675 @@ -73,6 +73,7 @@
1676 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1677 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1678 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1679 +#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1680 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1681 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1682 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1683 @@ -80,6 +81,7 @@
1684 /*
1685 * To be used in assembly code with the upper page attributes.
1686 */
1687 +#define L_PTE_PXN_HIGH (1 << (53 - 32))
1688 #define L_PTE_XN_HIGH (1 << (54 - 32))
1689 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1690
1691 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1692 index 08c1231..2602cf2 100644
1693 --- a/arch/arm/include/asm/pgtable.h
1694 +++ b/arch/arm/include/asm/pgtable.h
1695 @@ -30,6 +30,9 @@
1696 #include <asm/pgtable-2level.h>
1697 #endif
1698
1699 +#define ktla_ktva(addr) (addr)
1700 +#define ktva_ktla(addr) (addr)
1701 +
1702 /*
1703 * Just any arbitrary offset to the start of the vmalloc VM area: the
1704 * current 8MB value just means that there will be a 8MB "hole" after the
1705 @@ -53,6 +56,17 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1706 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1707 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1708
1709 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
1710 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
1711 +
1712 +#ifdef CONFIG_PAX_KERNEXEC
1713 +static inline unsigned long pax_open_kernel(void) { return 0; /* TODO */ }
1714 +static inline unsigned long pax_close_kernel(void) { return 0; /* TODO */ }
1715 +#else
1716 +static inline unsigned long pax_open_kernel(void) { return 0; }
1717 +static inline unsigned long pax_close_kernel(void) { return 0; }
1718 +#endif
1719 +
1720 /*
1721 * This is the lowest virtual address we can permit any user space
1722 * mapping to be mapped at. This is particularly important for
1723 @@ -73,23 +87,23 @@ extern pgprot_t pgprot_kernel;
1724
1725 #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
1726
1727 -#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
1728 -#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
1729 -#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
1730 -#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1731 -#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
1732 -#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1733 -#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
1734 +#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_PXN | L_PTE_RDONLY)
1735 +#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN | L_PTE_PXN)
1736 +#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_PXN)
1737 +#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1738 +#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1739 +#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1740 +#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1741 #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
1742 #define PAGE_KERNEL_EXEC pgprot_kernel
1743
1744 -#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
1745 -#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
1746 -#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
1747 -#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1748 -#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
1749 -#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
1750 -#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
1751 +#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1752 +#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN | L_PTE_PXN)
1753 +#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_PXN)
1754 +#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1755 +#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1756 +#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN | L_PTE_PXN)
1757 +#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_PXN)
1758
1759 #define __pgprot_modify(prot,mask,bits) \
1760 __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
1761 diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
1762 index f3628fb..a0672dd 100644
1763 --- a/arch/arm/include/asm/proc-fns.h
1764 +++ b/arch/arm/include/asm/proc-fns.h
1765 @@ -75,7 +75,7 @@ extern struct processor {
1766 unsigned int suspend_size;
1767 void (*do_suspend)(void *);
1768 void (*do_resume)(void *);
1769 -} processor;
1770 +} __do_const processor;
1771
1772 #ifndef MULTI_CPU
1773 extern void cpu_proc_init(void);
1774 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
1775 index 2e3be16..4dc90fc 100644
1776 --- a/arch/arm/include/asm/smp.h
1777 +++ b/arch/arm/include/asm/smp.h
1778 @@ -106,7 +106,7 @@ struct smp_operations {
1779 int (*cpu_disable)(unsigned int cpu);
1780 #endif
1781 #endif
1782 -};
1783 +} __no_const;
1784
1785 /*
1786 * set platform specific SMP operations
1787 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1788 index 8477b4c..801a6a9 100644
1789 --- a/arch/arm/include/asm/thread_info.h
1790 +++ b/arch/arm/include/asm/thread_info.h
1791 @@ -151,6 +151,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1792 #define TIF_SYSCALL_TRACE 8
1793 #define TIF_SYSCALL_AUDIT 9
1794 #define TIF_SYSCALL_TRACEPOINT 10
1795 +
1796 +/* within 8 bits of TIF_SYSCALL_TRACE
1797 + to meet flexible second operand requirements
1798 +*/
1799 +#define TIF_GRSEC_SETXID 11
1800 +
1801 #define TIF_USING_IWMMXT 17
1802 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1803 #define TIF_RESTORE_SIGMASK 20
1804 @@ -165,9 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1805 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
1806 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1807 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1808 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1809
1810 /* Checks for any syscall work in entry-common.S */
1811 -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
1812 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | \
1813 + _TIF_GRSEC_SETXID)
1814
1815 /*
1816 * Change these and you break ASM code in entry-common.S
1817 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1818 index 7e1f760..1af891c 100644
1819 --- a/arch/arm/include/asm/uaccess.h
1820 +++ b/arch/arm/include/asm/uaccess.h
1821 @@ -22,6 +22,8 @@
1822 #define VERIFY_READ 0
1823 #define VERIFY_WRITE 1
1824
1825 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1826 +
1827 /*
1828 * The exception table consists of pairs of addresses: the first is the
1829 * address of an instruction that is allowed to fault, and the second is
1830 @@ -418,8 +420,23 @@ do { \
1831
1832
1833 #ifdef CONFIG_MMU
1834 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1835 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1836 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1837 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1838 +
1839 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1840 +{
1841 + if (!__builtin_constant_p(n))
1842 + check_object_size(to, n, false);
1843 + return ___copy_from_user(to, from, n);
1844 +}
1845 +
1846 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1847 +{
1848 + if (!__builtin_constant_p(n))
1849 + check_object_size(from, n, true);
1850 + return ___copy_to_user(to, from, n);
1851 +}
1852 +
1853 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1854 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1855 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1856 @@ -431,6 +448,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
1857
1858 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1859 {
1860 + if ((long)n < 0)
1861 + return n;
1862 +
1863 if (access_ok(VERIFY_READ, from, n))
1864 n = __copy_from_user(to, from, n);
1865 else /* security hole - plug it */
1866 @@ -440,6 +460,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1867
1868 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1869 {
1870 + if ((long)n < 0)
1871 + return n;
1872 +
1873 if (access_ok(VERIFY_WRITE, to, n))
1874 n = __copy_to_user(to, from, n);
1875 return n;
1876 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1877 index 60d3b73..9168db0 100644
1878 --- a/arch/arm/kernel/armksyms.c
1879 +++ b/arch/arm/kernel/armksyms.c
1880 @@ -89,8 +89,8 @@ EXPORT_SYMBOL(__memzero);
1881 #ifdef CONFIG_MMU
1882 EXPORT_SYMBOL(copy_page);
1883
1884 -EXPORT_SYMBOL(__copy_from_user);
1885 -EXPORT_SYMBOL(__copy_to_user);
1886 +EXPORT_SYMBOL(___copy_from_user);
1887 +EXPORT_SYMBOL(___copy_to_user);
1888 EXPORT_SYMBOL(__clear_user);
1889
1890 EXPORT_SYMBOL(__get_user_1);
1891 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
1892 index 4eee351..e247728 100644
1893 --- a/arch/arm/kernel/head.S
1894 +++ b/arch/arm/kernel/head.S
1895 @@ -52,7 +52,9 @@
1896 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
1897
1898 .macro pgtbl, rd, phys
1899 - add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
1900 + mov \rd, #TEXT_OFFSET
1901 + sub \rd, #PG_DIR_SIZE
1902 + add \rd, \rd, \phys
1903 .endm
1904
1905 /*
1906 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
1907 index 1e9be5d..b9a75e1 100644
1908 --- a/arch/arm/kernel/module.c
1909 +++ b/arch/arm/kernel/module.c
1910 @@ -37,12 +37,35 @@
1911 #endif
1912
1913 #ifdef CONFIG_MMU
1914 -void *module_alloc(unsigned long size)
1915 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
1916 {
1917 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
1918 - GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
1919 + GFP_KERNEL, prot, -1,
1920 __builtin_return_address(0));
1921 }
1922 +
1923 +void *module_alloc(unsigned long size)
1924 +{
1925 +
1926 +#ifdef CONFIG_PAX_KERNEXEC
1927 + return __module_alloc(size, PAGE_KERNEL);
1928 +#else
1929 + return __module_alloc(size, PAGE_KERNEL_EXEC);
1930 +#endif
1931 +
1932 +}
1933 +
1934 +#ifdef CONFIG_PAX_KERNEXEC
1935 +void module_free_exec(struct module *mod, void *module_region)
1936 +{
1937 + module_free(mod, module_region);
1938 +}
1939 +
1940 +void *module_alloc_exec(unsigned long size)
1941 +{
1942 + return __module_alloc(size, PAGE_KERNEL_EXEC);
1943 +}
1944 +#endif
1945 #endif
1946
1947 int
1948 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1949 index 90084a6..bf4bcfb 100644
1950 --- a/arch/arm/kernel/process.c
1951 +++ b/arch/arm/kernel/process.c
1952 @@ -28,7 +28,6 @@
1953 #include <linux/tick.h>
1954 #include <linux/utsname.h>
1955 #include <linux/uaccess.h>
1956 -#include <linux/random.h>
1957 #include <linux/hw_breakpoint.h>
1958 #include <linux/cpuidle.h>
1959 #include <linux/leds.h>
1960 @@ -256,9 +255,10 @@ void machine_power_off(void)
1961 machine_shutdown();
1962 if (pm_power_off)
1963 pm_power_off();
1964 + BUG();
1965 }
1966
1967 -void machine_restart(char *cmd)
1968 +__noreturn void machine_restart(char *cmd)
1969 {
1970 machine_shutdown();
1971
1972 @@ -451,12 +451,6 @@ unsigned long get_wchan(struct task_struct *p)
1973 return 0;
1974 }
1975
1976 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1977 -{
1978 - unsigned long range_end = mm->brk + 0x02000000;
1979 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1980 -}
1981 -
1982 #ifdef CONFIG_MMU
1983 /*
1984 * The vectors page is always readable from user space for the
1985 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1986 index 739db3a..7f4a272 100644
1987 --- a/arch/arm/kernel/ptrace.c
1988 +++ b/arch/arm/kernel/ptrace.c
1989 @@ -916,6 +916,10 @@ enum ptrace_syscall_dir {
1990 PTRACE_SYSCALL_EXIT,
1991 };
1992
1993 +#ifdef CONFIG_GRKERNSEC_SETXID
1994 +extern void gr_delayed_cred_worker(void);
1995 +#endif
1996 +
1997 static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
1998 enum ptrace_syscall_dir dir)
1999 {
2000 @@ -923,6 +927,11 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
2001
2002 current_thread_info()->syscall = scno;
2003
2004 +#ifdef CONFIG_GRKERNSEC_SETXID
2005 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2006 + gr_delayed_cred_worker();
2007 +#endif
2008 +
2009 if (!test_thread_flag(TIF_SYSCALL_TRACE))
2010 return scno;
2011
2012 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2013 index da1d1aa..0a606e7 100644
2014 --- a/arch/arm/kernel/setup.c
2015 +++ b/arch/arm/kernel/setup.c
2016 @@ -99,19 +99,19 @@ EXPORT_SYMBOL(elf_hwcap);
2017
2018
2019 #ifdef MULTI_CPU
2020 -struct processor processor __read_mostly;
2021 +struct processor processor;
2022 #endif
2023 #ifdef MULTI_TLB
2024 -struct cpu_tlb_fns cpu_tlb __read_mostly;
2025 +struct cpu_tlb_fns cpu_tlb __read_only;
2026 #endif
2027 #ifdef MULTI_USER
2028 -struct cpu_user_fns cpu_user __read_mostly;
2029 +struct cpu_user_fns cpu_user __read_only;
2030 #endif
2031 #ifdef MULTI_CACHE
2032 -struct cpu_cache_fns cpu_cache __read_mostly;
2033 +struct cpu_cache_fns cpu_cache __read_only;
2034 #endif
2035 #ifdef CONFIG_OUTER_CACHE
2036 -struct outer_cache_fns outer_cache __read_mostly;
2037 +struct outer_cache_fns outer_cache __read_only;
2038 EXPORT_SYMBOL(outer_cache);
2039 #endif
2040
2041 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
2042 index fbc8b26..000ded0 100644
2043 --- a/arch/arm/kernel/smp.c
2044 +++ b/arch/arm/kernel/smp.c
2045 @@ -70,7 +70,7 @@ enum ipi_msg_type {
2046
2047 static DECLARE_COMPLETION(cpu_running);
2048
2049 -static struct smp_operations smp_ops;
2050 +static struct smp_operations smp_ops __read_only;
2051
2052 void __init smp_set_ops(struct smp_operations *ops)
2053 {
2054 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
2055 index b0179b8..b54c6c1 100644
2056 --- a/arch/arm/kernel/traps.c
2057 +++ b/arch/arm/kernel/traps.c
2058 @@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2059 static int die_owner = -1;
2060 static unsigned int die_nest_count;
2061
2062 +extern void gr_handle_kernel_exploit(void);
2063 +
2064 static unsigned long oops_begin(void)
2065 {
2066 int cpu;
2067 @@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
2068 panic("Fatal exception in interrupt");
2069 if (panic_on_oops)
2070 panic("Fatal exception");
2071 +
2072 + gr_handle_kernel_exploit();
2073 +
2074 if (signr)
2075 do_exit(signr);
2076 }
2077 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
2078 index 36ff15b..75d9e9d 100644
2079 --- a/arch/arm/kernel/vmlinux.lds.S
2080 +++ b/arch/arm/kernel/vmlinux.lds.S
2081 @@ -8,7 +8,11 @@
2082 #include <asm/thread_info.h>
2083 #include <asm/memory.h>
2084 #include <asm/page.h>
2085 -
2086 +
2087 +#ifdef CONFIG_PAX_KERNEXEC
2088 +#include <asm/pgtable.h>
2089 +#endif
2090 +
2091 #define PROC_INFO \
2092 . = ALIGN(4); \
2093 VMLINUX_SYMBOL(__proc_info_begin) = .; \
2094 @@ -90,6 +94,11 @@ SECTIONS
2095 _text = .;
2096 HEAD_TEXT
2097 }
2098 +
2099 +#ifdef CONFIG_PAX_KERNEXEC
2100 + . = ALIGN(1<<SECTION_SHIFT);
2101 +#endif
2102 +
2103 .text : { /* Real text segment */
2104 _stext = .; /* Text and read-only data */
2105 __exception_text_start = .;
2106 @@ -133,6 +142,10 @@ SECTIONS
2107
2108 _etext = .; /* End of text and rodata section */
2109
2110 +#ifdef CONFIG_PAX_KERNEXEC
2111 + . = ALIGN(1<<SECTION_SHIFT);
2112 +#endif
2113 +
2114 #ifndef CONFIG_XIP_KERNEL
2115 . = ALIGN(PAGE_SIZE);
2116 __init_begin = .;
2117 @@ -192,6 +205,11 @@ SECTIONS
2118 . = PAGE_OFFSET + TEXT_OFFSET;
2119 #else
2120 __init_end = .;
2121 +
2122 +#ifdef CONFIG_PAX_KERNEXEC
2123 + . = ALIGN(1<<SECTION_SHIFT);
2124 +#endif
2125 +
2126 . = ALIGN(THREAD_SIZE);
2127 __data_loc = .;
2128 #endif
2129 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
2130 index 66a477a..bee61d3 100644
2131 --- a/arch/arm/lib/copy_from_user.S
2132 +++ b/arch/arm/lib/copy_from_user.S
2133 @@ -16,7 +16,7 @@
2134 /*
2135 * Prototype:
2136 *
2137 - * size_t __copy_from_user(void *to, const void *from, size_t n)
2138 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
2139 *
2140 * Purpose:
2141 *
2142 @@ -84,11 +84,11 @@
2143
2144 .text
2145
2146 -ENTRY(__copy_from_user)
2147 +ENTRY(___copy_from_user)
2148
2149 #include "copy_template.S"
2150
2151 -ENDPROC(__copy_from_user)
2152 +ENDPROC(___copy_from_user)
2153
2154 .pushsection .fixup,"ax"
2155 .align 0
2156 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
2157 index 6ee2f67..d1cce76 100644
2158 --- a/arch/arm/lib/copy_page.S
2159 +++ b/arch/arm/lib/copy_page.S
2160 @@ -10,6 +10,7 @@
2161 * ASM optimised string functions
2162 */
2163 #include <linux/linkage.h>
2164 +#include <linux/const.h>
2165 #include <asm/assembler.h>
2166 #include <asm/asm-offsets.h>
2167 #include <asm/cache.h>
2168 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
2169 index d066df6..df28194 100644
2170 --- a/arch/arm/lib/copy_to_user.S
2171 +++ b/arch/arm/lib/copy_to_user.S
2172 @@ -16,7 +16,7 @@
2173 /*
2174 * Prototype:
2175 *
2176 - * size_t __copy_to_user(void *to, const void *from, size_t n)
2177 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
2178 *
2179 * Purpose:
2180 *
2181 @@ -88,11 +88,11 @@
2182 .text
2183
2184 ENTRY(__copy_to_user_std)
2185 -WEAK(__copy_to_user)
2186 +WEAK(___copy_to_user)
2187
2188 #include "copy_template.S"
2189
2190 -ENDPROC(__copy_to_user)
2191 +ENDPROC(___copy_to_user)
2192 ENDPROC(__copy_to_user_std)
2193
2194 .pushsection .fixup,"ax"
2195 diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
2196 index 0dc5385..45833ef 100644
2197 --- a/arch/arm/lib/delay.c
2198 +++ b/arch/arm/lib/delay.c
2199 @@ -28,12 +28,14 @@
2200 /*
2201 * Default to the loop-based delay implementation.
2202 */
2203 -struct arm_delay_ops arm_delay_ops = {
2204 +static struct arm_delay_ops arm_loop_delay_ops = {
2205 .delay = __loop_delay,
2206 .const_udelay = __loop_const_udelay,
2207 .udelay = __loop_udelay,
2208 };
2209
2210 +struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
2211 +
2212 static const struct delay_timer *delay_timer;
2213 static bool delay_calibrated;
2214
2215 @@ -67,6 +69,12 @@ static void __timer_udelay(unsigned long usecs)
2216 __timer_const_udelay(usecs * UDELAY_MULT);
2217 }
2218
2219 +static struct arm_delay_ops arm_timer_delay_ops = {
2220 + .delay = __timer_delay,
2221 + .const_udelay = __timer_const_udelay,
2222 + .udelay = __timer_udelay,
2223 +};
2224 +
2225 void __init register_current_timer_delay(const struct delay_timer *timer)
2226 {
2227 if (!delay_calibrated) {
2228 @@ -74,9 +82,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
2229 delay_timer = timer;
2230 lpj_fine = timer->freq / HZ;
2231 loops_per_jiffy = lpj_fine;
2232 - arm_delay_ops.delay = __timer_delay;
2233 - arm_delay_ops.const_udelay = __timer_const_udelay;
2234 - arm_delay_ops.udelay = __timer_udelay;
2235 + arm_delay_ops = &arm_timer_delay_ops;
2236 delay_calibrated = true;
2237 } else {
2238 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
2239 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
2240 index 025f742..8432b08 100644
2241 --- a/arch/arm/lib/uaccess_with_memcpy.c
2242 +++ b/arch/arm/lib/uaccess_with_memcpy.c
2243 @@ -104,7 +104,7 @@ out:
2244 }
2245
2246 unsigned long
2247 -__copy_to_user(void __user *to, const void *from, unsigned long n)
2248 +___copy_to_user(void __user *to, const void *from, unsigned long n)
2249 {
2250 /*
2251 * This test is stubbed out of the main function above to keep
2252 diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
2253 index 2c6c218..f491e87 100644
2254 --- a/arch/arm/mach-kirkwood/common.c
2255 +++ b/arch/arm/mach-kirkwood/common.c
2256 @@ -150,7 +150,7 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
2257 clk_gate_ops.disable(hw);
2258 }
2259
2260 -static struct clk_ops clk_gate_fn_ops;
2261 +static clk_ops_no_const clk_gate_fn_ops;
2262
2263 static struct clk __init *clk_register_gate_fn(struct device *dev,
2264 const char *name,
2265 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
2266 index d95f727..12f10dd 100644
2267 --- a/arch/arm/mach-omap2/board-n8x0.c
2268 +++ b/arch/arm/mach-omap2/board-n8x0.c
2269 @@ -589,7 +589,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
2270 }
2271 #endif
2272
2273 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
2274 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
2275 .late_init = n8x0_menelaus_late_init,
2276 };
2277
2278 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
2279 index 87cc6d0..fd4f248 100644
2280 --- a/arch/arm/mach-omap2/omap_hwmod.c
2281 +++ b/arch/arm/mach-omap2/omap_hwmod.c
2282 @@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
2283 int (*is_hardreset_asserted)(struct omap_hwmod *oh,
2284 struct omap_hwmod_rst_info *ohri);
2285 int (*init_clkdm)(struct omap_hwmod *oh);
2286 -};
2287 +} __no_const;
2288
2289 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
2290 -static struct omap_hwmod_soc_ops soc_ops;
2291 +static struct omap_hwmod_soc_ops soc_ops __read_only;
2292
2293 /* omap_hwmod_list contains all registered struct omap_hwmods */
2294 static LIST_HEAD(omap_hwmod_list);
2295 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
2296 index 5dbf13f..2a853f8 100644
2297 --- a/arch/arm/mm/fault.c
2298 +++ b/arch/arm/mm/fault.c
2299 @@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
2300 }
2301 #endif
2302
2303 +#ifdef CONFIG_PAX_PAGEEXEC
2304 + if (fsr & FSR_LNX_PF) {
2305 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
2306 + do_group_exit(SIGKILL);
2307 + }
2308 +#endif
2309 +
2310 tsk->thread.address = addr;
2311 tsk->thread.error_code = fsr;
2312 tsk->thread.trap_no = 14;
2313 @@ -398,6 +405,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
2314 }
2315 #endif /* CONFIG_MMU */
2316
2317 +#ifdef CONFIG_PAX_PAGEEXEC
2318 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2319 +{
2320 + long i;
2321 +
2322 + printk(KERN_ERR "PAX: bytes at PC: ");
2323 + for (i = 0; i < 20; i++) {
2324 + unsigned char c;
2325 + if (get_user(c, (__force unsigned char __user *)pc+i))
2326 + printk(KERN_CONT "?? ");
2327 + else
2328 + printk(KERN_CONT "%02x ", c);
2329 + }
2330 + printk("\n");
2331 +
2332 + printk(KERN_ERR "PAX: bytes at SP-4: ");
2333 + for (i = -1; i < 20; i++) {
2334 + unsigned long c;
2335 + if (get_user(c, (__force unsigned long __user *)sp+i))
2336 + printk(KERN_CONT "???????? ");
2337 + else
2338 + printk(KERN_CONT "%08lx ", c);
2339 + }
2340 + printk("\n");
2341 +}
2342 +#endif
2343 +
2344 /*
2345 * First Level Translation Fault Handler
2346 *
2347 @@ -575,12 +609,34 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
2348 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
2349 struct siginfo info;
2350
2351 +#ifdef CONFIG_PAX_KERNEXEC
2352 + if (is_pxn_fault(ifsr) && !user_mode(regs)) {
2353 + printk(KERN_ALERT "PAX: Kernel attempted to execute userland memory at %08lx! ifsr=%08x\n", addr, ifsr);
2354 + goto die;
2355 + }
2356 +#endif
2357 +
2358 +#ifdef CONFIG_PAX_REFCOUNT
2359 + if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
2360 + unsigned int bkpt;
2361 +
2362 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
2363 + current->thread.error_code = ifsr;
2364 + current->thread.trap_no = 0;
2365 + pax_report_refcount_overflow(regs);
2366 + fixup_exception(regs);
2367 + return;
2368 + }
2369 + }
2370 +#endif
2371 +
2372 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
2373 return;
2374
2375 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
2376 inf->name, ifsr, addr);
2377
2378 +die:
2379 info.si_signo = inf->sig;
2380 info.si_errno = 0;
2381 info.si_code = inf->code;
2382 diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
2383 index cf08bdf..5457296 100644
2384 --- a/arch/arm/mm/fault.h
2385 +++ b/arch/arm/mm/fault.h
2386 @@ -3,6 +3,7 @@
2387
2388 /*
2389 * Fault status register encodings. We steal bit 31 for our own purposes.
2390 + * Set when the FSR value is from an instruction fault.
2391 */
2392 #define FSR_LNX_PF (1 << 31)
2393 #define FSR_WRITE (1 << 11)
2394 @@ -22,6 +23,12 @@ static inline int fsr_fs(unsigned int fsr)
2395 }
2396 #endif
2397
2398 +/* valid for LPAE and !LPAE */
2399 +static inline int is_pxn_fault(unsigned int fsr)
2400 +{
2401 + return ((fsr_fs(fsr) & 0x3c) == 0xc);
2402 +}
2403 +
2404 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
2405 unsigned long search_exception_table(unsigned long addr);
2406
2407 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
2408 index ad722f1..46b670e 100644
2409 --- a/arch/arm/mm/init.c
2410 +++ b/arch/arm/mm/init.c
2411 @@ -734,9 +734,43 @@ void __init mem_init(void)
2412
2413 void free_initmem(void)
2414 {
2415 +
2416 +#ifdef CONFIG_PAX_KERNEXEC
2417 + unsigned long addr;
2418 + pgd_t *pgd;
2419 + pud_t *pud;
2420 + pmd_t *pmd;
2421 +#endif
2422 +
2423 #ifdef CONFIG_HAVE_TCM
2424 extern char __tcm_start, __tcm_end;
2425 +#endif
2426
2427 +#ifdef CONFIG_PAX_KERNEXEC
2428 + /* make pages tables, etc before .text NX */
2429 + for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += PMD_SIZE) {
2430 + pgd = pgd_offset_k(addr);
2431 + pud = pud_offset(pgd, addr);
2432 + pmd = pmd_offset(pud, addr);
2433 + __pmd_update(pmd, PMD_SECT_XN);
2434 + }
2435 + /* make init NX */
2436 + for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += PMD_SIZE) {
2437 + pgd = pgd_offset_k(addr);
2438 + pud = pud_offset(pgd, addr);
2439 + pmd = pmd_offset(pud, addr);
2440 + __pmd_update(pmd, PMD_SECT_XN);
2441 + }
2442 + /* make kernel code/rodata read-only */
2443 + for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += PMD_SIZE) {
2444 + pgd = pgd_offset_k(addr);
2445 + pud = pud_offset(pgd, addr);
2446 + pmd = pmd_offset(pud, addr);
2447 + __pmd_update(pmd, PMD_SECT_AP_RDONLY);
2448 + }
2449 +#endif
2450 +
2451 +#ifdef CONFIG_HAVE_TCM
2452 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
2453 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
2454 __phys_to_pfn(__pa(&__tcm_end)),
2455 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
2456 index ce8cb19..3ec539d 100644
2457 --- a/arch/arm/mm/mmap.c
2458 +++ b/arch/arm/mm/mmap.c
2459 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2460 if (len > TASK_SIZE)
2461 return -ENOMEM;
2462
2463 +#ifdef CONFIG_PAX_RANDMMAP
2464 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
2465 +#endif
2466 +
2467 if (addr) {
2468 if (do_align)
2469 addr = COLOUR_ALIGN(addr, pgoff);
2470 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2471 addr = PAGE_ALIGN(addr);
2472
2473 vma = find_vma(mm, addr);
2474 - if (TASK_SIZE - len >= addr &&
2475 - (!vma || addr + len <= vma->vm_start))
2476 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2477 return addr;
2478 }
2479 if (len > mm->cached_hole_size) {
2480 - start_addr = addr = mm->free_area_cache;
2481 + start_addr = addr = mm->free_area_cache;
2482 } else {
2483 - start_addr = addr = mm->mmap_base;
2484 - mm->cached_hole_size = 0;
2485 + start_addr = addr = mm->mmap_base;
2486 + mm->cached_hole_size = 0;
2487 }
2488
2489 full_search:
2490 @@ -124,14 +127,14 @@ full_search:
2491 * Start a new search - just in case we missed
2492 * some holes.
2493 */
2494 - if (start_addr != TASK_UNMAPPED_BASE) {
2495 - start_addr = addr = TASK_UNMAPPED_BASE;
2496 + if (start_addr != mm->mmap_base) {
2497 + start_addr = addr = mm->mmap_base;
2498 mm->cached_hole_size = 0;
2499 goto full_search;
2500 }
2501 return -ENOMEM;
2502 }
2503 - if (!vma || addr + len <= vma->vm_start) {
2504 + if (check_heap_stack_gap(vma, addr, len)) {
2505 /*
2506 * Remember the place where we stopped the search:
2507 */
2508 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2509
2510 if (mmap_is_legacy()) {
2511 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2512 +
2513 +#ifdef CONFIG_PAX_RANDMMAP
2514 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2515 + mm->mmap_base += mm->delta_mmap;
2516 +#endif
2517 +
2518 mm->get_unmapped_area = arch_get_unmapped_area;
2519 mm->unmap_area = arch_unmap_area;
2520 } else {
2521 mm->mmap_base = mmap_base(random_factor);
2522 +
2523 +#ifdef CONFIG_PAX_RANDMMAP
2524 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2525 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2526 +#endif
2527 +
2528 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2529 mm->unmap_area = arch_unmap_area_topdown;
2530 }
2531 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
2532 index 941dfb9..af59618 100644
2533 --- a/arch/arm/mm/mmu.c
2534 +++ b/arch/arm/mm/mmu.c
2535 @@ -227,16 +227,16 @@ static struct mem_type mem_types[] = {
2536 [MT_UNCACHED] = {
2537 .prot_pte = PROT_PTE_DEVICE,
2538 .prot_l1 = PMD_TYPE_TABLE,
2539 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2540 + .prot_sect = PROT_SECT_DEVICE | PMD_SECT_XN,
2541 .domain = DOMAIN_IO,
2542 },
2543 [MT_CACHECLEAN] = {
2544 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2545 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_RDONLY,
2546 .domain = DOMAIN_KERNEL,
2547 },
2548 #ifndef CONFIG_ARM_LPAE
2549 [MT_MINICLEAN] = {
2550 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
2551 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE | PMD_SECT_AP_RDONLY,
2552 .domain = DOMAIN_KERNEL,
2553 },
2554 #endif
2555 @@ -258,8 +258,26 @@ static struct mem_type mem_types[] = {
2556 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
2557 .domain = DOMAIN_KERNEL,
2558 },
2559 + [MT_MEMORY_R] = {
2560 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_RDONLY | L_PTE_XN,
2561 + .prot_l1 = PMD_TYPE_TABLE,
2562 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY | PMD_SECT_XN,
2563 + .domain = DOMAIN_KERNEL,
2564 + },
2565 + [MT_MEMORY_RW] = {
2566 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN,
2567 + .prot_l1 = PMD_TYPE_TABLE,
2568 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
2569 + .domain = DOMAIN_KERNEL,
2570 + },
2571 + [MT_MEMORY_RX] = {
2572 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_RDONLY,
2573 + .prot_l1 = PMD_TYPE_TABLE,
2574 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY,
2575 + .domain = DOMAIN_KERNEL,
2576 + },
2577 [MT_ROM] = {
2578 - .prot_sect = PMD_TYPE_SECT,
2579 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY,
2580 .domain = DOMAIN_KERNEL,
2581 },
2582 [MT_MEMORY_NONCACHED] = {
2583 @@ -273,7 +291,7 @@ static struct mem_type mem_types[] = {
2584 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
2585 L_PTE_XN,
2586 .prot_l1 = PMD_TYPE_TABLE,
2587 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2588 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_RDONLY,
2589 .domain = DOMAIN_KERNEL,
2590 },
2591 [MT_MEMORY_ITCM] = {
2592 @@ -432,6 +450,8 @@ static void __init build_mem_type_table(void)
2593 * from SVC mode and no access from userspace.
2594 */
2595 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2596 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2597 + mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2598 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2599 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2600 #endif
2601 @@ -450,6 +470,12 @@ static void __init build_mem_type_table(void)
2602 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
2603 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
2604 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
2605 + mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
2606 + mem_types[MT_MEMORY_R].prot_pte |= L_PTE_SHARED;
2607 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
2608 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
2609 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
2610 + mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
2611 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
2612 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
2613 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
2614 @@ -503,6 +529,12 @@ static void __init build_mem_type_table(void)
2615 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
2616 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
2617 mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
2618 + mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
2619 + mem_types[MT_MEMORY_R].prot_pte |= kern_pgprot;
2620 + mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
2621 + mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
2622 + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
2623 + mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
2624 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
2625 mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
2626 mem_types[MT_ROM].prot_sect |= cp->pmd;
2627 @@ -1198,8 +1230,37 @@ static void __init map_lowmem(void)
2628 map.pfn = __phys_to_pfn(start);
2629 map.virtual = __phys_to_virt(start);
2630 map.length = end - start;
2631 - map.type = MT_MEMORY;
2632
2633 + if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
2634 + struct map_desc kernel;
2635 + struct map_desc initmap;
2636 +
2637 + /* when freeing initmem we will make this RW */
2638 + initmap.pfn = __phys_to_pfn(__pa(__init_begin));
2639 + initmap.virtual = (unsigned long)__init_begin;
2640 + initmap.length = _sdata - __init_begin;
2641 + initmap.type = MT_MEMORY;
2642 + create_mapping(&initmap);
2643 +
2644 + /* when freeing initmem we will make this RX */
2645 + kernel.pfn = __phys_to_pfn(__pa(_stext));
2646 + kernel.virtual = (unsigned long)_stext;
2647 + kernel.length = __init_begin - _stext;
2648 + kernel.type = MT_MEMORY;
2649 + create_mapping(&kernel);
2650 +
2651 + if (map.virtual < (unsigned long)_stext) {
2652 + map.length = (unsigned long)_stext - map.virtual;
2653 + map.type = MT_MEMORY;
2654 + create_mapping(&map);
2655 + }
2656 +
2657 + map.pfn = __phys_to_pfn(__pa(_sdata));
2658 + map.virtual = (unsigned long)_sdata;
2659 + map.length = end - __pa(_sdata);
2660 + }
2661 +
2662 + map.type = MT_MEMORY_RW;
2663 create_mapping(&map);
2664 }
2665 }
2666 diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2667 index ec63e4a..62aa5f1d 100644
2668 --- a/arch/arm/plat-orion/include/plat/addr-map.h
2669 +++ b/arch/arm/plat-orion/include/plat/addr-map.h
2670 @@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2671 value in bridge_virt_base */
2672 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2673 const int win);
2674 -};
2675 +} __no_const;
2676
2677 /*
2678 * Information needed to setup one address mapping.
2679 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2680 index f5144cd..71f6d1f 100644
2681 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2682 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2683 @@ -47,7 +47,7 @@ struct samsung_dma_ops {
2684 int (*started)(unsigned ch);
2685 int (*flush)(unsigned ch);
2686 int (*stop)(unsigned ch);
2687 -};
2688 +} __no_const;
2689
2690 extern void *samsung_dmadev_get_ops(void);
2691 extern void *s3c_dma_get_ops(void);
2692 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2693 index c3a58a1..78fbf54 100644
2694 --- a/arch/avr32/include/asm/cache.h
2695 +++ b/arch/avr32/include/asm/cache.h
2696 @@ -1,8 +1,10 @@
2697 #ifndef __ASM_AVR32_CACHE_H
2698 #define __ASM_AVR32_CACHE_H
2699
2700 +#include <linux/const.h>
2701 +
2702 #define L1_CACHE_SHIFT 5
2703 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2704 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2705
2706 /*
2707 * Memory returned by kmalloc() may be used for DMA, so we must make
2708 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2709 index e2c3287..6c4f98c 100644
2710 --- a/arch/avr32/include/asm/elf.h
2711 +++ b/arch/avr32/include/asm/elf.h
2712 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2713 the loader. We need to make sure that it is out of the way of the program
2714 that it will "exec", and that there is sufficient room for the brk. */
2715
2716 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2717 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2718
2719 +#ifdef CONFIG_PAX_ASLR
2720 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2721 +
2722 +#define PAX_DELTA_MMAP_LEN 15
2723 +#define PAX_DELTA_STACK_LEN 15
2724 +#endif
2725
2726 /* This yields a mask that user programs can use to figure out what
2727 instruction set this CPU supports. This could be done in user space,
2728 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2729 index 479330b..53717a8 100644
2730 --- a/arch/avr32/include/asm/kmap_types.h
2731 +++ b/arch/avr32/include/asm/kmap_types.h
2732 @@ -2,9 +2,9 @@
2733 #define __ASM_AVR32_KMAP_TYPES_H
2734
2735 #ifdef CONFIG_DEBUG_HIGHMEM
2736 -# define KM_TYPE_NR 29
2737 +# define KM_TYPE_NR 30
2738 #else
2739 -# define KM_TYPE_NR 14
2740 +# define KM_TYPE_NR 15
2741 #endif
2742
2743 #endif /* __ASM_AVR32_KMAP_TYPES_H */
2744 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2745 index b2f2d2d..d1c85cb 100644
2746 --- a/arch/avr32/mm/fault.c
2747 +++ b/arch/avr32/mm/fault.c
2748 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2749
2750 int exception_trace = 1;
2751
2752 +#ifdef CONFIG_PAX_PAGEEXEC
2753 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2754 +{
2755 + unsigned long i;
2756 +
2757 + printk(KERN_ERR "PAX: bytes at PC: ");
2758 + for (i = 0; i < 20; i++) {
2759 + unsigned char c;
2760 + if (get_user(c, (unsigned char *)pc+i))
2761 + printk(KERN_CONT "???????? ");
2762 + else
2763 + printk(KERN_CONT "%02x ", c);
2764 + }
2765 + printk("\n");
2766 +}
2767 +#endif
2768 +
2769 /*
2770 * This routine handles page faults. It determines the address and the
2771 * problem, and then passes it off to one of the appropriate routines.
2772 @@ -174,6 +191,16 @@ bad_area:
2773 up_read(&mm->mmap_sem);
2774
2775 if (user_mode(regs)) {
2776 +
2777 +#ifdef CONFIG_PAX_PAGEEXEC
2778 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2779 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2780 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2781 + do_group_exit(SIGKILL);
2782 + }
2783 + }
2784 +#endif
2785 +
2786 if (exception_trace && printk_ratelimit())
2787 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2788 "sp %08lx ecr %lu\n",
2789 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2790 index 568885a..f8008df 100644
2791 --- a/arch/blackfin/include/asm/cache.h
2792 +++ b/arch/blackfin/include/asm/cache.h
2793 @@ -7,6 +7,7 @@
2794 #ifndef __ARCH_BLACKFIN_CACHE_H
2795 #define __ARCH_BLACKFIN_CACHE_H
2796
2797 +#include <linux/const.h>
2798 #include <linux/linkage.h> /* for asmlinkage */
2799
2800 /*
2801 @@ -14,7 +15,7 @@
2802 * Blackfin loads 32 bytes for cache
2803 */
2804 #define L1_CACHE_SHIFT 5
2805 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2806 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2807 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2808
2809 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2810 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2811 index aea2718..3639a60 100644
2812 --- a/arch/cris/include/arch-v10/arch/cache.h
2813 +++ b/arch/cris/include/arch-v10/arch/cache.h
2814 @@ -1,8 +1,9 @@
2815 #ifndef _ASM_ARCH_CACHE_H
2816 #define _ASM_ARCH_CACHE_H
2817
2818 +#include <linux/const.h>
2819 /* Etrax 100LX have 32-byte cache-lines. */
2820 -#define L1_CACHE_BYTES 32
2821 #define L1_CACHE_SHIFT 5
2822 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2823
2824 #endif /* _ASM_ARCH_CACHE_H */
2825 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2826 index 7caf25d..ee65ac5 100644
2827 --- a/arch/cris/include/arch-v32/arch/cache.h
2828 +++ b/arch/cris/include/arch-v32/arch/cache.h
2829 @@ -1,11 +1,12 @@
2830 #ifndef _ASM_CRIS_ARCH_CACHE_H
2831 #define _ASM_CRIS_ARCH_CACHE_H
2832
2833 +#include <linux/const.h>
2834 #include <arch/hwregs/dma.h>
2835
2836 /* A cache-line is 32 bytes. */
2837 -#define L1_CACHE_BYTES 32
2838 #define L1_CACHE_SHIFT 5
2839 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2840
2841 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
2842
2843 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2844 index b86329d..6709906 100644
2845 --- a/arch/frv/include/asm/atomic.h
2846 +++ b/arch/frv/include/asm/atomic.h
2847 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2848 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2849 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2850
2851 +#define atomic64_read_unchecked(v) atomic64_read(v)
2852 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2853 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2854 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2855 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2856 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2857 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2858 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2859 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2860 +
2861 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2862 {
2863 int c, old;
2864 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2865 index 2797163..c2a401d 100644
2866 --- a/arch/frv/include/asm/cache.h
2867 +++ b/arch/frv/include/asm/cache.h
2868 @@ -12,10 +12,11 @@
2869 #ifndef __ASM_CACHE_H
2870 #define __ASM_CACHE_H
2871
2872 +#include <linux/const.h>
2873
2874 /* bytes per L1 cache line */
2875 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2876 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2877 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2878
2879 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2880 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2881 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2882 index 43901f2..0d8b865 100644
2883 --- a/arch/frv/include/asm/kmap_types.h
2884 +++ b/arch/frv/include/asm/kmap_types.h
2885 @@ -2,6 +2,6 @@
2886 #ifndef _ASM_KMAP_TYPES_H
2887 #define _ASM_KMAP_TYPES_H
2888
2889 -#define KM_TYPE_NR 17
2890 +#define KM_TYPE_NR 18
2891
2892 #endif
2893 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2894 index 385fd30..6c3d97e 100644
2895 --- a/arch/frv/mm/elf-fdpic.c
2896 +++ b/arch/frv/mm/elf-fdpic.c
2897 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2898 if (addr) {
2899 addr = PAGE_ALIGN(addr);
2900 vma = find_vma(current->mm, addr);
2901 - if (TASK_SIZE - len >= addr &&
2902 - (!vma || addr + len <= vma->vm_start))
2903 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2904 goto success;
2905 }
2906
2907 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2908 for (; vma; vma = vma->vm_next) {
2909 if (addr > limit)
2910 break;
2911 - if (addr + len <= vma->vm_start)
2912 + if (check_heap_stack_gap(vma, addr, len))
2913 goto success;
2914 addr = vma->vm_end;
2915 }
2916 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2917 for (; vma; vma = vma->vm_next) {
2918 if (addr > limit)
2919 break;
2920 - if (addr + len <= vma->vm_start)
2921 + if (check_heap_stack_gap(vma, addr, len))
2922 goto success;
2923 addr = vma->vm_end;
2924 }
2925 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2926 index f4ca594..adc72fd6 100644
2927 --- a/arch/hexagon/include/asm/cache.h
2928 +++ b/arch/hexagon/include/asm/cache.h
2929 @@ -21,9 +21,11 @@
2930 #ifndef __ASM_CACHE_H
2931 #define __ASM_CACHE_H
2932
2933 +#include <linux/const.h>
2934 +
2935 /* Bytes per L1 cache line */
2936 -#define L1_CACHE_SHIFT (5)
2937 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2938 +#define L1_CACHE_SHIFT 5
2939 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2940
2941 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2942 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2943 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2944 index 6e6fe18..a6ae668 100644
2945 --- a/arch/ia64/include/asm/atomic.h
2946 +++ b/arch/ia64/include/asm/atomic.h
2947 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2948 #define atomic64_inc(v) atomic64_add(1, (v))
2949 #define atomic64_dec(v) atomic64_sub(1, (v))
2950
2951 +#define atomic64_read_unchecked(v) atomic64_read(v)
2952 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2953 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2954 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2955 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2956 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2957 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2958 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2959 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2960 +
2961 /* Atomic operations are already serializing */
2962 #define smp_mb__before_atomic_dec() barrier()
2963 #define smp_mb__after_atomic_dec() barrier()
2964 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2965 index 988254a..e1ee885 100644
2966 --- a/arch/ia64/include/asm/cache.h
2967 +++ b/arch/ia64/include/asm/cache.h
2968 @@ -1,6 +1,7 @@
2969 #ifndef _ASM_IA64_CACHE_H
2970 #define _ASM_IA64_CACHE_H
2971
2972 +#include <linux/const.h>
2973
2974 /*
2975 * Copyright (C) 1998-2000 Hewlett-Packard Co
2976 @@ -9,7 +10,7 @@
2977
2978 /* Bytes per L1 (data) cache line. */
2979 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2980 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2981 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2982
2983 #ifdef CONFIG_SMP
2984 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2985 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2986 index b5298eb..67c6e62 100644
2987 --- a/arch/ia64/include/asm/elf.h
2988 +++ b/arch/ia64/include/asm/elf.h
2989 @@ -42,6 +42,13 @@
2990 */
2991 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2992
2993 +#ifdef CONFIG_PAX_ASLR
2994 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2995 +
2996 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2997 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2998 +#endif
2999 +
3000 #define PT_IA_64_UNWIND 0x70000001
3001
3002 /* IA-64 relocations: */
3003 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
3004 index 96a8d92..617a1cf 100644
3005 --- a/arch/ia64/include/asm/pgalloc.h
3006 +++ b/arch/ia64/include/asm/pgalloc.h
3007 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
3008 pgd_val(*pgd_entry) = __pa(pud);
3009 }
3010
3011 +static inline void
3012 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
3013 +{
3014 + pgd_populate(mm, pgd_entry, pud);
3015 +}
3016 +
3017 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3018 {
3019 return quicklist_alloc(0, GFP_KERNEL, NULL);
3020 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
3021 pud_val(*pud_entry) = __pa(pmd);
3022 }
3023
3024 +static inline void
3025 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
3026 +{
3027 + pud_populate(mm, pud_entry, pmd);
3028 +}
3029 +
3030 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
3031 {
3032 return quicklist_alloc(0, GFP_KERNEL, NULL);
3033 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
3034 index 815810c..d60bd4c 100644
3035 --- a/arch/ia64/include/asm/pgtable.h
3036 +++ b/arch/ia64/include/asm/pgtable.h
3037 @@ -12,7 +12,7 @@
3038 * David Mosberger-Tang <davidm@hpl.hp.com>
3039 */
3040
3041 -
3042 +#include <linux/const.h>
3043 #include <asm/mman.h>
3044 #include <asm/page.h>
3045 #include <asm/processor.h>
3046 @@ -142,6 +142,17 @@
3047 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3048 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3049 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
3050 +
3051 +#ifdef CONFIG_PAX_PAGEEXEC
3052 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
3053 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3054 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3055 +#else
3056 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3057 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3058 +# define PAGE_COPY_NOEXEC PAGE_COPY
3059 +#endif
3060 +
3061 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
3062 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
3063 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
3064 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
3065 index 54ff557..70c88b7 100644
3066 --- a/arch/ia64/include/asm/spinlock.h
3067 +++ b/arch/ia64/include/asm/spinlock.h
3068 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
3069 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
3070
3071 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
3072 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
3073 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
3074 }
3075
3076 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
3077 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
3078 index 449c8c0..50cdf87 100644
3079 --- a/arch/ia64/include/asm/uaccess.h
3080 +++ b/arch/ia64/include/asm/uaccess.h
3081 @@ -42,6 +42,8 @@
3082 #include <asm/pgtable.h>
3083 #include <asm/io.h>
3084
3085 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3086 +
3087 /*
3088 * For historical reasons, the following macros are grossly misnamed:
3089 */
3090 @@ -240,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
3091 static inline unsigned long
3092 __copy_to_user (void __user *to, const void *from, unsigned long count)
3093 {
3094 + if (count > INT_MAX)
3095 + return count;
3096 +
3097 + if (!__builtin_constant_p(count))
3098 + check_object_size(from, count, true);
3099 +
3100 return __copy_user(to, (__force void __user *) from, count);
3101 }
3102
3103 static inline unsigned long
3104 __copy_from_user (void *to, const void __user *from, unsigned long count)
3105 {
3106 + if (count > INT_MAX)
3107 + return count;
3108 +
3109 + if (!__builtin_constant_p(count))
3110 + check_object_size(to, count, false);
3111 +
3112 return __copy_user((__force void __user *) to, from, count);
3113 }
3114
3115 @@ -255,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
3116 ({ \
3117 void __user *__cu_to = (to); \
3118 const void *__cu_from = (from); \
3119 - long __cu_len = (n); \
3120 + unsigned long __cu_len = (n); \
3121 \
3122 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
3123 + if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
3124 + if (!__builtin_constant_p(n)) \
3125 + check_object_size(__cu_from, __cu_len, true); \
3126 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
3127 + } \
3128 __cu_len; \
3129 })
3130
3131 @@ -266,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
3132 ({ \
3133 void *__cu_to = (to); \
3134 const void __user *__cu_from = (from); \
3135 - long __cu_len = (n); \
3136 + unsigned long __cu_len = (n); \
3137 \
3138 __chk_user_ptr(__cu_from); \
3139 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
3140 + if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
3141 + if (!__builtin_constant_p(n)) \
3142 + check_object_size(__cu_to, __cu_len, false); \
3143 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
3144 + } \
3145 __cu_len; \
3146 })
3147
3148 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
3149 index 24603be..948052d 100644
3150 --- a/arch/ia64/kernel/module.c
3151 +++ b/arch/ia64/kernel/module.c
3152 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
3153 void
3154 module_free (struct module *mod, void *module_region)
3155 {
3156 - if (mod && mod->arch.init_unw_table &&
3157 - module_region == mod->module_init) {
3158 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
3159 unw_remove_unwind_table(mod->arch.init_unw_table);
3160 mod->arch.init_unw_table = NULL;
3161 }
3162 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
3163 }
3164
3165 static inline int
3166 +in_init_rx (const struct module *mod, uint64_t addr)
3167 +{
3168 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
3169 +}
3170 +
3171 +static inline int
3172 +in_init_rw (const struct module *mod, uint64_t addr)
3173 +{
3174 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
3175 +}
3176 +
3177 +static inline int
3178 in_init (const struct module *mod, uint64_t addr)
3179 {
3180 - return addr - (uint64_t) mod->module_init < mod->init_size;
3181 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
3182 +}
3183 +
3184 +static inline int
3185 +in_core_rx (const struct module *mod, uint64_t addr)
3186 +{
3187 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
3188 +}
3189 +
3190 +static inline int
3191 +in_core_rw (const struct module *mod, uint64_t addr)
3192 +{
3193 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
3194 }
3195
3196 static inline int
3197 in_core (const struct module *mod, uint64_t addr)
3198 {
3199 - return addr - (uint64_t) mod->module_core < mod->core_size;
3200 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
3201 }
3202
3203 static inline int
3204 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
3205 break;
3206
3207 case RV_BDREL:
3208 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
3209 + if (in_init_rx(mod, val))
3210 + val -= (uint64_t) mod->module_init_rx;
3211 + else if (in_init_rw(mod, val))
3212 + val -= (uint64_t) mod->module_init_rw;
3213 + else if (in_core_rx(mod, val))
3214 + val -= (uint64_t) mod->module_core_rx;
3215 + else if (in_core_rw(mod, val))
3216 + val -= (uint64_t) mod->module_core_rw;
3217 break;
3218
3219 case RV_LTV:
3220 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
3221 * addresses have been selected...
3222 */
3223 uint64_t gp;
3224 - if (mod->core_size > MAX_LTOFF)
3225 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
3226 /*
3227 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
3228 * at the end of the module.
3229 */
3230 - gp = mod->core_size - MAX_LTOFF / 2;
3231 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
3232 else
3233 - gp = mod->core_size / 2;
3234 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
3235 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
3236 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
3237 mod->arch.gp = gp;
3238 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
3239 }
3240 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
3241 index d9439ef..b9a4303 100644
3242 --- a/arch/ia64/kernel/sys_ia64.c
3243 +++ b/arch/ia64/kernel/sys_ia64.c
3244 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3245 if (REGION_NUMBER(addr) == RGN_HPAGE)
3246 addr = 0;
3247 #endif
3248 +
3249 +#ifdef CONFIG_PAX_RANDMMAP
3250 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3251 + addr = mm->free_area_cache;
3252 + else
3253 +#endif
3254 +
3255 if (!addr)
3256 addr = mm->free_area_cache;
3257
3258 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3259 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
3260 /* At this point: (!vma || addr < vma->vm_end). */
3261 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
3262 - if (start_addr != TASK_UNMAPPED_BASE) {
3263 + if (start_addr != mm->mmap_base) {
3264 /* Start a new search --- just in case we missed some holes. */
3265 - addr = TASK_UNMAPPED_BASE;
3266 + addr = mm->mmap_base;
3267 goto full_search;
3268 }
3269 return -ENOMEM;
3270 }
3271 - if (!vma || addr + len <= vma->vm_start) {
3272 + if (check_heap_stack_gap(vma, addr, len)) {
3273 /* Remember the address where we stopped this search: */
3274 mm->free_area_cache = addr + len;
3275 return addr;
3276 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
3277 index 0ccb28f..8992469 100644
3278 --- a/arch/ia64/kernel/vmlinux.lds.S
3279 +++ b/arch/ia64/kernel/vmlinux.lds.S
3280 @@ -198,7 +198,7 @@ SECTIONS {
3281 /* Per-cpu data: */
3282 . = ALIGN(PERCPU_PAGE_SIZE);
3283 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
3284 - __phys_per_cpu_start = __per_cpu_load;
3285 + __phys_per_cpu_start = per_cpu_load;
3286 /*
3287 * ensure percpu data fits
3288 * into percpu page size
3289 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
3290 index 6cf0341..d352594 100644
3291 --- a/arch/ia64/mm/fault.c
3292 +++ b/arch/ia64/mm/fault.c
3293 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
3294 return pte_present(pte);
3295 }
3296
3297 +#ifdef CONFIG_PAX_PAGEEXEC
3298 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3299 +{
3300 + unsigned long i;
3301 +
3302 + printk(KERN_ERR "PAX: bytes at PC: ");
3303 + for (i = 0; i < 8; i++) {
3304 + unsigned int c;
3305 + if (get_user(c, (unsigned int *)pc+i))
3306 + printk(KERN_CONT "???????? ");
3307 + else
3308 + printk(KERN_CONT "%08x ", c);
3309 + }
3310 + printk("\n");
3311 +}
3312 +#endif
3313 +
3314 # define VM_READ_BIT 0
3315 # define VM_WRITE_BIT 1
3316 # define VM_EXEC_BIT 2
3317 @@ -149,8 +166,21 @@ retry:
3318 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
3319 goto bad_area;
3320
3321 - if ((vma->vm_flags & mask) != mask)
3322 + if ((vma->vm_flags & mask) != mask) {
3323 +
3324 +#ifdef CONFIG_PAX_PAGEEXEC
3325 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
3326 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
3327 + goto bad_area;
3328 +
3329 + up_read(&mm->mmap_sem);
3330 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
3331 + do_group_exit(SIGKILL);
3332 + }
3333 +#endif
3334 +
3335 goto bad_area;
3336 + }
3337
3338 /*
3339 * If for any reason at all we couldn't handle the fault, make
3340 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
3341 index 5ca674b..e0e1b70 100644
3342 --- a/arch/ia64/mm/hugetlbpage.c
3343 +++ b/arch/ia64/mm/hugetlbpage.c
3344 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
3345 /* At this point: (!vmm || addr < vmm->vm_end). */
3346 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
3347 return -ENOMEM;
3348 - if (!vmm || (addr + len) <= vmm->vm_start)
3349 + if (check_heap_stack_gap(vmm, addr, len))
3350 return addr;
3351 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
3352 }
3353 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
3354 index 082e383..fb7be80 100644
3355 --- a/arch/ia64/mm/init.c
3356 +++ b/arch/ia64/mm/init.c
3357 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
3358 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
3359 vma->vm_end = vma->vm_start + PAGE_SIZE;
3360 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
3361 +
3362 +#ifdef CONFIG_PAX_PAGEEXEC
3363 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
3364 + vma->vm_flags &= ~VM_EXEC;
3365 +
3366 +#ifdef CONFIG_PAX_MPROTECT
3367 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
3368 + vma->vm_flags &= ~VM_MAYEXEC;
3369 +#endif
3370 +
3371 + }
3372 +#endif
3373 +
3374 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3375 down_write(&current->mm->mmap_sem);
3376 if (insert_vm_struct(current->mm, vma)) {
3377 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
3378 index 40b3ee9..8c2c112 100644
3379 --- a/arch/m32r/include/asm/cache.h
3380 +++ b/arch/m32r/include/asm/cache.h
3381 @@ -1,8 +1,10 @@
3382 #ifndef _ASM_M32R_CACHE_H
3383 #define _ASM_M32R_CACHE_H
3384
3385 +#include <linux/const.h>
3386 +
3387 /* L1 cache line size */
3388 #define L1_CACHE_SHIFT 4
3389 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3390 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3391
3392 #endif /* _ASM_M32R_CACHE_H */
3393 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
3394 index 82abd15..d95ae5d 100644
3395 --- a/arch/m32r/lib/usercopy.c
3396 +++ b/arch/m32r/lib/usercopy.c
3397 @@ -14,6 +14,9 @@
3398 unsigned long
3399 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
3400 {
3401 + if ((long)n < 0)
3402 + return n;
3403 +
3404 prefetch(from);
3405 if (access_ok(VERIFY_WRITE, to, n))
3406 __copy_user(to,from,n);
3407 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
3408 unsigned long
3409 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
3410 {
3411 + if ((long)n < 0)
3412 + return n;
3413 +
3414 prefetchw(to);
3415 if (access_ok(VERIFY_READ, from, n))
3416 __copy_user_zeroing(to,from,n);
3417 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
3418 index 0395c51..5f26031 100644
3419 --- a/arch/m68k/include/asm/cache.h
3420 +++ b/arch/m68k/include/asm/cache.h
3421 @@ -4,9 +4,11 @@
3422 #ifndef __ARCH_M68K_CACHE_H
3423 #define __ARCH_M68K_CACHE_H
3424
3425 +#include <linux/const.h>
3426 +
3427 /* bytes per L1 cache line */
3428 #define L1_CACHE_SHIFT 4
3429 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
3430 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3431
3432 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
3433
3434 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
3435 index 4efe96a..60e8699 100644
3436 --- a/arch/microblaze/include/asm/cache.h
3437 +++ b/arch/microblaze/include/asm/cache.h
3438 @@ -13,11 +13,12 @@
3439 #ifndef _ASM_MICROBLAZE_CACHE_H
3440 #define _ASM_MICROBLAZE_CACHE_H
3441
3442 +#include <linux/const.h>
3443 #include <asm/registers.h>
3444
3445 #define L1_CACHE_SHIFT 5
3446 /* word-granular cache in microblaze */
3447 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3448 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3449
3450 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3451
3452 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
3453 index 01cc6ba..bcb7a5d 100644
3454 --- a/arch/mips/include/asm/atomic.h
3455 +++ b/arch/mips/include/asm/atomic.h
3456 @@ -21,6 +21,10 @@
3457 #include <asm/cmpxchg.h>
3458 #include <asm/war.h>
3459
3460 +#ifdef CONFIG_GENERIC_ATOMIC64
3461 +#include <asm-generic/atomic64.h>
3462 +#endif
3463 +
3464 #define ATOMIC_INIT(i) { (i) }
3465
3466 /*
3467 @@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3468 */
3469 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
3470
3471 +#define atomic64_read_unchecked(v) atomic64_read(v)
3472 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3473 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3474 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3475 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3476 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3477 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3478 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3479 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3480 +
3481 #endif /* CONFIG_64BIT */
3482
3483 /*
3484 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
3485 index b4db69f..8f3b093 100644
3486 --- a/arch/mips/include/asm/cache.h
3487 +++ b/arch/mips/include/asm/cache.h
3488 @@ -9,10 +9,11 @@
3489 #ifndef _ASM_CACHE_H
3490 #define _ASM_CACHE_H
3491
3492 +#include <linux/const.h>
3493 #include <kmalloc.h>
3494
3495 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
3496 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3497 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3498
3499 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
3500 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3501 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
3502 index 455c0ac..ad65fbe 100644
3503 --- a/arch/mips/include/asm/elf.h
3504 +++ b/arch/mips/include/asm/elf.h
3505 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
3506 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
3507 #endif
3508
3509 +#ifdef CONFIG_PAX_ASLR
3510 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3511 +
3512 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3513 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3514 +#endif
3515 +
3516 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
3517 struct linux_binprm;
3518 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3519 int uses_interp);
3520
3521 -struct mm_struct;
3522 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3523 -#define arch_randomize_brk arch_randomize_brk
3524 -
3525 #endif /* _ASM_ELF_H */
3526 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
3527 index c1f6afa..38cc6e9 100644
3528 --- a/arch/mips/include/asm/exec.h
3529 +++ b/arch/mips/include/asm/exec.h
3530 @@ -12,6 +12,6 @@
3531 #ifndef _ASM_EXEC_H
3532 #define _ASM_EXEC_H
3533
3534 -extern unsigned long arch_align_stack(unsigned long sp);
3535 +#define arch_align_stack(x) ((x) & ~0xfUL)
3536
3537 #endif /* _ASM_EXEC_H */
3538 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
3539 index da9bd7d..91aa7ab 100644
3540 --- a/arch/mips/include/asm/page.h
3541 +++ b/arch/mips/include/asm/page.h
3542 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
3543 #ifdef CONFIG_CPU_MIPS32
3544 typedef struct { unsigned long pte_low, pte_high; } pte_t;
3545 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
3546 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
3547 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
3548 #else
3549 typedef struct { unsigned long long pte; } pte_t;
3550 #define pte_val(x) ((x).pte)
3551 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
3552 index 881d18b..cea38bc 100644
3553 --- a/arch/mips/include/asm/pgalloc.h
3554 +++ b/arch/mips/include/asm/pgalloc.h
3555 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3556 {
3557 set_pud(pud, __pud((unsigned long)pmd));
3558 }
3559 +
3560 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3561 +{
3562 + pud_populate(mm, pud, pmd);
3563 +}
3564 #endif
3565
3566 /*
3567 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
3568 index 18806a5..141ffcf 100644
3569 --- a/arch/mips/include/asm/thread_info.h
3570 +++ b/arch/mips/include/asm/thread_info.h
3571 @@ -110,6 +110,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
3572 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
3573 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
3574 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
3575 +/* li takes a 32bit immediate */
3576 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
3577 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
3578
3579 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3580 @@ -125,15 +127,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
3581 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
3582 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
3583 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
3584 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3585 +
3586 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
3587
3588 /* work to do in syscall_trace_leave() */
3589 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
3590 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
3591
3592 /* work to do on interrupt/exception return */
3593 #define _TIF_WORK_MASK \
3594 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
3595 /* work to do on any return to u-space */
3596 -#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
3597 +#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
3598
3599 #endif /* __KERNEL__ */
3600
3601 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
3602 index 9fdd8bc..4bd7f1a 100644
3603 --- a/arch/mips/kernel/binfmt_elfn32.c
3604 +++ b/arch/mips/kernel/binfmt_elfn32.c
3605 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
3606 #undef ELF_ET_DYN_BASE
3607 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
3608
3609 +#ifdef CONFIG_PAX_ASLR
3610 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3611 +
3612 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3613 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3614 +#endif
3615 +
3616 #include <asm/processor.h>
3617 #include <linux/module.h>
3618 #include <linux/elfcore.h>
3619 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
3620 index ff44823..97f8906 100644
3621 --- a/arch/mips/kernel/binfmt_elfo32.c
3622 +++ b/arch/mips/kernel/binfmt_elfo32.c
3623 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
3624 #undef ELF_ET_DYN_BASE
3625 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
3626
3627 +#ifdef CONFIG_PAX_ASLR
3628 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3629 +
3630 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3631 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3632 +#endif
3633 +
3634 #include <asm/processor.h>
3635
3636 /*
3637 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
3638 index e9a5fd7..378809a 100644
3639 --- a/arch/mips/kernel/process.c
3640 +++ b/arch/mips/kernel/process.c
3641 @@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
3642 out:
3643 return pc;
3644 }
3645 -
3646 -/*
3647 - * Don't forget that the stack pointer must be aligned on a 8 bytes
3648 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
3649 - */
3650 -unsigned long arch_align_stack(unsigned long sp)
3651 -{
3652 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3653 - sp -= get_random_int() & ~PAGE_MASK;
3654 -
3655 - return sp & ALMASK;
3656 -}
3657 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
3658 index 4812c6d..2069554 100644
3659 --- a/arch/mips/kernel/ptrace.c
3660 +++ b/arch/mips/kernel/ptrace.c
3661 @@ -528,6 +528,10 @@ static inline int audit_arch(void)
3662 return arch;
3663 }
3664
3665 +#ifdef CONFIG_GRKERNSEC_SETXID
3666 +extern void gr_delayed_cred_worker(void);
3667 +#endif
3668 +
3669 /*
3670 * Notification of system call entry/exit
3671 * - triggered by current->work.syscall_trace
3672 @@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3673 /* do the secure computing check first */
3674 secure_computing_strict(regs->regs[2]);
3675
3676 +#ifdef CONFIG_GRKERNSEC_SETXID
3677 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3678 + gr_delayed_cred_worker();
3679 +#endif
3680 +
3681 if (!(current->ptrace & PT_PTRACED))
3682 goto out;
3683
3684 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3685 index 374f66e..1c882a0 100644
3686 --- a/arch/mips/kernel/scall32-o32.S
3687 +++ b/arch/mips/kernel/scall32-o32.S
3688 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3689
3690 stack_done:
3691 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3692 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3693 + li t1, _TIF_SYSCALL_WORK
3694 and t0, t1
3695 bnez t0, syscall_trace_entry # -> yes
3696
3697 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3698 index 169de6a..f594a89 100644
3699 --- a/arch/mips/kernel/scall64-64.S
3700 +++ b/arch/mips/kernel/scall64-64.S
3701 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3702
3703 sd a3, PT_R26(sp) # save a3 for syscall restarting
3704
3705 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3706 + li t1, _TIF_SYSCALL_WORK
3707 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3708 and t0, t1, t0
3709 bnez t0, syscall_trace_entry
3710 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3711 index 86ec03f..1235baf 100644
3712 --- a/arch/mips/kernel/scall64-n32.S
3713 +++ b/arch/mips/kernel/scall64-n32.S
3714 @@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3715
3716 sd a3, PT_R26(sp) # save a3 for syscall restarting
3717
3718 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3719 + li t1, _TIF_SYSCALL_WORK
3720 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3721 and t0, t1, t0
3722 bnez t0, n32_syscall_trace_entry
3723 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3724 index 53c2d72..3734584 100644
3725 --- a/arch/mips/kernel/scall64-o32.S
3726 +++ b/arch/mips/kernel/scall64-o32.S
3727 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3728 PTR 4b, bad_stack
3729 .previous
3730
3731 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3732 + li t1, _TIF_SYSCALL_WORK
3733 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3734 and t0, t1, t0
3735 bnez t0, trace_a_syscall
3736 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3737 index ddcec1e..c7f983e 100644
3738 --- a/arch/mips/mm/fault.c
3739 +++ b/arch/mips/mm/fault.c
3740 @@ -27,6 +27,23 @@
3741 #include <asm/highmem.h> /* For VMALLOC_END */
3742 #include <linux/kdebug.h>
3743
3744 +#ifdef CONFIG_PAX_PAGEEXEC
3745 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3746 +{
3747 + unsigned long i;
3748 +
3749 + printk(KERN_ERR "PAX: bytes at PC: ");
3750 + for (i = 0; i < 5; i++) {
3751 + unsigned int c;
3752 + if (get_user(c, (unsigned int *)pc+i))
3753 + printk(KERN_CONT "???????? ");
3754 + else
3755 + printk(KERN_CONT "%08x ", c);
3756 + }
3757 + printk("\n");
3758 +}
3759 +#endif
3760 +
3761 /*
3762 * This routine handles page faults. It determines the address,
3763 * and the problem, and then passes it off to one of the appropriate
3764 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3765 index 302d779..7d35bf8 100644
3766 --- a/arch/mips/mm/mmap.c
3767 +++ b/arch/mips/mm/mmap.c
3768 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3769 do_color_align = 1;
3770
3771 /* requesting a specific address */
3772 +
3773 +#ifdef CONFIG_PAX_RANDMMAP
3774 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3775 +#endif
3776 +
3777 if (addr) {
3778 if (do_color_align)
3779 addr = COLOUR_ALIGN(addr, pgoff);
3780 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3781 addr = PAGE_ALIGN(addr);
3782
3783 vma = find_vma(mm, addr);
3784 - if (TASK_SIZE - len >= addr &&
3785 - (!vma || addr + len <= vma->vm_start))
3786 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3787 return addr;
3788 }
3789
3790 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3791 /* At this point: (!vma || addr < vma->vm_end). */
3792 if (TASK_SIZE - len < addr)
3793 return -ENOMEM;
3794 - if (!vma || addr + len <= vma->vm_start)
3795 + if (check_heap_stack_gap(vma, addr, len))
3796 return addr;
3797 addr = vma->vm_end;
3798 if (do_color_align)
3799 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3800 /* make sure it can fit in the remaining address space */
3801 if (likely(addr > len)) {
3802 vma = find_vma(mm, addr - len);
3803 - if (!vma || addr <= vma->vm_start) {
3804 + if (check_heap_stack_gap(vma, addr - len, len)) {
3805 /* cache the address as a hint for next time */
3806 return mm->free_area_cache = addr - len;
3807 }
3808 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3809 * return with success:
3810 */
3811 vma = find_vma(mm, addr);
3812 - if (likely(!vma || addr + len <= vma->vm_start)) {
3813 + if (check_heap_stack_gap(vma, addr, len)) {
3814 /* cache the address as a hint for next time */
3815 return mm->free_area_cache = addr;
3816 }
3817 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3818 mm->unmap_area = arch_unmap_area_topdown;
3819 }
3820 }
3821 -
3822 -static inline unsigned long brk_rnd(void)
3823 -{
3824 - unsigned long rnd = get_random_int();
3825 -
3826 - rnd = rnd << PAGE_SHIFT;
3827 - /* 8MB for 32bit, 256MB for 64bit */
3828 - if (TASK_IS_32BIT_ADDR)
3829 - rnd = rnd & 0x7ffffful;
3830 - else
3831 - rnd = rnd & 0xffffffful;
3832 -
3833 - return rnd;
3834 -}
3835 -
3836 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3837 -{
3838 - unsigned long base = mm->brk;
3839 - unsigned long ret;
3840 -
3841 - ret = PAGE_ALIGN(base + brk_rnd());
3842 -
3843 - if (ret < mm->brk)
3844 - return mm->brk;
3845 -
3846 - return ret;
3847 -}
3848 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3849 index 967d144..db12197 100644
3850 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3851 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3852 @@ -11,12 +11,14 @@
3853 #ifndef _ASM_PROC_CACHE_H
3854 #define _ASM_PROC_CACHE_H
3855
3856 +#include <linux/const.h>
3857 +
3858 /* L1 cache */
3859
3860 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3861 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3862 -#define L1_CACHE_BYTES 16 /* bytes per entry */
3863 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3864 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3865 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3866
3867 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3868 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3869 index bcb5df2..84fabd2 100644
3870 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3871 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3872 @@ -16,13 +16,15 @@
3873 #ifndef _ASM_PROC_CACHE_H
3874 #define _ASM_PROC_CACHE_H
3875
3876 +#include <linux/const.h>
3877 +
3878 /*
3879 * L1 cache
3880 */
3881 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3882 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3883 -#define L1_CACHE_BYTES 32 /* bytes per entry */
3884 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3885 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3886 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3887
3888 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3889 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3890 index 4ce7a01..449202a 100644
3891 --- a/arch/openrisc/include/asm/cache.h
3892 +++ b/arch/openrisc/include/asm/cache.h
3893 @@ -19,11 +19,13 @@
3894 #ifndef __ASM_OPENRISC_CACHE_H
3895 #define __ASM_OPENRISC_CACHE_H
3896
3897 +#include <linux/const.h>
3898 +
3899 /* FIXME: How can we replace these with values from the CPU...
3900 * they shouldn't be hard-coded!
3901 */
3902
3903 -#define L1_CACHE_BYTES 16
3904 #define L1_CACHE_SHIFT 4
3905 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3906
3907 #endif /* __ASM_OPENRISC_CACHE_H */
3908 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3909 index af9cf30..2aae9b2 100644
3910 --- a/arch/parisc/include/asm/atomic.h
3911 +++ b/arch/parisc/include/asm/atomic.h
3912 @@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3913
3914 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3915
3916 +#define atomic64_read_unchecked(v) atomic64_read(v)
3917 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3918 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3919 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3920 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3921 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3922 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3923 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3924 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3925 +
3926 #endif /* !CONFIG_64BIT */
3927
3928
3929 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3930 index 47f11c7..3420df2 100644
3931 --- a/arch/parisc/include/asm/cache.h
3932 +++ b/arch/parisc/include/asm/cache.h
3933 @@ -5,6 +5,7 @@
3934 #ifndef __ARCH_PARISC_CACHE_H
3935 #define __ARCH_PARISC_CACHE_H
3936
3937 +#include <linux/const.h>
3938
3939 /*
3940 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3941 @@ -15,13 +16,13 @@
3942 * just ruin performance.
3943 */
3944 #ifdef CONFIG_PA20
3945 -#define L1_CACHE_BYTES 64
3946 #define L1_CACHE_SHIFT 6
3947 #else
3948 -#define L1_CACHE_BYTES 32
3949 #define L1_CACHE_SHIFT 5
3950 #endif
3951
3952 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3953 +
3954 #ifndef __ASSEMBLY__
3955
3956 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3957 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3958 index 19f6cb1..6c78cf2 100644
3959 --- a/arch/parisc/include/asm/elf.h
3960 +++ b/arch/parisc/include/asm/elf.h
3961 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3962
3963 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3964
3965 +#ifdef CONFIG_PAX_ASLR
3966 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3967 +
3968 +#define PAX_DELTA_MMAP_LEN 16
3969 +#define PAX_DELTA_STACK_LEN 16
3970 +#endif
3971 +
3972 /* This yields a mask that user programs can use to figure out what
3973 instruction set this CPU supports. This could be done in user space,
3974 but it's not easy, and we've already done it here. */
3975 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3976 index fc987a1..6e068ef 100644
3977 --- a/arch/parisc/include/asm/pgalloc.h
3978 +++ b/arch/parisc/include/asm/pgalloc.h
3979 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3980 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3981 }
3982
3983 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3984 +{
3985 + pgd_populate(mm, pgd, pmd);
3986 +}
3987 +
3988 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3989 {
3990 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3991 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3992 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3993 #define pmd_free(mm, x) do { } while (0)
3994 #define pgd_populate(mm, pmd, pte) BUG()
3995 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
3996
3997 #endif
3998
3999 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
4000 index ee99f23..802b0a1 100644
4001 --- a/arch/parisc/include/asm/pgtable.h
4002 +++ b/arch/parisc/include/asm/pgtable.h
4003 @@ -212,6 +212,17 @@ struct vm_area_struct;
4004 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
4005 #define PAGE_COPY PAGE_EXECREAD
4006 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
4007 +
4008 +#ifdef CONFIG_PAX_PAGEEXEC
4009 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
4010 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
4011 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
4012 +#else
4013 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4014 +# define PAGE_COPY_NOEXEC PAGE_COPY
4015 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4016 +#endif
4017 +
4018 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
4019 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
4020 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
4021 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
4022 index 4ba2c93..f5e3974 100644
4023 --- a/arch/parisc/include/asm/uaccess.h
4024 +++ b/arch/parisc/include/asm/uaccess.h
4025 @@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
4026 const void __user *from,
4027 unsigned long n)
4028 {
4029 - int sz = __compiletime_object_size(to);
4030 + size_t sz = __compiletime_object_size(to);
4031 int ret = -EFAULT;
4032
4033 - if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
4034 + if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
4035 ret = __copy_from_user(to, from, n);
4036 else
4037 copy_from_user_overflow();
4038 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
4039 index 5e34ccf..672bc9c 100644
4040 --- a/arch/parisc/kernel/module.c
4041 +++ b/arch/parisc/kernel/module.c
4042 @@ -98,16 +98,38 @@
4043
4044 /* three functions to determine where in the module core
4045 * or init pieces the location is */
4046 +static inline int in_init_rx(struct module *me, void *loc)
4047 +{
4048 + return (loc >= me->module_init_rx &&
4049 + loc < (me->module_init_rx + me->init_size_rx));
4050 +}
4051 +
4052 +static inline int in_init_rw(struct module *me, void *loc)
4053 +{
4054 + return (loc >= me->module_init_rw &&
4055 + loc < (me->module_init_rw + me->init_size_rw));
4056 +}
4057 +
4058 static inline int in_init(struct module *me, void *loc)
4059 {
4060 - return (loc >= me->module_init &&
4061 - loc <= (me->module_init + me->init_size));
4062 + return in_init_rx(me, loc) || in_init_rw(me, loc);
4063 +}
4064 +
4065 +static inline int in_core_rx(struct module *me, void *loc)
4066 +{
4067 + return (loc >= me->module_core_rx &&
4068 + loc < (me->module_core_rx + me->core_size_rx));
4069 +}
4070 +
4071 +static inline int in_core_rw(struct module *me, void *loc)
4072 +{
4073 + return (loc >= me->module_core_rw &&
4074 + loc < (me->module_core_rw + me->core_size_rw));
4075 }
4076
4077 static inline int in_core(struct module *me, void *loc)
4078 {
4079 - return (loc >= me->module_core &&
4080 - loc <= (me->module_core + me->core_size));
4081 + return in_core_rx(me, loc) || in_core_rw(me, loc);
4082 }
4083
4084 static inline int in_local(struct module *me, void *loc)
4085 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
4086 }
4087
4088 /* align things a bit */
4089 - me->core_size = ALIGN(me->core_size, 16);
4090 - me->arch.got_offset = me->core_size;
4091 - me->core_size += gots * sizeof(struct got_entry);
4092 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
4093 + me->arch.got_offset = me->core_size_rw;
4094 + me->core_size_rw += gots * sizeof(struct got_entry);
4095
4096 - me->core_size = ALIGN(me->core_size, 16);
4097 - me->arch.fdesc_offset = me->core_size;
4098 - me->core_size += fdescs * sizeof(Elf_Fdesc);
4099 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
4100 + me->arch.fdesc_offset = me->core_size_rw;
4101 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
4102
4103 me->arch.got_max = gots;
4104 me->arch.fdesc_max = fdescs;
4105 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
4106
4107 BUG_ON(value == 0);
4108
4109 - got = me->module_core + me->arch.got_offset;
4110 + got = me->module_core_rw + me->arch.got_offset;
4111 for (i = 0; got[i].addr; i++)
4112 if (got[i].addr == value)
4113 goto out;
4114 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
4115 #ifdef CONFIG_64BIT
4116 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
4117 {
4118 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
4119 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
4120
4121 if (!value) {
4122 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
4123 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
4124
4125 /* Create new one */
4126 fdesc->addr = value;
4127 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
4128 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
4129 return (Elf_Addr)fdesc;
4130 }
4131 #endif /* CONFIG_64BIT */
4132 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
4133
4134 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
4135 end = table + sechdrs[me->arch.unwind_section].sh_size;
4136 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
4137 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
4138
4139 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
4140 me->arch.unwind_section, table, end, gp);
4141 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
4142 index f76c108..8117482 100644
4143 --- a/arch/parisc/kernel/sys_parisc.c
4144 +++ b/arch/parisc/kernel/sys_parisc.c
4145 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
4146 /* At this point: (!vma || addr < vma->vm_end). */
4147 if (TASK_SIZE - len < addr)
4148 return -ENOMEM;
4149 - if (!vma || addr + len <= vma->vm_start)
4150 + if (check_heap_stack_gap(vma, addr, len))
4151 return addr;
4152 addr = vma->vm_end;
4153 }
4154 @@ -81,7 +81,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
4155 /* At this point: (!vma || addr < vma->vm_end). */
4156 if (TASK_SIZE - len < addr)
4157 return -ENOMEM;
4158 - if (!vma || addr + len <= vma->vm_start)
4159 + if (check_heap_stack_gap(vma, addr, len))
4160 return addr;
4161 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
4162 if (addr < vma->vm_end) /* handle wraparound */
4163 @@ -100,7 +100,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4164 if (flags & MAP_FIXED)
4165 return addr;
4166 if (!addr)
4167 - addr = TASK_UNMAPPED_BASE;
4168 + addr = current->mm->mmap_base;
4169
4170 if (filp) {
4171 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
4172 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
4173 index 45ba99f..8e22c33 100644
4174 --- a/arch/parisc/kernel/traps.c
4175 +++ b/arch/parisc/kernel/traps.c
4176 @@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
4177
4178 down_read(&current->mm->mmap_sem);
4179 vma = find_vma(current->mm,regs->iaoq[0]);
4180 - if (vma && (regs->iaoq[0] >= vma->vm_start)
4181 - && (vma->vm_flags & VM_EXEC)) {
4182 -
4183 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
4184 fault_address = regs->iaoq[0];
4185 fault_space = regs->iasq[0];
4186
4187 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
4188 index 18162ce..94de376 100644
4189 --- a/arch/parisc/mm/fault.c
4190 +++ b/arch/parisc/mm/fault.c
4191 @@ -15,6 +15,7 @@
4192 #include <linux/sched.h>
4193 #include <linux/interrupt.h>
4194 #include <linux/module.h>
4195 +#include <linux/unistd.h>
4196
4197 #include <asm/uaccess.h>
4198 #include <asm/traps.h>
4199 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
4200 static unsigned long
4201 parisc_acctyp(unsigned long code, unsigned int inst)
4202 {
4203 - if (code == 6 || code == 16)
4204 + if (code == 6 || code == 7 || code == 16)
4205 return VM_EXEC;
4206
4207 switch (inst & 0xf0000000) {
4208 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
4209 }
4210 #endif
4211
4212 +#ifdef CONFIG_PAX_PAGEEXEC
4213 +/*
4214 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
4215 + *
4216 + * returns 1 when task should be killed
4217 + * 2 when rt_sigreturn trampoline was detected
4218 + * 3 when unpatched PLT trampoline was detected
4219 + */
4220 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4221 +{
4222 + int err;
4223 +
4224 +#ifdef CONFIG_PAX_EMUPLT
4225 +
4226 + do { /* PaX: unpatched PLT emulation */
4227 + unsigned int bl, depwi;
4228 +
4229 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
4230 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
4231 +
4232 + if (err)
4233 + break;
4234 +
4235 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
4236 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
4237 +
4238 + err = get_user(ldw, (unsigned int *)addr);
4239 + err |= get_user(bv, (unsigned int *)(addr+4));
4240 + err |= get_user(ldw2, (unsigned int *)(addr+8));
4241 +
4242 + if (err)
4243 + break;
4244 +
4245 + if (ldw == 0x0E801096U &&
4246 + bv == 0xEAC0C000U &&
4247 + ldw2 == 0x0E881095U)
4248 + {
4249 + unsigned int resolver, map;
4250 +
4251 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
4252 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
4253 + if (err)
4254 + break;
4255 +
4256 + regs->gr[20] = instruction_pointer(regs)+8;
4257 + regs->gr[21] = map;
4258 + regs->gr[22] = resolver;
4259 + regs->iaoq[0] = resolver | 3UL;
4260 + regs->iaoq[1] = regs->iaoq[0] + 4;
4261 + return 3;
4262 + }
4263 + }
4264 + } while (0);
4265 +#endif
4266 +
4267 +#ifdef CONFIG_PAX_EMUTRAMP
4268 +
4269 +#ifndef CONFIG_PAX_EMUSIGRT
4270 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
4271 + return 1;
4272 +#endif
4273 +
4274 + do { /* PaX: rt_sigreturn emulation */
4275 + unsigned int ldi1, ldi2, bel, nop;
4276 +
4277 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
4278 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
4279 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
4280 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
4281 +
4282 + if (err)
4283 + break;
4284 +
4285 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
4286 + ldi2 == 0x3414015AU &&
4287 + bel == 0xE4008200U &&
4288 + nop == 0x08000240U)
4289 + {
4290 + regs->gr[25] = (ldi1 & 2) >> 1;
4291 + regs->gr[20] = __NR_rt_sigreturn;
4292 + regs->gr[31] = regs->iaoq[1] + 16;
4293 + regs->sr[0] = regs->iasq[1];
4294 + regs->iaoq[0] = 0x100UL;
4295 + regs->iaoq[1] = regs->iaoq[0] + 4;
4296 + regs->iasq[0] = regs->sr[2];
4297 + regs->iasq[1] = regs->sr[2];
4298 + return 2;
4299 + }
4300 + } while (0);
4301 +#endif
4302 +
4303 + return 1;
4304 +}
4305 +
4306 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4307 +{
4308 + unsigned long i;
4309 +
4310 + printk(KERN_ERR "PAX: bytes at PC: ");
4311 + for (i = 0; i < 5; i++) {
4312 + unsigned int c;
4313 + if (get_user(c, (unsigned int *)pc+i))
4314 + printk(KERN_CONT "???????? ");
4315 + else
4316 + printk(KERN_CONT "%08x ", c);
4317 + }
4318 + printk("\n");
4319 +}
4320 +#endif
4321 +
4322 int fixup_exception(struct pt_regs *regs)
4323 {
4324 const struct exception_table_entry *fix;
4325 @@ -192,8 +303,33 @@ good_area:
4326
4327 acc_type = parisc_acctyp(code,regs->iir);
4328
4329 - if ((vma->vm_flags & acc_type) != acc_type)
4330 + if ((vma->vm_flags & acc_type) != acc_type) {
4331 +
4332 +#ifdef CONFIG_PAX_PAGEEXEC
4333 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
4334 + (address & ~3UL) == instruction_pointer(regs))
4335 + {
4336 + up_read(&mm->mmap_sem);
4337 + switch (pax_handle_fetch_fault(regs)) {
4338 +
4339 +#ifdef CONFIG_PAX_EMUPLT
4340 + case 3:
4341 + return;
4342 +#endif
4343 +
4344 +#ifdef CONFIG_PAX_EMUTRAMP
4345 + case 2:
4346 + return;
4347 +#endif
4348 +
4349 + }
4350 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
4351 + do_group_exit(SIGKILL);
4352 + }
4353 +#endif
4354 +
4355 goto bad_area;
4356 + }
4357
4358 /*
4359 * If for any reason at all we couldn't handle the fault, make
4360 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
4361 index e3b1d41..8e81edf 100644
4362 --- a/arch/powerpc/include/asm/atomic.h
4363 +++ b/arch/powerpc/include/asm/atomic.h
4364 @@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
4365 return t1;
4366 }
4367
4368 +#define atomic64_read_unchecked(v) atomic64_read(v)
4369 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4370 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4371 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4372 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4373 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4374 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4375 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4376 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4377 +
4378 #endif /* __powerpc64__ */
4379
4380 #endif /* __KERNEL__ */
4381 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
4382 index 9e495c9..b6878e5 100644
4383 --- a/arch/powerpc/include/asm/cache.h
4384 +++ b/arch/powerpc/include/asm/cache.h
4385 @@ -3,6 +3,7 @@
4386
4387 #ifdef __KERNEL__
4388
4389 +#include <linux/const.h>
4390
4391 /* bytes per L1 cache line */
4392 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
4393 @@ -22,7 +23,7 @@
4394 #define L1_CACHE_SHIFT 7
4395 #endif
4396
4397 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4398 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4399
4400 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4401
4402 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
4403 index 6abf0a1..459d0f1 100644
4404 --- a/arch/powerpc/include/asm/elf.h
4405 +++ b/arch/powerpc/include/asm/elf.h
4406 @@ -28,8 +28,19 @@
4407 the loader. We need to make sure that it is out of the way of the program
4408 that it will "exec", and that there is sufficient room for the brk. */
4409
4410 -extern unsigned long randomize_et_dyn(unsigned long base);
4411 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
4412 +#define ELF_ET_DYN_BASE (0x20000000)
4413 +
4414 +#ifdef CONFIG_PAX_ASLR
4415 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
4416 +
4417 +#ifdef __powerpc64__
4418 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
4419 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
4420 +#else
4421 +#define PAX_DELTA_MMAP_LEN 15
4422 +#define PAX_DELTA_STACK_LEN 15
4423 +#endif
4424 +#endif
4425
4426 /*
4427 * Our registers are always unsigned longs, whether we're a 32 bit
4428 @@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
4429 (0x7ff >> (PAGE_SHIFT - 12)) : \
4430 (0x3ffff >> (PAGE_SHIFT - 12)))
4431
4432 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4433 -#define arch_randomize_brk arch_randomize_brk
4434 -
4435 -
4436 #ifdef CONFIG_SPU_BASE
4437 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
4438 #define NT_SPU 1
4439 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
4440 index 8196e9c..d83a9f3 100644
4441 --- a/arch/powerpc/include/asm/exec.h
4442 +++ b/arch/powerpc/include/asm/exec.h
4443 @@ -4,6 +4,6 @@
4444 #ifndef _ASM_POWERPC_EXEC_H
4445 #define _ASM_POWERPC_EXEC_H
4446
4447 -extern unsigned long arch_align_stack(unsigned long sp);
4448 +#define arch_align_stack(x) ((x) & ~0xfUL)
4449
4450 #endif /* _ASM_POWERPC_EXEC_H */
4451 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
4452 index 5acabbd..7ea14fa 100644
4453 --- a/arch/powerpc/include/asm/kmap_types.h
4454 +++ b/arch/powerpc/include/asm/kmap_types.h
4455 @@ -10,7 +10,7 @@
4456 * 2 of the License, or (at your option) any later version.
4457 */
4458
4459 -#define KM_TYPE_NR 16
4460 +#define KM_TYPE_NR 17
4461
4462 #endif /* __KERNEL__ */
4463 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
4464 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
4465 index 8565c25..2865190 100644
4466 --- a/arch/powerpc/include/asm/mman.h
4467 +++ b/arch/powerpc/include/asm/mman.h
4468 @@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
4469 }
4470 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
4471
4472 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
4473 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
4474 {
4475 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
4476 }
4477 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
4478 index f072e97..b436dee 100644
4479 --- a/arch/powerpc/include/asm/page.h
4480 +++ b/arch/powerpc/include/asm/page.h
4481 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
4482 * and needs to be executable. This means the whole heap ends
4483 * up being executable.
4484 */
4485 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
4486 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4487 +#define VM_DATA_DEFAULT_FLAGS32 \
4488 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
4489 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4490
4491 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
4492 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4493 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
4494 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
4495 #endif
4496
4497 +#define ktla_ktva(addr) (addr)
4498 +#define ktva_ktla(addr) (addr)
4499 +
4500 /*
4501 * Use the top bit of the higher-level page table entries to indicate whether
4502 * the entries we point to contain hugepages. This works because we know that
4503 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
4504 index cd915d6..c10cee8 100644
4505 --- a/arch/powerpc/include/asm/page_64.h
4506 +++ b/arch/powerpc/include/asm/page_64.h
4507 @@ -154,15 +154,18 @@ do { \
4508 * stack by default, so in the absence of a PT_GNU_STACK program header
4509 * we turn execute permission off.
4510 */
4511 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
4512 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4513 +#define VM_STACK_DEFAULT_FLAGS32 \
4514 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
4515 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4516
4517 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
4518 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4519
4520 +#ifndef CONFIG_PAX_PAGEEXEC
4521 #define VM_STACK_DEFAULT_FLAGS \
4522 (is_32bit_task() ? \
4523 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
4524 +#endif
4525
4526 #include <asm-generic/getorder.h>
4527
4528 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
4529 index 292725c..f87ae14 100644
4530 --- a/arch/powerpc/include/asm/pgalloc-64.h
4531 +++ b/arch/powerpc/include/asm/pgalloc-64.h
4532 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
4533 #ifndef CONFIG_PPC_64K_PAGES
4534
4535 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
4536 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
4537
4538 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4539 {
4540 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4541 pud_set(pud, (unsigned long)pmd);
4542 }
4543
4544 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4545 +{
4546 + pud_populate(mm, pud, pmd);
4547 +}
4548 +
4549 #define pmd_populate(mm, pmd, pte_page) \
4550 pmd_populate_kernel(mm, pmd, page_address(pte_page))
4551 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
4552 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4553 #else /* CONFIG_PPC_64K_PAGES */
4554
4555 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
4556 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
4557
4558 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
4559 pte_t *pte)
4560 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
4561 index a9cbd3b..3b67efa 100644
4562 --- a/arch/powerpc/include/asm/pgtable.h
4563 +++ b/arch/powerpc/include/asm/pgtable.h
4564 @@ -2,6 +2,7 @@
4565 #define _ASM_POWERPC_PGTABLE_H
4566 #ifdef __KERNEL__
4567
4568 +#include <linux/const.h>
4569 #ifndef __ASSEMBLY__
4570 #include <asm/processor.h> /* For TASK_SIZE */
4571 #include <asm/mmu.h>
4572 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
4573 index 4aad413..85d86bf 100644
4574 --- a/arch/powerpc/include/asm/pte-hash32.h
4575 +++ b/arch/powerpc/include/asm/pte-hash32.h
4576 @@ -21,6 +21,7 @@
4577 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
4578 #define _PAGE_USER 0x004 /* usermode access allowed */
4579 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
4580 +#define _PAGE_EXEC _PAGE_GUARDED
4581 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
4582 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
4583 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
4584 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
4585 index d24c141..b60696e 100644
4586 --- a/arch/powerpc/include/asm/reg.h
4587 +++ b/arch/powerpc/include/asm/reg.h
4588 @@ -215,6 +215,7 @@
4589 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
4590 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
4591 #define DSISR_NOHPTE 0x40000000 /* no translation found */
4592 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
4593 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
4594 #define DSISR_ISSTORE 0x02000000 /* access was a store */
4595 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
4596 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
4597 index 406b7b9..af63426 100644
4598 --- a/arch/powerpc/include/asm/thread_info.h
4599 +++ b/arch/powerpc/include/asm/thread_info.h
4600 @@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
4601 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
4602 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
4603 #define TIF_SINGLESTEP 8 /* singlestepping active */
4604 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
4605 #define TIF_SECCOMP 10 /* secure computing */
4606 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
4607 #define TIF_NOERROR 12 /* Force successful syscall return */
4608 @@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
4609 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
4610 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
4611 for stack store? */
4612 +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
4613 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
4614 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
4615
4616 /* as above, but as bit values */
4617 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
4618 @@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
4619 #define _TIF_UPROBE (1<<TIF_UPROBE)
4620 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
4621 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
4622 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
4623 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
4624 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
4625 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
4626 + _TIF_GRSEC_SETXID)
4627
4628 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
4629 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
4630 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
4631 index 4db4959..335e00c 100644
4632 --- a/arch/powerpc/include/asm/uaccess.h
4633 +++ b/arch/powerpc/include/asm/uaccess.h
4634 @@ -13,6 +13,8 @@
4635 #define VERIFY_READ 0
4636 #define VERIFY_WRITE 1
4637
4638 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4639 +
4640 /*
4641 * The fs value determines whether argument validity checking should be
4642 * performed or not. If get_fs() == USER_DS, checking is performed, with
4643 @@ -318,52 +320,6 @@ do { \
4644 extern unsigned long __copy_tofrom_user(void __user *to,
4645 const void __user *from, unsigned long size);
4646
4647 -#ifndef __powerpc64__
4648 -
4649 -static inline unsigned long copy_from_user(void *to,
4650 - const void __user *from, unsigned long n)
4651 -{
4652 - unsigned long over;
4653 -
4654 - if (access_ok(VERIFY_READ, from, n))
4655 - return __copy_tofrom_user((__force void __user *)to, from, n);
4656 - if ((unsigned long)from < TASK_SIZE) {
4657 - over = (unsigned long)from + n - TASK_SIZE;
4658 - return __copy_tofrom_user((__force void __user *)to, from,
4659 - n - over) + over;
4660 - }
4661 - return n;
4662 -}
4663 -
4664 -static inline unsigned long copy_to_user(void __user *to,
4665 - const void *from, unsigned long n)
4666 -{
4667 - unsigned long over;
4668 -
4669 - if (access_ok(VERIFY_WRITE, to, n))
4670 - return __copy_tofrom_user(to, (__force void __user *)from, n);
4671 - if ((unsigned long)to < TASK_SIZE) {
4672 - over = (unsigned long)to + n - TASK_SIZE;
4673 - return __copy_tofrom_user(to, (__force void __user *)from,
4674 - n - over) + over;
4675 - }
4676 - return n;
4677 -}
4678 -
4679 -#else /* __powerpc64__ */
4680 -
4681 -#define __copy_in_user(to, from, size) \
4682 - __copy_tofrom_user((to), (from), (size))
4683 -
4684 -extern unsigned long copy_from_user(void *to, const void __user *from,
4685 - unsigned long n);
4686 -extern unsigned long copy_to_user(void __user *to, const void *from,
4687 - unsigned long n);
4688 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
4689 - unsigned long n);
4690 -
4691 -#endif /* __powerpc64__ */
4692 -
4693 static inline unsigned long __copy_from_user_inatomic(void *to,
4694 const void __user *from, unsigned long n)
4695 {
4696 @@ -387,6 +343,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
4697 if (ret == 0)
4698 return 0;
4699 }
4700 +
4701 + if (!__builtin_constant_p(n))
4702 + check_object_size(to, n, false);
4703 +
4704 return __copy_tofrom_user((__force void __user *)to, from, n);
4705 }
4706
4707 @@ -413,6 +373,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4708 if (ret == 0)
4709 return 0;
4710 }
4711 +
4712 + if (!__builtin_constant_p(n))
4713 + check_object_size(from, n, true);
4714 +
4715 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4716 }
4717
4718 @@ -430,6 +394,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4719 return __copy_to_user_inatomic(to, from, size);
4720 }
4721
4722 +#ifndef __powerpc64__
4723 +
4724 +static inline unsigned long __must_check copy_from_user(void *to,
4725 + const void __user *from, unsigned long n)
4726 +{
4727 + unsigned long over;
4728 +
4729 + if ((long)n < 0)
4730 + return n;
4731 +
4732 + if (access_ok(VERIFY_READ, from, n)) {
4733 + if (!__builtin_constant_p(n))
4734 + check_object_size(to, n, false);
4735 + return __copy_tofrom_user((__force void __user *)to, from, n);
4736 + }
4737 + if ((unsigned long)from < TASK_SIZE) {
4738 + over = (unsigned long)from + n - TASK_SIZE;
4739 + if (!__builtin_constant_p(n - over))
4740 + check_object_size(to, n - over, false);
4741 + return __copy_tofrom_user((__force void __user *)to, from,
4742 + n - over) + over;
4743 + }
4744 + return n;
4745 +}
4746 +
4747 +static inline unsigned long __must_check copy_to_user(void __user *to,
4748 + const void *from, unsigned long n)
4749 +{
4750 + unsigned long over;
4751 +
4752 + if ((long)n < 0)
4753 + return n;
4754 +
4755 + if (access_ok(VERIFY_WRITE, to, n)) {
4756 + if (!__builtin_constant_p(n))
4757 + check_object_size(from, n, true);
4758 + return __copy_tofrom_user(to, (__force void __user *)from, n);
4759 + }
4760 + if ((unsigned long)to < TASK_SIZE) {
4761 + over = (unsigned long)to + n - TASK_SIZE;
4762 + if (!__builtin_constant_p(n))
4763 + check_object_size(from, n - over, true);
4764 + return __copy_tofrom_user(to, (__force void __user *)from,
4765 + n - over) + over;
4766 + }
4767 + return n;
4768 +}
4769 +
4770 +#else /* __powerpc64__ */
4771 +
4772 +#define __copy_in_user(to, from, size) \
4773 + __copy_tofrom_user((to), (from), (size))
4774 +
4775 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4776 +{
4777 + if ((long)n < 0 || n > INT_MAX)
4778 + return n;
4779 +
4780 + if (!__builtin_constant_p(n))
4781 + check_object_size(to, n, false);
4782 +
4783 + if (likely(access_ok(VERIFY_READ, from, n)))
4784 + n = __copy_from_user(to, from, n);
4785 + else
4786 + memset(to, 0, n);
4787 + return n;
4788 +}
4789 +
4790 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4791 +{
4792 + if ((long)n < 0 || n > INT_MAX)
4793 + return n;
4794 +
4795 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
4796 + if (!__builtin_constant_p(n))
4797 + check_object_size(from, n, true);
4798 + n = __copy_to_user(to, from, n);
4799 + }
4800 + return n;
4801 +}
4802 +
4803 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
4804 + unsigned long n);
4805 +
4806 +#endif /* __powerpc64__ */
4807 +
4808 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4809
4810 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4811 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4812 index 4684e33..acc4d19e 100644
4813 --- a/arch/powerpc/kernel/exceptions-64e.S
4814 +++ b/arch/powerpc/kernel/exceptions-64e.S
4815 @@ -715,6 +715,7 @@ storage_fault_common:
4816 std r14,_DAR(r1)
4817 std r15,_DSISR(r1)
4818 addi r3,r1,STACK_FRAME_OVERHEAD
4819 + bl .save_nvgprs
4820 mr r4,r14
4821 mr r5,r15
4822 ld r14,PACA_EXGEN+EX_R14(r13)
4823 @@ -723,8 +724,7 @@ storage_fault_common:
4824 cmpdi r3,0
4825 bne- 1f
4826 b .ret_from_except_lite
4827 -1: bl .save_nvgprs
4828 - mr r5,r3
4829 +1: mr r5,r3
4830 addi r3,r1,STACK_FRAME_OVERHEAD
4831 ld r4,_DAR(r1)
4832 bl .bad_page_fault
4833 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4834 index 10b658a..e542888 100644
4835 --- a/arch/powerpc/kernel/exceptions-64s.S
4836 +++ b/arch/powerpc/kernel/exceptions-64s.S
4837 @@ -1013,10 +1013,10 @@ handle_page_fault:
4838 11: ld r4,_DAR(r1)
4839 ld r5,_DSISR(r1)
4840 addi r3,r1,STACK_FRAME_OVERHEAD
4841 + bl .save_nvgprs
4842 bl .do_page_fault
4843 cmpdi r3,0
4844 beq+ 12f
4845 - bl .save_nvgprs
4846 mr r5,r3
4847 addi r3,r1,STACK_FRAME_OVERHEAD
4848 lwz r4,_DAR(r1)
4849 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4850 index 2e3200c..72095ce 100644
4851 --- a/arch/powerpc/kernel/module_32.c
4852 +++ b/arch/powerpc/kernel/module_32.c
4853 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4854 me->arch.core_plt_section = i;
4855 }
4856 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4857 - printk("Module doesn't contain .plt or .init.plt sections.\n");
4858 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4859 return -ENOEXEC;
4860 }
4861
4862 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4863
4864 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4865 /* Init, or core PLT? */
4866 - if (location >= mod->module_core
4867 - && location < mod->module_core + mod->core_size)
4868 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4869 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4870 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4871 - else
4872 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4873 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4874 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4875 + else {
4876 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4877 + return ~0UL;
4878 + }
4879
4880 /* Find this entry, or if that fails, the next avail. entry */
4881 while (entry->jump[0]) {
4882 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4883 index ba48233..16ac31d 100644
4884 --- a/arch/powerpc/kernel/process.c
4885 +++ b/arch/powerpc/kernel/process.c
4886 @@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
4887 * Lookup NIP late so we have the best change of getting the
4888 * above info out without failing
4889 */
4890 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4891 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4892 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4893 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4894 #endif
4895 show_stack(current, (unsigned long *) regs->gpr[1]);
4896 if (!user_mode(regs))
4897 @@ -1175,10 +1175,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4898 newsp = stack[0];
4899 ip = stack[STACK_FRAME_LR_SAVE];
4900 if (!firstframe || ip != lr) {
4901 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4902 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4903 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4904 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4905 - printk(" (%pS)",
4906 + printk(" (%pA)",
4907 (void *)current->ret_stack[curr_frame].ret);
4908 curr_frame--;
4909 }
4910 @@ -1198,7 +1198,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4911 struct pt_regs *regs = (struct pt_regs *)
4912 (sp + STACK_FRAME_OVERHEAD);
4913 lr = regs->link;
4914 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4915 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4916 regs->trap, (void *)regs->nip, (void *)lr);
4917 firstframe = 1;
4918 }
4919 @@ -1240,58 +1240,3 @@ void __ppc64_runlatch_off(void)
4920 mtspr(SPRN_CTRLT, ctrl);
4921 }
4922 #endif /* CONFIG_PPC64 */
4923 -
4924 -unsigned long arch_align_stack(unsigned long sp)
4925 -{
4926 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4927 - sp -= get_random_int() & ~PAGE_MASK;
4928 - return sp & ~0xf;
4929 -}
4930 -
4931 -static inline unsigned long brk_rnd(void)
4932 -{
4933 - unsigned long rnd = 0;
4934 -
4935 - /* 8MB for 32bit, 1GB for 64bit */
4936 - if (is_32bit_task())
4937 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4938 - else
4939 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4940 -
4941 - return rnd << PAGE_SHIFT;
4942 -}
4943 -
4944 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4945 -{
4946 - unsigned long base = mm->brk;
4947 - unsigned long ret;
4948 -
4949 -#ifdef CONFIG_PPC_STD_MMU_64
4950 - /*
4951 - * If we are using 1TB segments and we are allowed to randomise
4952 - * the heap, we can put it above 1TB so it is backed by a 1TB
4953 - * segment. Otherwise the heap will be in the bottom 1TB
4954 - * which always uses 256MB segments and this may result in a
4955 - * performance penalty.
4956 - */
4957 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4958 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4959 -#endif
4960 -
4961 - ret = PAGE_ALIGN(base + brk_rnd());
4962 -
4963 - if (ret < mm->brk)
4964 - return mm->brk;
4965 -
4966 - return ret;
4967 -}
4968 -
4969 -unsigned long randomize_et_dyn(unsigned long base)
4970 -{
4971 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4972 -
4973 - if (ret < base)
4974 - return base;
4975 -
4976 - return ret;
4977 -}
4978 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4979 index 79d8e56..38ffcbb 100644
4980 --- a/arch/powerpc/kernel/ptrace.c
4981 +++ b/arch/powerpc/kernel/ptrace.c
4982 @@ -1663,6 +1663,10 @@ long arch_ptrace(struct task_struct *child, long request,
4983 return ret;
4984 }
4985
4986 +#ifdef CONFIG_GRKERNSEC_SETXID
4987 +extern void gr_delayed_cred_worker(void);
4988 +#endif
4989 +
4990 /*
4991 * We must return the syscall number to actually look up in the table.
4992 * This can be -1L to skip running any syscall at all.
4993 @@ -1673,6 +1677,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4994
4995 secure_computing_strict(regs->gpr[0]);
4996
4997 +#ifdef CONFIG_GRKERNSEC_SETXID
4998 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4999 + gr_delayed_cred_worker();
5000 +#endif
5001 +
5002 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
5003 tracehook_report_syscall_entry(regs))
5004 /*
5005 @@ -1707,6 +1716,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
5006 {
5007 int step;
5008
5009 +#ifdef CONFIG_GRKERNSEC_SETXID
5010 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5011 + gr_delayed_cred_worker();
5012 +#endif
5013 +
5014 audit_syscall_exit(regs);
5015
5016 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5017 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
5018 index 804e323..79181c1 100644
5019 --- a/arch/powerpc/kernel/signal_32.c
5020 +++ b/arch/powerpc/kernel/signal_32.c
5021 @@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
5022 /* Save user registers on the stack */
5023 frame = &rt_sf->uc.uc_mcontext;
5024 addr = frame;
5025 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
5026 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
5027 if (save_user_regs(regs, frame, 0, 1))
5028 goto badframe;
5029 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
5030 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
5031 index d183f87..1867f1a 100644
5032 --- a/arch/powerpc/kernel/signal_64.c
5033 +++ b/arch/powerpc/kernel/signal_64.c
5034 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
5035 current->thread.fpscr.val = 0;
5036
5037 /* Set up to return from userspace. */
5038 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
5039 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
5040 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
5041 } else {
5042 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
5043 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
5044 index 3251840..3f7c77a 100644
5045 --- a/arch/powerpc/kernel/traps.c
5046 +++ b/arch/powerpc/kernel/traps.c
5047 @@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
5048 return flags;
5049 }
5050
5051 +extern void gr_handle_kernel_exploit(void);
5052 +
5053 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
5054 int signr)
5055 {
5056 @@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
5057 panic("Fatal exception in interrupt");
5058 if (panic_on_oops)
5059 panic("Fatal exception");
5060 +
5061 + gr_handle_kernel_exploit();
5062 +
5063 do_exit(signr);
5064 }
5065
5066 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
5067 index 1b2076f..835e4be 100644
5068 --- a/arch/powerpc/kernel/vdso.c
5069 +++ b/arch/powerpc/kernel/vdso.c
5070 @@ -34,6 +34,7 @@
5071 #include <asm/firmware.h>
5072 #include <asm/vdso.h>
5073 #include <asm/vdso_datapage.h>
5074 +#include <asm/mman.h>
5075
5076 #include "setup.h"
5077
5078 @@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
5079 vdso_base = VDSO32_MBASE;
5080 #endif
5081
5082 - current->mm->context.vdso_base = 0;
5083 + current->mm->context.vdso_base = ~0UL;
5084
5085 /* vDSO has a problem and was disabled, just don't "enable" it for the
5086 * process
5087 @@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
5088 vdso_base = get_unmapped_area(NULL, vdso_base,
5089 (vdso_pages << PAGE_SHIFT) +
5090 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
5091 - 0, 0);
5092 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
5093 if (IS_ERR_VALUE(vdso_base)) {
5094 rc = vdso_base;
5095 goto fail_mmapsem;
5096 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
5097 index 5eea6f3..5d10396 100644
5098 --- a/arch/powerpc/lib/usercopy_64.c
5099 +++ b/arch/powerpc/lib/usercopy_64.c
5100 @@ -9,22 +9,6 @@
5101 #include <linux/module.h>
5102 #include <asm/uaccess.h>
5103
5104 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5105 -{
5106 - if (likely(access_ok(VERIFY_READ, from, n)))
5107 - n = __copy_from_user(to, from, n);
5108 - else
5109 - memset(to, 0, n);
5110 - return n;
5111 -}
5112 -
5113 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5114 -{
5115 - if (likely(access_ok(VERIFY_WRITE, to, n)))
5116 - n = __copy_to_user(to, from, n);
5117 - return n;
5118 -}
5119 -
5120 unsigned long copy_in_user(void __user *to, const void __user *from,
5121 unsigned long n)
5122 {
5123 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
5124 return n;
5125 }
5126
5127 -EXPORT_SYMBOL(copy_from_user);
5128 -EXPORT_SYMBOL(copy_to_user);
5129 EXPORT_SYMBOL(copy_in_user);
5130
5131 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
5132 index 0a6b283..7674925 100644
5133 --- a/arch/powerpc/mm/fault.c
5134 +++ b/arch/powerpc/mm/fault.c
5135 @@ -32,6 +32,10 @@
5136 #include <linux/perf_event.h>
5137 #include <linux/magic.h>
5138 #include <linux/ratelimit.h>
5139 +#include <linux/slab.h>
5140 +#include <linux/pagemap.h>
5141 +#include <linux/compiler.h>
5142 +#include <linux/unistd.h>
5143
5144 #include <asm/firmware.h>
5145 #include <asm/page.h>
5146 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
5147 }
5148 #endif
5149
5150 +#ifdef CONFIG_PAX_PAGEEXEC
5151 +/*
5152 + * PaX: decide what to do with offenders (regs->nip = fault address)
5153 + *
5154 + * returns 1 when task should be killed
5155 + */
5156 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5157 +{
5158 + return 1;
5159 +}
5160 +
5161 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5162 +{
5163 + unsigned long i;
5164 +
5165 + printk(KERN_ERR "PAX: bytes at PC: ");
5166 + for (i = 0; i < 5; i++) {
5167 + unsigned int c;
5168 + if (get_user(c, (unsigned int __user *)pc+i))
5169 + printk(KERN_CONT "???????? ");
5170 + else
5171 + printk(KERN_CONT "%08x ", c);
5172 + }
5173 + printk("\n");
5174 +}
5175 +#endif
5176 +
5177 /*
5178 * Check whether the instruction at regs->nip is a store using
5179 * an update addressing form which will update r1.
5180 @@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
5181 * indicate errors in DSISR but can validly be set in SRR1.
5182 */
5183 if (trap == 0x400)
5184 - error_code &= 0x48200000;
5185 + error_code &= 0x58200000;
5186 else
5187 is_write = error_code & DSISR_ISSTORE;
5188 #else
5189 @@ -367,7 +398,7 @@ good_area:
5190 * "undefined". Of those that can be set, this is the only
5191 * one which seems bad.
5192 */
5193 - if (error_code & 0x10000000)
5194 + if (error_code & DSISR_GUARDED)
5195 /* Guarded storage error. */
5196 goto bad_area;
5197 #endif /* CONFIG_8xx */
5198 @@ -382,7 +413,7 @@ good_area:
5199 * processors use the same I/D cache coherency mechanism
5200 * as embedded.
5201 */
5202 - if (error_code & DSISR_PROTFAULT)
5203 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
5204 goto bad_area;
5205 #endif /* CONFIG_PPC_STD_MMU */
5206
5207 @@ -465,6 +496,23 @@ bad_area:
5208 bad_area_nosemaphore:
5209 /* User mode accesses cause a SIGSEGV */
5210 if (user_mode(regs)) {
5211 +
5212 +#ifdef CONFIG_PAX_PAGEEXEC
5213 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5214 +#ifdef CONFIG_PPC_STD_MMU
5215 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
5216 +#else
5217 + if (is_exec && regs->nip == address) {
5218 +#endif
5219 + switch (pax_handle_fetch_fault(regs)) {
5220 + }
5221 +
5222 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
5223 + do_group_exit(SIGKILL);
5224 + }
5225 + }
5226 +#endif
5227 +
5228 _exception(SIGSEGV, regs, code, address);
5229 return 0;
5230 }
5231 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
5232 index 67a42ed..1c7210c 100644
5233 --- a/arch/powerpc/mm/mmap_64.c
5234 +++ b/arch/powerpc/mm/mmap_64.c
5235 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5236 */
5237 if (mmap_is_legacy()) {
5238 mm->mmap_base = TASK_UNMAPPED_BASE;
5239 +
5240 +#ifdef CONFIG_PAX_RANDMMAP
5241 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5242 + mm->mmap_base += mm->delta_mmap;
5243 +#endif
5244 +
5245 mm->get_unmapped_area = arch_get_unmapped_area;
5246 mm->unmap_area = arch_unmap_area;
5247 } else {
5248 mm->mmap_base = mmap_base();
5249 +
5250 +#ifdef CONFIG_PAX_RANDMMAP
5251 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5252 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5253 +#endif
5254 +
5255 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5256 mm->unmap_area = arch_unmap_area_topdown;
5257 }
5258 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
5259 index 5829d2a..b64ed2e 100644
5260 --- a/arch/powerpc/mm/slice.c
5261 +++ b/arch/powerpc/mm/slice.c
5262 @@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
5263 if ((mm->task_size - len) < addr)
5264 return 0;
5265 vma = find_vma(mm, addr);
5266 - return (!vma || (addr + len) <= vma->vm_start);
5267 + return check_heap_stack_gap(vma, addr, len);
5268 }
5269
5270 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
5271 @@ -272,7 +272,7 @@ full_search:
5272 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
5273 continue;
5274 }
5275 - if (!vma || addr + len <= vma->vm_start) {
5276 + if (check_heap_stack_gap(vma, addr, len)) {
5277 /*
5278 * Remember the place where we stopped the search:
5279 */
5280 @@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5281 }
5282 }
5283
5284 - addr = mm->mmap_base;
5285 - while (addr > len) {
5286 + if (mm->mmap_base < len)
5287 + addr = -ENOMEM;
5288 + else
5289 + addr = mm->mmap_base - len;
5290 +
5291 + while (!IS_ERR_VALUE(addr)) {
5292 /* Go down by chunk size */
5293 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
5294 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
5295
5296 /* Check for hit with different page size */
5297 mask = slice_range_to_mask(addr, len);
5298 @@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5299 * return with success:
5300 */
5301 vma = find_vma(mm, addr);
5302 - if (!vma || (addr + len) <= vma->vm_start) {
5303 + if (check_heap_stack_gap(vma, addr, len)) {
5304 /* remember the address as a hint for next time */
5305 if (use_cache)
5306 mm->free_area_cache = addr;
5307 @@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5308 mm->cached_hole_size = vma->vm_start - addr;
5309
5310 /* try just below the current vma->vm_start */
5311 - addr = vma->vm_start;
5312 + addr = skip_heap_stack_gap(vma, len);
5313 }
5314
5315 /*
5316 @@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
5317 if (fixed && addr > (mm->task_size - len))
5318 return -EINVAL;
5319
5320 +#ifdef CONFIG_PAX_RANDMMAP
5321 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
5322 + addr = 0;
5323 +#endif
5324 +
5325 /* If hint, make sure it matches our alignment restrictions */
5326 if (!fixed && addr) {
5327 addr = _ALIGN_UP(addr, 1ul << pshift);
5328 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
5329 index c797832..ce575c8 100644
5330 --- a/arch/s390/include/asm/atomic.h
5331 +++ b/arch/s390/include/asm/atomic.h
5332 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
5333 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
5334 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5335
5336 +#define atomic64_read_unchecked(v) atomic64_read(v)
5337 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5338 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5339 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5340 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5341 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5342 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5343 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5344 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5345 +
5346 #define smp_mb__before_atomic_dec() smp_mb()
5347 #define smp_mb__after_atomic_dec() smp_mb()
5348 #define smp_mb__before_atomic_inc() smp_mb()
5349 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
5350 index 4d7ccac..d03d0ad 100644
5351 --- a/arch/s390/include/asm/cache.h
5352 +++ b/arch/s390/include/asm/cache.h
5353 @@ -9,8 +9,10 @@
5354 #ifndef __ARCH_S390_CACHE_H
5355 #define __ARCH_S390_CACHE_H
5356
5357 -#define L1_CACHE_BYTES 256
5358 +#include <linux/const.h>
5359 +
5360 #define L1_CACHE_SHIFT 8
5361 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5362 #define NET_SKB_PAD 32
5363
5364 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5365 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
5366 index 178ff96..8c93bd1 100644
5367 --- a/arch/s390/include/asm/elf.h
5368 +++ b/arch/s390/include/asm/elf.h
5369 @@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
5370 the loader. We need to make sure that it is out of the way of the program
5371 that it will "exec", and that there is sufficient room for the brk. */
5372
5373 -extern unsigned long randomize_et_dyn(unsigned long base);
5374 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
5375 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
5376 +
5377 +#ifdef CONFIG_PAX_ASLR
5378 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
5379 +
5380 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
5381 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
5382 +#endif
5383
5384 /* This yields a mask that user programs can use to figure out what
5385 instruction set this CPU supports. */
5386 @@ -210,9 +216,6 @@ struct linux_binprm;
5387 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5388 int arch_setup_additional_pages(struct linux_binprm *, int);
5389
5390 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5391 -#define arch_randomize_brk arch_randomize_brk
5392 -
5393 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
5394
5395 #endif
5396 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
5397 index c4a93d6..4d2a9b4 100644
5398 --- a/arch/s390/include/asm/exec.h
5399 +++ b/arch/s390/include/asm/exec.h
5400 @@ -7,6 +7,6 @@
5401 #ifndef __ASM_EXEC_H
5402 #define __ASM_EXEC_H
5403
5404 -extern unsigned long arch_align_stack(unsigned long sp);
5405 +#define arch_align_stack(x) ((x) & ~0xfUL)
5406
5407 #endif /* __ASM_EXEC_H */
5408 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
5409 index 34268df..ea97318 100644
5410 --- a/arch/s390/include/asm/uaccess.h
5411 +++ b/arch/s390/include/asm/uaccess.h
5412 @@ -252,6 +252,10 @@ static inline unsigned long __must_check
5413 copy_to_user(void __user *to, const void *from, unsigned long n)
5414 {
5415 might_fault();
5416 +
5417 + if ((long)n < 0)
5418 + return n;
5419 +
5420 if (access_ok(VERIFY_WRITE, to, n))
5421 n = __copy_to_user(to, from, n);
5422 return n;
5423 @@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
5424 static inline unsigned long __must_check
5425 __copy_from_user(void *to, const void __user *from, unsigned long n)
5426 {
5427 + if ((long)n < 0)
5428 + return n;
5429 +
5430 if (__builtin_constant_p(n) && (n <= 256))
5431 return uaccess.copy_from_user_small(n, from, to);
5432 else
5433 @@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
5434 static inline unsigned long __must_check
5435 copy_from_user(void *to, const void __user *from, unsigned long n)
5436 {
5437 - unsigned int sz = __compiletime_object_size(to);
5438 + size_t sz = __compiletime_object_size(to);
5439
5440 might_fault();
5441 - if (unlikely(sz != -1 && sz < n)) {
5442 +
5443 + if ((long)n < 0)
5444 + return n;
5445 +
5446 + if (unlikely(sz != (size_t)-1 && sz < n)) {
5447 copy_from_user_overflow();
5448 return n;
5449 }
5450 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
5451 index 4610dea..cf0af21 100644
5452 --- a/arch/s390/kernel/module.c
5453 +++ b/arch/s390/kernel/module.c
5454 @@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
5455
5456 /* Increase core size by size of got & plt and set start
5457 offsets for got and plt. */
5458 - me->core_size = ALIGN(me->core_size, 4);
5459 - me->arch.got_offset = me->core_size;
5460 - me->core_size += me->arch.got_size;
5461 - me->arch.plt_offset = me->core_size;
5462 - me->core_size += me->arch.plt_size;
5463 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
5464 + me->arch.got_offset = me->core_size_rw;
5465 + me->core_size_rw += me->arch.got_size;
5466 + me->arch.plt_offset = me->core_size_rx;
5467 + me->core_size_rx += me->arch.plt_size;
5468 return 0;
5469 }
5470
5471 @@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5472 if (info->got_initialized == 0) {
5473 Elf_Addr *gotent;
5474
5475 - gotent = me->module_core + me->arch.got_offset +
5476 + gotent = me->module_core_rw + me->arch.got_offset +
5477 info->got_offset;
5478 *gotent = val;
5479 info->got_initialized = 1;
5480 @@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5481 else if (r_type == R_390_GOTENT ||
5482 r_type == R_390_GOTPLTENT)
5483 *(unsigned int *) loc =
5484 - (val + (Elf_Addr) me->module_core - loc) >> 1;
5485 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
5486 else if (r_type == R_390_GOT64 ||
5487 r_type == R_390_GOTPLT64)
5488 *(unsigned long *) loc = val;
5489 @@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5490 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
5491 if (info->plt_initialized == 0) {
5492 unsigned int *ip;
5493 - ip = me->module_core + me->arch.plt_offset +
5494 + ip = me->module_core_rx + me->arch.plt_offset +
5495 info->plt_offset;
5496 #ifndef CONFIG_64BIT
5497 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
5498 @@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5499 val - loc + 0xffffUL < 0x1ffffeUL) ||
5500 (r_type == R_390_PLT32DBL &&
5501 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
5502 - val = (Elf_Addr) me->module_core +
5503 + val = (Elf_Addr) me->module_core_rx +
5504 me->arch.plt_offset +
5505 info->plt_offset;
5506 val += rela->r_addend - loc;
5507 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5508 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
5509 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
5510 val = val + rela->r_addend -
5511 - ((Elf_Addr) me->module_core + me->arch.got_offset);
5512 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
5513 if (r_type == R_390_GOTOFF16)
5514 *(unsigned short *) loc = val;
5515 else if (r_type == R_390_GOTOFF32)
5516 @@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5517 break;
5518 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
5519 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
5520 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
5521 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
5522 rela->r_addend - loc;
5523 if (r_type == R_390_GOTPC)
5524 *(unsigned int *) loc = val;
5525 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
5526 index cd31ad4..201c5a3 100644
5527 --- a/arch/s390/kernel/process.c
5528 +++ b/arch/s390/kernel/process.c
5529 @@ -283,39 +283,3 @@ unsigned long get_wchan(struct task_struct *p)
5530 }
5531 return 0;
5532 }
5533 -
5534 -unsigned long arch_align_stack(unsigned long sp)
5535 -{
5536 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5537 - sp -= get_random_int() & ~PAGE_MASK;
5538 - return sp & ~0xf;
5539 -}
5540 -
5541 -static inline unsigned long brk_rnd(void)
5542 -{
5543 - /* 8MB for 32bit, 1GB for 64bit */
5544 - if (is_32bit_task())
5545 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
5546 - else
5547 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
5548 -}
5549 -
5550 -unsigned long arch_randomize_brk(struct mm_struct *mm)
5551 -{
5552 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
5553 -
5554 - if (ret < mm->brk)
5555 - return mm->brk;
5556 - return ret;
5557 -}
5558 -
5559 -unsigned long randomize_et_dyn(unsigned long base)
5560 -{
5561 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
5562 -
5563 - if (!(current->flags & PF_RANDOMIZE))
5564 - return base;
5565 - if (ret < base)
5566 - return base;
5567 - return ret;
5568 -}
5569 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
5570 index c59a5ef..3fae59c 100644
5571 --- a/arch/s390/mm/mmap.c
5572 +++ b/arch/s390/mm/mmap.c
5573 @@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5574 */
5575 if (mmap_is_legacy()) {
5576 mm->mmap_base = TASK_UNMAPPED_BASE;
5577 +
5578 +#ifdef CONFIG_PAX_RANDMMAP
5579 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5580 + mm->mmap_base += mm->delta_mmap;
5581 +#endif
5582 +
5583 mm->get_unmapped_area = arch_get_unmapped_area;
5584 mm->unmap_area = arch_unmap_area;
5585 } else {
5586 mm->mmap_base = mmap_base();
5587 +
5588 +#ifdef CONFIG_PAX_RANDMMAP
5589 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5590 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5591 +#endif
5592 +
5593 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5594 mm->unmap_area = arch_unmap_area_topdown;
5595 }
5596 @@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5597 */
5598 if (mmap_is_legacy()) {
5599 mm->mmap_base = TASK_UNMAPPED_BASE;
5600 +
5601 +#ifdef CONFIG_PAX_RANDMMAP
5602 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5603 + mm->mmap_base += mm->delta_mmap;
5604 +#endif
5605 +
5606 mm->get_unmapped_area = s390_get_unmapped_area;
5607 mm->unmap_area = arch_unmap_area;
5608 } else {
5609 mm->mmap_base = mmap_base();
5610 +
5611 +#ifdef CONFIG_PAX_RANDMMAP
5612 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5613 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5614 +#endif
5615 +
5616 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
5617 mm->unmap_area = arch_unmap_area_topdown;
5618 }
5619 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
5620 index ae3d59f..f65f075 100644
5621 --- a/arch/score/include/asm/cache.h
5622 +++ b/arch/score/include/asm/cache.h
5623 @@ -1,7 +1,9 @@
5624 #ifndef _ASM_SCORE_CACHE_H
5625 #define _ASM_SCORE_CACHE_H
5626
5627 +#include <linux/const.h>
5628 +
5629 #define L1_CACHE_SHIFT 4
5630 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5631 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5632
5633 #endif /* _ASM_SCORE_CACHE_H */
5634 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
5635 index f9f3cd5..58ff438 100644
5636 --- a/arch/score/include/asm/exec.h
5637 +++ b/arch/score/include/asm/exec.h
5638 @@ -1,6 +1,6 @@
5639 #ifndef _ASM_SCORE_EXEC_H
5640 #define _ASM_SCORE_EXEC_H
5641
5642 -extern unsigned long arch_align_stack(unsigned long sp);
5643 +#define arch_align_stack(x) (x)
5644
5645 #endif /* _ASM_SCORE_EXEC_H */
5646 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5647 index 637970c..0b6556b 100644
5648 --- a/arch/score/kernel/process.c
5649 +++ b/arch/score/kernel/process.c
5650 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
5651
5652 return task_pt_regs(task)->cp0_epc;
5653 }
5654 -
5655 -unsigned long arch_align_stack(unsigned long sp)
5656 -{
5657 - return sp;
5658 -}
5659 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5660 index ef9e555..331bd29 100644
5661 --- a/arch/sh/include/asm/cache.h
5662 +++ b/arch/sh/include/asm/cache.h
5663 @@ -9,10 +9,11 @@
5664 #define __ASM_SH_CACHE_H
5665 #ifdef __KERNEL__
5666
5667 +#include <linux/const.h>
5668 #include <linux/init.h>
5669 #include <cpu/cache.h>
5670
5671 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5672 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5673
5674 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5675
5676 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5677 index afeb710..d1d1289 100644
5678 --- a/arch/sh/mm/mmap.c
5679 +++ b/arch/sh/mm/mmap.c
5680 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5681 addr = PAGE_ALIGN(addr);
5682
5683 vma = find_vma(mm, addr);
5684 - if (TASK_SIZE - len >= addr &&
5685 - (!vma || addr + len <= vma->vm_start))
5686 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5687 return addr;
5688 }
5689
5690 @@ -106,7 +105,7 @@ full_search:
5691 }
5692 return -ENOMEM;
5693 }
5694 - if (likely(!vma || addr + len <= vma->vm_start)) {
5695 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5696 /*
5697 * Remember the place where we stopped the search:
5698 */
5699 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5700 addr = PAGE_ALIGN(addr);
5701
5702 vma = find_vma(mm, addr);
5703 - if (TASK_SIZE - len >= addr &&
5704 - (!vma || addr + len <= vma->vm_start))
5705 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5706 return addr;
5707 }
5708
5709 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5710 /* make sure it can fit in the remaining address space */
5711 if (likely(addr > len)) {
5712 vma = find_vma(mm, addr-len);
5713 - if (!vma || addr <= vma->vm_start) {
5714 + if (check_heap_stack_gap(vma, addr - len, len)) {
5715 /* remember the address as a hint for next time */
5716 return (mm->free_area_cache = addr-len);
5717 }
5718 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5719 if (unlikely(mm->mmap_base < len))
5720 goto bottomup;
5721
5722 - addr = mm->mmap_base-len;
5723 - if (do_colour_align)
5724 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5725 + addr = mm->mmap_base - len;
5726
5727 do {
5728 + if (do_colour_align)
5729 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5730 /*
5731 * Lookup failure means no vma is above this address,
5732 * else if new region fits below vma->vm_start,
5733 * return with success:
5734 */
5735 vma = find_vma(mm, addr);
5736 - if (likely(!vma || addr+len <= vma->vm_start)) {
5737 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5738 /* remember the address as a hint for next time */
5739 return (mm->free_area_cache = addr);
5740 }
5741 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5742 mm->cached_hole_size = vma->vm_start - addr;
5743
5744 /* try just below the current vma->vm_start */
5745 - addr = vma->vm_start-len;
5746 - if (do_colour_align)
5747 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5748 - } while (likely(len < vma->vm_start));
5749 + addr = skip_heap_stack_gap(vma, len);
5750 + } while (!IS_ERR_VALUE(addr));
5751
5752 bottomup:
5753 /*
5754 diff --git a/arch/sparc/crypto/aes_asm.S b/arch/sparc/crypto/aes_asm.S
5755 index 23f6cbb..1cda8aa 100644
5756 --- a/arch/sparc/crypto/aes_asm.S
5757 +++ b/arch/sparc/crypto/aes_asm.S
5758 @@ -1024,7 +1024,11 @@ ENTRY(aes_sparc64_ecb_encrypt_256)
5759 add %o2, 0x20, %o2
5760 brlz,pt %o3, 11f
5761 nop
5762 -10: ldx [%o1 + 0x00], %g3
5763 +10: ldd [%o0 + 0xd0], %f56
5764 + ldd [%o0 + 0xd8], %f58
5765 + ldd [%o0 + 0xe0], %f60
5766 + ldd [%o0 + 0xe8], %f62
5767 + ldx [%o1 + 0x00], %g3
5768 ldx [%o1 + 0x08], %g7
5769 xor %g1, %g3, %g3
5770 xor %g2, %g7, %g7
5771 @@ -1128,9 +1132,9 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
5772 /* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
5773 ldx [%o0 - 0x10], %g1
5774 subcc %o3, 0x10, %o3
5775 + ldx [%o0 - 0x08], %g2
5776 be 10f
5777 - ldx [%o0 - 0x08], %g2
5778 - sub %o0, 0xf0, %o0
5779 + sub %o0, 0xf0, %o0
5780 1: ldx [%o1 + 0x00], %g3
5781 ldx [%o1 + 0x08], %g7
5782 ldx [%o1 + 0x10], %o4
5783 @@ -1154,7 +1158,11 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
5784 add %o2, 0x20, %o2
5785 brlz,pt %o3, 11f
5786 nop
5787 -10: ldx [%o1 + 0x00], %g3
5788 +10: ldd [%o0 + 0x18], %f56
5789 + ldd [%o0 + 0x10], %f58
5790 + ldd [%o0 + 0x08], %f60
5791 + ldd [%o0 + 0x00], %f62
5792 + ldx [%o1 + 0x00], %g3
5793 ldx [%o1 + 0x08], %g7
5794 xor %g1, %g3, %g3
5795 xor %g2, %g7, %g7
5796 @@ -1511,11 +1519,11 @@ ENTRY(aes_sparc64_ctr_crypt_256)
5797 add %o2, 0x20, %o2
5798 brlz,pt %o3, 11f
5799 nop
5800 - ldd [%o0 + 0xd0], %f56
5801 +10: ldd [%o0 + 0xd0], %f56
5802 ldd [%o0 + 0xd8], %f58
5803 ldd [%o0 + 0xe0], %f60
5804 ldd [%o0 + 0xe8], %f62
5805 -10: xor %g1, %g3, %o5
5806 + xor %g1, %g3, %o5
5807 MOVXTOD_O5_F0
5808 xor %g2, %g7, %o5
5809 MOVXTOD_O5_F2
5810 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5811 index be56a24..443328f 100644
5812 --- a/arch/sparc/include/asm/atomic_64.h
5813 +++ b/arch/sparc/include/asm/atomic_64.h
5814 @@ -14,18 +14,40 @@
5815 #define ATOMIC64_INIT(i) { (i) }
5816
5817 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5818 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5819 +{
5820 + return v->counter;
5821 +}
5822 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5823 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5824 +{
5825 + return v->counter;
5826 +}
5827
5828 #define atomic_set(v, i) (((v)->counter) = i)
5829 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5830 +{
5831 + v->counter = i;
5832 +}
5833 #define atomic64_set(v, i) (((v)->counter) = i)
5834 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5835 +{
5836 + v->counter = i;
5837 +}
5838
5839 extern void atomic_add(int, atomic_t *);
5840 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5841 extern void atomic64_add(long, atomic64_t *);
5842 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5843 extern void atomic_sub(int, atomic_t *);
5844 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5845 extern void atomic64_sub(long, atomic64_t *);
5846 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5847
5848 extern int atomic_add_ret(int, atomic_t *);
5849 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5850 extern long atomic64_add_ret(long, atomic64_t *);
5851 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5852 extern int atomic_sub_ret(int, atomic_t *);
5853 extern long atomic64_sub_ret(long, atomic64_t *);
5854
5855 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5856 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5857
5858 #define atomic_inc_return(v) atomic_add_ret(1, v)
5859 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5860 +{
5861 + return atomic_add_ret_unchecked(1, v);
5862 +}
5863 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5864 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5865 +{
5866 + return atomic64_add_ret_unchecked(1, v);
5867 +}
5868
5869 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5870 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5871
5872 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5873 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5874 +{
5875 + return atomic_add_ret_unchecked(i, v);
5876 +}
5877 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5878 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5879 +{
5880 + return atomic64_add_ret_unchecked(i, v);
5881 +}
5882
5883 /*
5884 * atomic_inc_and_test - increment and test
5885 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5886 * other cases.
5887 */
5888 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5889 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5890 +{
5891 + return atomic_inc_return_unchecked(v) == 0;
5892 +}
5893 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5894
5895 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5896 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5897 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5898
5899 #define atomic_inc(v) atomic_add(1, v)
5900 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5901 +{
5902 + atomic_add_unchecked(1, v);
5903 +}
5904 #define atomic64_inc(v) atomic64_add(1, v)
5905 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5906 +{
5907 + atomic64_add_unchecked(1, v);
5908 +}
5909
5910 #define atomic_dec(v) atomic_sub(1, v)
5911 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5912 +{
5913 + atomic_sub_unchecked(1, v);
5914 +}
5915 #define atomic64_dec(v) atomic64_sub(1, v)
5916 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5917 +{
5918 + atomic64_sub_unchecked(1, v);
5919 +}
5920
5921 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5922 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5923
5924 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5925 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5926 +{
5927 + return cmpxchg(&v->counter, old, new);
5928 +}
5929 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5930 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5931 +{
5932 + return xchg(&v->counter, new);
5933 +}
5934
5935 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5936 {
5937 - int c, old;
5938 + int c, old, new;
5939 c = atomic_read(v);
5940 for (;;) {
5941 - if (unlikely(c == (u)))
5942 + if (unlikely(c == u))
5943 break;
5944 - old = atomic_cmpxchg((v), c, c + (a));
5945 +
5946 + asm volatile("addcc %2, %0, %0\n"
5947 +
5948 +#ifdef CONFIG_PAX_REFCOUNT
5949 + "tvs %%icc, 6\n"
5950 +#endif
5951 +
5952 + : "=r" (new)
5953 + : "0" (c), "ir" (a)
5954 + : "cc");
5955 +
5956 + old = atomic_cmpxchg(v, c, new);
5957 if (likely(old == c))
5958 break;
5959 c = old;
5960 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5961 #define atomic64_cmpxchg(v, o, n) \
5962 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5963 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5964 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5965 +{
5966 + return xchg(&v->counter, new);
5967 +}
5968
5969 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5970 {
5971 - long c, old;
5972 + long c, old, new;
5973 c = atomic64_read(v);
5974 for (;;) {
5975 - if (unlikely(c == (u)))
5976 + if (unlikely(c == u))
5977 break;
5978 - old = atomic64_cmpxchg((v), c, c + (a));
5979 +
5980 + asm volatile("addcc %2, %0, %0\n"
5981 +
5982 +#ifdef CONFIG_PAX_REFCOUNT
5983 + "tvs %%xcc, 6\n"
5984 +#endif
5985 +
5986 + : "=r" (new)
5987 + : "0" (c), "ir" (a)
5988 + : "cc");
5989 +
5990 + old = atomic64_cmpxchg(v, c, new);
5991 if (likely(old == c))
5992 break;
5993 c = old;
5994 }
5995 - return c != (u);
5996 + return c != u;
5997 }
5998
5999 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6000 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
6001 index 5bb6991..5c2132e 100644
6002 --- a/arch/sparc/include/asm/cache.h
6003 +++ b/arch/sparc/include/asm/cache.h
6004 @@ -7,10 +7,12 @@
6005 #ifndef _SPARC_CACHE_H
6006 #define _SPARC_CACHE_H
6007
6008 +#include <linux/const.h>
6009 +
6010 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
6011
6012 #define L1_CACHE_SHIFT 5
6013 -#define L1_CACHE_BYTES 32
6014 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6015
6016 #ifdef CONFIG_SPARC32
6017 #define SMP_CACHE_BYTES_SHIFT 5
6018 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
6019 index ac74a2c..a9e58af 100644
6020 --- a/arch/sparc/include/asm/elf_32.h
6021 +++ b/arch/sparc/include/asm/elf_32.h
6022 @@ -114,6 +114,13 @@ typedef struct {
6023
6024 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
6025
6026 +#ifdef CONFIG_PAX_ASLR
6027 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
6028 +
6029 +#define PAX_DELTA_MMAP_LEN 16
6030 +#define PAX_DELTA_STACK_LEN 16
6031 +#endif
6032 +
6033 /* This yields a mask that user programs can use to figure out what
6034 instruction set this cpu supports. This can NOT be done in userspace
6035 on Sparc. */
6036 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
6037 index 370ca1e..d4f4a98 100644
6038 --- a/arch/sparc/include/asm/elf_64.h
6039 +++ b/arch/sparc/include/asm/elf_64.h
6040 @@ -189,6 +189,13 @@ typedef struct {
6041 #define ELF_ET_DYN_BASE 0x0000010000000000UL
6042 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
6043
6044 +#ifdef CONFIG_PAX_ASLR
6045 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
6046 +
6047 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
6048 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
6049 +#endif
6050 +
6051 extern unsigned long sparc64_elf_hwcap;
6052 #define ELF_HWCAP sparc64_elf_hwcap
6053
6054 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
6055 index 9b1c36d..209298b 100644
6056 --- a/arch/sparc/include/asm/pgalloc_32.h
6057 +++ b/arch/sparc/include/asm/pgalloc_32.h
6058 @@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
6059 }
6060
6061 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
6062 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
6063
6064 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
6065 unsigned long address)
6066 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
6067 index bcfe063..b333142 100644
6068 --- a/arch/sparc/include/asm/pgalloc_64.h
6069 +++ b/arch/sparc/include/asm/pgalloc_64.h
6070 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6071 }
6072
6073 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
6074 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
6075
6076 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
6077 {
6078 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
6079 index 6fc1348..390c50a 100644
6080 --- a/arch/sparc/include/asm/pgtable_32.h
6081 +++ b/arch/sparc/include/asm/pgtable_32.h
6082 @@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
6083 #define PAGE_SHARED SRMMU_PAGE_SHARED
6084 #define PAGE_COPY SRMMU_PAGE_COPY
6085 #define PAGE_READONLY SRMMU_PAGE_RDONLY
6086 +#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
6087 +#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
6088 +#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
6089 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
6090
6091 /* Top-level page directory - dummy used by init-mm.
6092 @@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
6093
6094 /* xwr */
6095 #define __P000 PAGE_NONE
6096 -#define __P001 PAGE_READONLY
6097 -#define __P010 PAGE_COPY
6098 -#define __P011 PAGE_COPY
6099 +#define __P001 PAGE_READONLY_NOEXEC
6100 +#define __P010 PAGE_COPY_NOEXEC
6101 +#define __P011 PAGE_COPY_NOEXEC
6102 #define __P100 PAGE_READONLY
6103 #define __P101 PAGE_READONLY
6104 #define __P110 PAGE_COPY
6105 #define __P111 PAGE_COPY
6106
6107 #define __S000 PAGE_NONE
6108 -#define __S001 PAGE_READONLY
6109 -#define __S010 PAGE_SHARED
6110 -#define __S011 PAGE_SHARED
6111 +#define __S001 PAGE_READONLY_NOEXEC
6112 +#define __S010 PAGE_SHARED_NOEXEC
6113 +#define __S011 PAGE_SHARED_NOEXEC
6114 #define __S100 PAGE_READONLY
6115 #define __S101 PAGE_READONLY
6116 #define __S110 PAGE_SHARED
6117 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
6118 index 79da178..c2eede8 100644
6119 --- a/arch/sparc/include/asm/pgtsrmmu.h
6120 +++ b/arch/sparc/include/asm/pgtsrmmu.h
6121 @@ -115,6 +115,11 @@
6122 SRMMU_EXEC | SRMMU_REF)
6123 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
6124 SRMMU_EXEC | SRMMU_REF)
6125 +
6126 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
6127 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
6128 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
6129 +
6130 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
6131 SRMMU_DIRTY | SRMMU_REF)
6132
6133 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
6134 index 9689176..63c18ea 100644
6135 --- a/arch/sparc/include/asm/spinlock_64.h
6136 +++ b/arch/sparc/include/asm/spinlock_64.h
6137 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
6138
6139 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
6140
6141 -static void inline arch_read_lock(arch_rwlock_t *lock)
6142 +static inline void arch_read_lock(arch_rwlock_t *lock)
6143 {
6144 unsigned long tmp1, tmp2;
6145
6146 __asm__ __volatile__ (
6147 "1: ldsw [%2], %0\n"
6148 " brlz,pn %0, 2f\n"
6149 -"4: add %0, 1, %1\n"
6150 +"4: addcc %0, 1, %1\n"
6151 +
6152 +#ifdef CONFIG_PAX_REFCOUNT
6153 +" tvs %%icc, 6\n"
6154 +#endif
6155 +
6156 " cas [%2], %0, %1\n"
6157 " cmp %0, %1\n"
6158 " bne,pn %%icc, 1b\n"
6159 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
6160 " .previous"
6161 : "=&r" (tmp1), "=&r" (tmp2)
6162 : "r" (lock)
6163 - : "memory");
6164 + : "memory", "cc");
6165 }
6166
6167 -static int inline arch_read_trylock(arch_rwlock_t *lock)
6168 +static inline int arch_read_trylock(arch_rwlock_t *lock)
6169 {
6170 int tmp1, tmp2;
6171
6172 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
6173 "1: ldsw [%2], %0\n"
6174 " brlz,a,pn %0, 2f\n"
6175 " mov 0, %0\n"
6176 -" add %0, 1, %1\n"
6177 +" addcc %0, 1, %1\n"
6178 +
6179 +#ifdef CONFIG_PAX_REFCOUNT
6180 +" tvs %%icc, 6\n"
6181 +#endif
6182 +
6183 " cas [%2], %0, %1\n"
6184 " cmp %0, %1\n"
6185 " bne,pn %%icc, 1b\n"
6186 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
6187 return tmp1;
6188 }
6189
6190 -static void inline arch_read_unlock(arch_rwlock_t *lock)
6191 +static inline void arch_read_unlock(arch_rwlock_t *lock)
6192 {
6193 unsigned long tmp1, tmp2;
6194
6195 __asm__ __volatile__(
6196 "1: lduw [%2], %0\n"
6197 -" sub %0, 1, %1\n"
6198 +" subcc %0, 1, %1\n"
6199 +
6200 +#ifdef CONFIG_PAX_REFCOUNT
6201 +" tvs %%icc, 6\n"
6202 +#endif
6203 +
6204 " cas [%2], %0, %1\n"
6205 " cmp %0, %1\n"
6206 " bne,pn %%xcc, 1b\n"
6207 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
6208 : "memory");
6209 }
6210
6211 -static void inline arch_write_lock(arch_rwlock_t *lock)
6212 +static inline void arch_write_lock(arch_rwlock_t *lock)
6213 {
6214 unsigned long mask, tmp1, tmp2;
6215
6216 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
6217 : "memory");
6218 }
6219
6220 -static void inline arch_write_unlock(arch_rwlock_t *lock)
6221 +static inline void arch_write_unlock(arch_rwlock_t *lock)
6222 {
6223 __asm__ __volatile__(
6224 " stw %%g0, [%0]"
6225 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
6226 : "memory");
6227 }
6228
6229 -static int inline arch_write_trylock(arch_rwlock_t *lock)
6230 +static inline int arch_write_trylock(arch_rwlock_t *lock)
6231 {
6232 unsigned long mask, tmp1, tmp2, result;
6233
6234 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
6235 index 25849ae..924c54b 100644
6236 --- a/arch/sparc/include/asm/thread_info_32.h
6237 +++ b/arch/sparc/include/asm/thread_info_32.h
6238 @@ -49,6 +49,8 @@ struct thread_info {
6239 unsigned long w_saved;
6240
6241 struct restart_block restart_block;
6242 +
6243 + unsigned long lowest_stack;
6244 };
6245
6246 /*
6247 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
6248 index a3fe4dc..cae132a 100644
6249 --- a/arch/sparc/include/asm/thread_info_64.h
6250 +++ b/arch/sparc/include/asm/thread_info_64.h
6251 @@ -63,6 +63,8 @@ struct thread_info {
6252 struct pt_regs *kern_una_regs;
6253 unsigned int kern_una_insn;
6254
6255 + unsigned long lowest_stack;
6256 +
6257 unsigned long fpregs[0] __attribute__ ((aligned(64)));
6258 };
6259
6260 @@ -193,10 +195,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
6261 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
6262 /* flag bit 6 is available */
6263 #define TIF_32BIT 7 /* 32-bit binary */
6264 -/* flag bit 8 is available */
6265 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
6266 #define TIF_SECCOMP 9 /* secure computing */
6267 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
6268 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
6269 +
6270 /* NOTE: Thread flags >= 12 should be ones we have no interest
6271 * in using in assembly, else we can't use the mask as
6272 * an immediate value in instructions such as andcc.
6273 @@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
6274 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
6275 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6276 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
6277 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6278
6279 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
6280 _TIF_DO_NOTIFY_RESUME_MASK | \
6281 _TIF_NEED_RESCHED)
6282 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
6283
6284 +#define _TIF_WORK_SYSCALL \
6285 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
6286 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6287 +
6288 +
6289 /*
6290 * Thread-synchronous status.
6291 *
6292 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
6293 index 0167d26..9acd8ed 100644
6294 --- a/arch/sparc/include/asm/uaccess.h
6295 +++ b/arch/sparc/include/asm/uaccess.h
6296 @@ -1,5 +1,13 @@
6297 #ifndef ___ASM_SPARC_UACCESS_H
6298 #define ___ASM_SPARC_UACCESS_H
6299 +
6300 +#ifdef __KERNEL__
6301 +#ifndef __ASSEMBLY__
6302 +#include <linux/types.h>
6303 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
6304 +#endif
6305 +#endif
6306 +
6307 #if defined(__sparc__) && defined(__arch64__)
6308 #include <asm/uaccess_64.h>
6309 #else
6310 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
6311 index 53a28dd..50c38c3 100644
6312 --- a/arch/sparc/include/asm/uaccess_32.h
6313 +++ b/arch/sparc/include/asm/uaccess_32.h
6314 @@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
6315
6316 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6317 {
6318 - if (n && __access_ok((unsigned long) to, n))
6319 + if ((long)n < 0)
6320 + return n;
6321 +
6322 + if (n && __access_ok((unsigned long) to, n)) {
6323 + if (!__builtin_constant_p(n))
6324 + check_object_size(from, n, true);
6325 return __copy_user(to, (__force void __user *) from, n);
6326 - else
6327 + } else
6328 return n;
6329 }
6330
6331 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
6332 {
6333 + if ((long)n < 0)
6334 + return n;
6335 +
6336 + if (!__builtin_constant_p(n))
6337 + check_object_size(from, n, true);
6338 +
6339 return __copy_user(to, (__force void __user *) from, n);
6340 }
6341
6342 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6343 {
6344 - if (n && __access_ok((unsigned long) from, n))
6345 + if ((long)n < 0)
6346 + return n;
6347 +
6348 + if (n && __access_ok((unsigned long) from, n)) {
6349 + if (!__builtin_constant_p(n))
6350 + check_object_size(to, n, false);
6351 return __copy_user((__force void __user *) to, from, n);
6352 - else
6353 + } else
6354 return n;
6355 }
6356
6357 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
6358 {
6359 + if ((long)n < 0)
6360 + return n;
6361 +
6362 return __copy_user((__force void __user *) to, from, n);
6363 }
6364
6365 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
6366 index 73083e1..2bc62a6 100644
6367 --- a/arch/sparc/include/asm/uaccess_64.h
6368 +++ b/arch/sparc/include/asm/uaccess_64.h
6369 @@ -10,6 +10,7 @@
6370 #include <linux/compiler.h>
6371 #include <linux/string.h>
6372 #include <linux/thread_info.h>
6373 +#include <linux/kernel.h>
6374 #include <asm/asi.h>
6375 #include <asm/spitfire.h>
6376 #include <asm-generic/uaccess-unaligned.h>
6377 @@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
6378 static inline unsigned long __must_check
6379 copy_from_user(void *to, const void __user *from, unsigned long size)
6380 {
6381 - unsigned long ret = ___copy_from_user(to, from, size);
6382 + unsigned long ret;
6383
6384 + if ((long)size < 0 || size > INT_MAX)
6385 + return size;
6386 +
6387 + if (!__builtin_constant_p(size))
6388 + check_object_size(to, size, false);
6389 +
6390 + ret = ___copy_from_user(to, from, size);
6391 if (unlikely(ret))
6392 ret = copy_from_user_fixup(to, from, size);
6393
6394 @@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
6395 static inline unsigned long __must_check
6396 copy_to_user(void __user *to, const void *from, unsigned long size)
6397 {
6398 - unsigned long ret = ___copy_to_user(to, from, size);
6399 + unsigned long ret;
6400
6401 + if ((long)size < 0 || size > INT_MAX)
6402 + return size;
6403 +
6404 + if (!__builtin_constant_p(size))
6405 + check_object_size(from, size, true);
6406 +
6407 + ret = ___copy_to_user(to, from, size);
6408 if (unlikely(ret))
6409 ret = copy_to_user_fixup(to, from, size);
6410 return ret;
6411 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
6412 index 6cf591b..b49e65a 100644
6413 --- a/arch/sparc/kernel/Makefile
6414 +++ b/arch/sparc/kernel/Makefile
6415 @@ -3,7 +3,7 @@
6416 #
6417
6418 asflags-y := -ansi
6419 -ccflags-y := -Werror
6420 +#ccflags-y := -Werror
6421
6422 extra-y := head_$(BITS).o
6423
6424 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
6425 index 487bffb..955a925 100644
6426 --- a/arch/sparc/kernel/process_32.c
6427 +++ b/arch/sparc/kernel/process_32.c
6428 @@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
6429
6430 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
6431 r->psr, r->pc, r->npc, r->y, print_tainted());
6432 - printk("PC: <%pS>\n", (void *) r->pc);
6433 + printk("PC: <%pA>\n", (void *) r->pc);
6434 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6435 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
6436 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
6437 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6438 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
6439 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
6440 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
6441 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
6442
6443 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6444 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
6445 @@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6446 rw = (struct reg_window32 *) fp;
6447 pc = rw->ins[7];
6448 printk("[%08lx : ", pc);
6449 - printk("%pS ] ", (void *) pc);
6450 + printk("%pA ] ", (void *) pc);
6451 fp = rw->ins[6];
6452 } while (++count < 16);
6453 printk("\n");
6454 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
6455 index c6e0c29..052832b 100644
6456 --- a/arch/sparc/kernel/process_64.c
6457 +++ b/arch/sparc/kernel/process_64.c
6458 @@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
6459 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
6460 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
6461 if (regs->tstate & TSTATE_PRIV)
6462 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
6463 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
6464 }
6465
6466 void show_regs(struct pt_regs *regs)
6467 {
6468 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
6469 regs->tpc, regs->tnpc, regs->y, print_tainted());
6470 - printk("TPC: <%pS>\n", (void *) regs->tpc);
6471 + printk("TPC: <%pA>\n", (void *) regs->tpc);
6472 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
6473 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
6474 regs->u_regs[3]);
6475 @@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
6476 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
6477 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
6478 regs->u_regs[15]);
6479 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
6480 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
6481 show_regwindow(regs);
6482 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
6483 }
6484 @@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
6485 ((tp && tp->task) ? tp->task->pid : -1));
6486
6487 if (gp->tstate & TSTATE_PRIV) {
6488 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
6489 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
6490 (void *) gp->tpc,
6491 (void *) gp->o7,
6492 (void *) gp->i7,
6493 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
6494 index 7ff45e4..a58f271 100644
6495 --- a/arch/sparc/kernel/ptrace_64.c
6496 +++ b/arch/sparc/kernel/ptrace_64.c
6497 @@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
6498 return ret;
6499 }
6500
6501 +#ifdef CONFIG_GRKERNSEC_SETXID
6502 +extern void gr_delayed_cred_worker(void);
6503 +#endif
6504 +
6505 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6506 {
6507 int ret = 0;
6508 @@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6509 /* do the secure computing check first */
6510 secure_computing_strict(regs->u_regs[UREG_G1]);
6511
6512 +#ifdef CONFIG_GRKERNSEC_SETXID
6513 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6514 + gr_delayed_cred_worker();
6515 +#endif
6516 +
6517 if (test_thread_flag(TIF_SYSCALL_TRACE))
6518 ret = tracehook_report_syscall_entry(regs);
6519
6520 @@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6521
6522 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
6523 {
6524 +#ifdef CONFIG_GRKERNSEC_SETXID
6525 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6526 + gr_delayed_cred_worker();
6527 +#endif
6528 +
6529 audit_syscall_exit(regs);
6530
6531 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6532 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
6533 index 0c9b31b..7cb7aee 100644
6534 --- a/arch/sparc/kernel/sys_sparc_32.c
6535 +++ b/arch/sparc/kernel/sys_sparc_32.c
6536 @@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6537 if (len > TASK_SIZE - PAGE_SIZE)
6538 return -ENOMEM;
6539 if (!addr)
6540 - addr = TASK_UNMAPPED_BASE;
6541 + addr = current->mm->mmap_base;
6542
6543 if (flags & MAP_SHARED)
6544 addr = COLOUR_ALIGN(addr);
6545 @@ -65,7 +65,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6546 /* At this point: (!vmm || addr < vmm->vm_end). */
6547 if (TASK_SIZE - PAGE_SIZE - len < addr)
6548 return -ENOMEM;
6549 - if (!vmm || addr + len <= vmm->vm_start)
6550 + if (check_heap_stack_gap(vmm, addr, len))
6551 return addr;
6552 addr = vmm->vm_end;
6553 if (flags & MAP_SHARED)
6554 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
6555 index 878ef3d..8742f10 100644
6556 --- a/arch/sparc/kernel/sys_sparc_64.c
6557 +++ b/arch/sparc/kernel/sys_sparc_64.c
6558 @@ -107,7 +107,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6559 /* We do not accept a shared mapping if it would violate
6560 * cache aliasing constraints.
6561 */
6562 - if ((flags & MAP_SHARED) &&
6563 + if ((filp || (flags & MAP_SHARED)) &&
6564 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6565 return -EINVAL;
6566 return addr;
6567 @@ -122,6 +122,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6568 if (filp || (flags & MAP_SHARED))
6569 do_color_align = 1;
6570
6571 +#ifdef CONFIG_PAX_RANDMMAP
6572 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6573 +#endif
6574 +
6575 if (addr) {
6576 if (do_color_align)
6577 addr = COLOUR_ALIGN(addr, pgoff);
6578 @@ -129,15 +133,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6579 addr = PAGE_ALIGN(addr);
6580
6581 vma = find_vma(mm, addr);
6582 - if (task_size - len >= addr &&
6583 - (!vma || addr + len <= vma->vm_start))
6584 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6585 return addr;
6586 }
6587
6588 if (len > mm->cached_hole_size) {
6589 - start_addr = addr = mm->free_area_cache;
6590 + start_addr = addr = mm->free_area_cache;
6591 } else {
6592 - start_addr = addr = TASK_UNMAPPED_BASE;
6593 + start_addr = addr = mm->mmap_base;
6594 mm->cached_hole_size = 0;
6595 }
6596
6597 @@ -157,14 +160,14 @@ full_search:
6598 vma = find_vma(mm, VA_EXCLUDE_END);
6599 }
6600 if (unlikely(task_size < addr)) {
6601 - if (start_addr != TASK_UNMAPPED_BASE) {
6602 - start_addr = addr = TASK_UNMAPPED_BASE;
6603 + if (start_addr != mm->mmap_base) {
6604 + start_addr = addr = mm->mmap_base;
6605 mm->cached_hole_size = 0;
6606 goto full_search;
6607 }
6608 return -ENOMEM;
6609 }
6610 - if (likely(!vma || addr + len <= vma->vm_start)) {
6611 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6612 /*
6613 * Remember the place where we stopped the search:
6614 */
6615 @@ -198,7 +201,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6616 /* We do not accept a shared mapping if it would violate
6617 * cache aliasing constraints.
6618 */
6619 - if ((flags & MAP_SHARED) &&
6620 + if ((filp || (flags & MAP_SHARED)) &&
6621 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6622 return -EINVAL;
6623 return addr;
6624 @@ -219,8 +222,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6625 addr = PAGE_ALIGN(addr);
6626
6627 vma = find_vma(mm, addr);
6628 - if (task_size - len >= addr &&
6629 - (!vma || addr + len <= vma->vm_start))
6630 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6631 return addr;
6632 }
6633
6634 @@ -241,7 +243,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6635 /* make sure it can fit in the remaining address space */
6636 if (likely(addr > len)) {
6637 vma = find_vma(mm, addr-len);
6638 - if (!vma || addr <= vma->vm_start) {
6639 + if (check_heap_stack_gap(vma, addr - len, len)) {
6640 /* remember the address as a hint for next time */
6641 return (mm->free_area_cache = addr-len);
6642 }
6643 @@ -250,18 +252,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6644 if (unlikely(mm->mmap_base < len))
6645 goto bottomup;
6646
6647 - addr = mm->mmap_base-len;
6648 - if (do_color_align)
6649 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6650 + addr = mm->mmap_base - len;
6651
6652 do {
6653 + if (do_color_align)
6654 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6655 /*
6656 * Lookup failure means no vma is above this address,
6657 * else if new region fits below vma->vm_start,
6658 * return with success:
6659 */
6660 vma = find_vma(mm, addr);
6661 - if (likely(!vma || addr+len <= vma->vm_start)) {
6662 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6663 /* remember the address as a hint for next time */
6664 return (mm->free_area_cache = addr);
6665 }
6666 @@ -271,10 +273,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6667 mm->cached_hole_size = vma->vm_start - addr;
6668
6669 /* try just below the current vma->vm_start */
6670 - addr = vma->vm_start-len;
6671 - if (do_color_align)
6672 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6673 - } while (likely(len < vma->vm_start));
6674 + addr = skip_heap_stack_gap(vma, len);
6675 + } while (!IS_ERR_VALUE(addr));
6676
6677 bottomup:
6678 /*
6679 @@ -373,6 +373,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6680 gap == RLIM_INFINITY ||
6681 sysctl_legacy_va_layout) {
6682 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6683 +
6684 +#ifdef CONFIG_PAX_RANDMMAP
6685 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6686 + mm->mmap_base += mm->delta_mmap;
6687 +#endif
6688 +
6689 mm->get_unmapped_area = arch_get_unmapped_area;
6690 mm->unmap_area = arch_unmap_area;
6691 } else {
6692 @@ -385,6 +391,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6693 gap = (task_size / 6 * 5);
6694
6695 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6696 +
6697 +#ifdef CONFIG_PAX_RANDMMAP
6698 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6699 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6700 +#endif
6701 +
6702 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6703 mm->unmap_area = arch_unmap_area_topdown;
6704 }
6705 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
6706 index bf23477..b7425a6 100644
6707 --- a/arch/sparc/kernel/syscalls.S
6708 +++ b/arch/sparc/kernel/syscalls.S
6709 @@ -62,7 +62,7 @@ sys32_rt_sigreturn:
6710 #endif
6711 .align 32
6712 1: ldx [%g6 + TI_FLAGS], %l5
6713 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6714 + andcc %l5, _TIF_WORK_SYSCALL, %g0
6715 be,pt %icc, rtrap
6716 nop
6717 call syscall_trace_leave
6718 @@ -189,7 +189,7 @@ linux_sparc_syscall32:
6719
6720 srl %i5, 0, %o5 ! IEU1
6721 srl %i2, 0, %o2 ! IEU0 Group
6722 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6723 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6724 bne,pn %icc, linux_syscall_trace32 ! CTI
6725 mov %i0, %l5 ! IEU1
6726 call %l7 ! CTI Group brk forced
6727 @@ -212,7 +212,7 @@ linux_sparc_syscall:
6728
6729 mov %i3, %o3 ! IEU1
6730 mov %i4, %o4 ! IEU0 Group
6731 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6732 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6733 bne,pn %icc, linux_syscall_trace ! CTI Group
6734 mov %i0, %l5 ! IEU0
6735 2: call %l7 ! CTI Group brk forced
6736 @@ -228,7 +228,7 @@ ret_sys_call:
6737
6738 cmp %o0, -ERESTART_RESTARTBLOCK
6739 bgeu,pn %xcc, 1f
6740 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6741 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6742 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
6743
6744 2:
6745 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6746 index a5785ea..405c5f7 100644
6747 --- a/arch/sparc/kernel/traps_32.c
6748 +++ b/arch/sparc/kernel/traps_32.c
6749 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6750 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6751 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6752
6753 +extern void gr_handle_kernel_exploit(void);
6754 +
6755 void die_if_kernel(char *str, struct pt_regs *regs)
6756 {
6757 static int die_counter;
6758 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6759 count++ < 30 &&
6760 (((unsigned long) rw) >= PAGE_OFFSET) &&
6761 !(((unsigned long) rw) & 0x7)) {
6762 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
6763 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
6764 (void *) rw->ins[7]);
6765 rw = (struct reg_window32 *)rw->ins[6];
6766 }
6767 }
6768 printk("Instruction DUMP:");
6769 instruction_dump ((unsigned long *) regs->pc);
6770 - if(regs->psr & PSR_PS)
6771 + if(regs->psr & PSR_PS) {
6772 + gr_handle_kernel_exploit();
6773 do_exit(SIGKILL);
6774 + }
6775 do_exit(SIGSEGV);
6776 }
6777
6778 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6779 index b66a779..8e8d66c 100644
6780 --- a/arch/sparc/kernel/traps_64.c
6781 +++ b/arch/sparc/kernel/traps_64.c
6782 @@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6783 i + 1,
6784 p->trapstack[i].tstate, p->trapstack[i].tpc,
6785 p->trapstack[i].tnpc, p->trapstack[i].tt);
6786 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6787 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6788 }
6789 }
6790
6791 @@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6792
6793 lvl -= 0x100;
6794 if (regs->tstate & TSTATE_PRIV) {
6795 +
6796 +#ifdef CONFIG_PAX_REFCOUNT
6797 + if (lvl == 6)
6798 + pax_report_refcount_overflow(regs);
6799 +#endif
6800 +
6801 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6802 die_if_kernel(buffer, regs);
6803 }
6804 @@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6805 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6806 {
6807 char buffer[32];
6808 -
6809 +
6810 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6811 0, lvl, SIGTRAP) == NOTIFY_STOP)
6812 return;
6813
6814 +#ifdef CONFIG_PAX_REFCOUNT
6815 + if (lvl == 6)
6816 + pax_report_refcount_overflow(regs);
6817 +#endif
6818 +
6819 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6820
6821 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6822 @@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6823 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6824 printk("%s" "ERROR(%d): ",
6825 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6826 - printk("TPC<%pS>\n", (void *) regs->tpc);
6827 + printk("TPC<%pA>\n", (void *) regs->tpc);
6828 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6829 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6830 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6831 @@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6832 smp_processor_id(),
6833 (type & 0x1) ? 'I' : 'D',
6834 regs->tpc);
6835 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6836 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6837 panic("Irrecoverable Cheetah+ parity error.");
6838 }
6839
6840 @@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6841 smp_processor_id(),
6842 (type & 0x1) ? 'I' : 'D',
6843 regs->tpc);
6844 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6845 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6846 }
6847
6848 struct sun4v_error_entry {
6849 @@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6850
6851 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6852 regs->tpc, tl);
6853 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6854 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6855 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6856 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6857 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6858 (void *) regs->u_regs[UREG_I7]);
6859 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6860 "pte[%lx] error[%lx]\n",
6861 @@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6862
6863 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6864 regs->tpc, tl);
6865 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6866 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6867 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6868 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6869 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6870 (void *) regs->u_regs[UREG_I7]);
6871 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6872 "pte[%lx] error[%lx]\n",
6873 @@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6874 fp = (unsigned long)sf->fp + STACK_BIAS;
6875 }
6876
6877 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6878 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6879 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6880 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6881 int index = tsk->curr_ret_stack;
6882 if (tsk->ret_stack && index >= graph) {
6883 pc = tsk->ret_stack[index - graph].ret;
6884 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6885 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6886 graph++;
6887 }
6888 }
6889 @@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6890 return (struct reg_window *) (fp + STACK_BIAS);
6891 }
6892
6893 +extern void gr_handle_kernel_exploit(void);
6894 +
6895 void die_if_kernel(char *str, struct pt_regs *regs)
6896 {
6897 static int die_counter;
6898 @@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6899 while (rw &&
6900 count++ < 30 &&
6901 kstack_valid(tp, (unsigned long) rw)) {
6902 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
6903 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
6904 (void *) rw->ins[7]);
6905
6906 rw = kernel_stack_up(rw);
6907 @@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6908 }
6909 user_instruction_dump ((unsigned int __user *) regs->tpc);
6910 }
6911 - if (regs->tstate & TSTATE_PRIV)
6912 + if (regs->tstate & TSTATE_PRIV) {
6913 + gr_handle_kernel_exploit();
6914 do_exit(SIGKILL);
6915 + }
6916 do_exit(SIGSEGV);
6917 }
6918 EXPORT_SYMBOL(die_if_kernel);
6919 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6920 index 8201c25e..072a2a7 100644
6921 --- a/arch/sparc/kernel/unaligned_64.c
6922 +++ b/arch/sparc/kernel/unaligned_64.c
6923 @@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
6924 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6925
6926 if (__ratelimit(&ratelimit)) {
6927 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6928 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6929 regs->tpc, (void *) regs->tpc);
6930 }
6931 }
6932 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6933 index 8410065f2..4fd4ca22 100644
6934 --- a/arch/sparc/lib/Makefile
6935 +++ b/arch/sparc/lib/Makefile
6936 @@ -2,7 +2,7 @@
6937 #
6938
6939 asflags-y := -ansi -DST_DIV0=0x02
6940 -ccflags-y := -Werror
6941 +#ccflags-y := -Werror
6942
6943 lib-$(CONFIG_SPARC32) += ashrdi3.o
6944 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6945 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6946 index 85c233d..68500e0 100644
6947 --- a/arch/sparc/lib/atomic_64.S
6948 +++ b/arch/sparc/lib/atomic_64.S
6949 @@ -17,7 +17,12 @@
6950 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
6951 BACKOFF_SETUP(%o2)
6952 1: lduw [%o1], %g1
6953 - add %g1, %o0, %g7
6954 + addcc %g1, %o0, %g7
6955 +
6956 +#ifdef CONFIG_PAX_REFCOUNT
6957 + tvs %icc, 6
6958 +#endif
6959 +
6960 cas [%o1], %g1, %g7
6961 cmp %g1, %g7
6962 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6963 @@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
6964 2: BACKOFF_SPIN(%o2, %o3, 1b)
6965 ENDPROC(atomic_add)
6966
6967 +ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6968 + BACKOFF_SETUP(%o2)
6969 +1: lduw [%o1], %g1
6970 + add %g1, %o0, %g7
6971 + cas [%o1], %g1, %g7
6972 + cmp %g1, %g7
6973 + bne,pn %icc, 2f
6974 + nop
6975 + retl
6976 + nop
6977 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6978 +ENDPROC(atomic_add_unchecked)
6979 +
6980 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6981 BACKOFF_SETUP(%o2)
6982 1: lduw [%o1], %g1
6983 - sub %g1, %o0, %g7
6984 + subcc %g1, %o0, %g7
6985 +
6986 +#ifdef CONFIG_PAX_REFCOUNT
6987 + tvs %icc, 6
6988 +#endif
6989 +
6990 cas [%o1], %g1, %g7
6991 cmp %g1, %g7
6992 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6993 @@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6994 2: BACKOFF_SPIN(%o2, %o3, 1b)
6995 ENDPROC(atomic_sub)
6996
6997 +ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
6998 + BACKOFF_SETUP(%o2)
6999 +1: lduw [%o1], %g1
7000 + sub %g1, %o0, %g7
7001 + cas [%o1], %g1, %g7
7002 + cmp %g1, %g7
7003 + bne,pn %icc, 2f
7004 + nop
7005 + retl
7006 + nop
7007 +2: BACKOFF_SPIN(%o2, %o3, 1b)
7008 +ENDPROC(atomic_sub_unchecked)
7009 +
7010 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7011 BACKOFF_SETUP(%o2)
7012 1: lduw [%o1], %g1
7013 - add %g1, %o0, %g7
7014 + addcc %g1, %o0, %g7
7015 +
7016 +#ifdef CONFIG_PAX_REFCOUNT
7017 + tvs %icc, 6
7018 +#endif
7019 +
7020 cas [%o1], %g1, %g7
7021 cmp %g1, %g7
7022 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7023 @@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7024 2: BACKOFF_SPIN(%o2, %o3, 1b)
7025 ENDPROC(atomic_add_ret)
7026
7027 +ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7028 + BACKOFF_SETUP(%o2)
7029 +1: lduw [%o1], %g1
7030 + addcc %g1, %o0, %g7
7031 + cas [%o1], %g1, %g7
7032 + cmp %g1, %g7
7033 + bne,pn %icc, 2f
7034 + add %g7, %o0, %g7
7035 + sra %g7, 0, %o0
7036 + retl
7037 + nop
7038 +2: BACKOFF_SPIN(%o2, %o3, 1b)
7039 +ENDPROC(atomic_add_ret_unchecked)
7040 +
7041 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
7042 BACKOFF_SETUP(%o2)
7043 1: lduw [%o1], %g1
7044 - sub %g1, %o0, %g7
7045 + subcc %g1, %o0, %g7
7046 +
7047 +#ifdef CONFIG_PAX_REFCOUNT
7048 + tvs %icc, 6
7049 +#endif
7050 +
7051 cas [%o1], %g1, %g7
7052 cmp %g1, %g7
7053 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7054 @@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
7055 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
7056 BACKOFF_SETUP(%o2)
7057 1: ldx [%o1], %g1
7058 - add %g1, %o0, %g7
7059 + addcc %g1, %o0, %g7
7060 +
7061 +#ifdef CONFIG_PAX_REFCOUNT
7062 + tvs %xcc, 6
7063 +#endif
7064 +
7065 casx [%o1], %g1, %g7
7066 cmp %g1, %g7
7067 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7068 @@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
7069 2: BACKOFF_SPIN(%o2, %o3, 1b)
7070 ENDPROC(atomic64_add)
7071
7072 +ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7073 + BACKOFF_SETUP(%o2)
7074 +1: ldx [%o1], %g1
7075 + addcc %g1, %o0, %g7
7076 + casx [%o1], %g1, %g7
7077 + cmp %g1, %g7
7078 + bne,pn %xcc, 2f
7079 + nop
7080 + retl
7081 + nop
7082 +2: BACKOFF_SPIN(%o2, %o3, 1b)
7083 +ENDPROC(atomic64_add_unchecked)
7084 +
7085 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7086 BACKOFF_SETUP(%o2)
7087 1: ldx [%o1], %g1
7088 - sub %g1, %o0, %g7
7089 + subcc %g1, %o0, %g7
7090 +
7091 +#ifdef CONFIG_PAX_REFCOUNT
7092 + tvs %xcc, 6
7093 +#endif
7094 +
7095 casx [%o1], %g1, %g7
7096 cmp %g1, %g7
7097 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7098 @@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7099 2: BACKOFF_SPIN(%o2, %o3, 1b)
7100 ENDPROC(atomic64_sub)
7101
7102 +ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
7103 + BACKOFF_SETUP(%o2)
7104 +1: ldx [%o1], %g1
7105 + subcc %g1, %o0, %g7
7106 + casx [%o1], %g1, %g7
7107 + cmp %g1, %g7
7108 + bne,pn %xcc, 2f
7109 + nop
7110 + retl
7111 + nop
7112 +2: BACKOFF_SPIN(%o2, %o3, 1b)
7113 +ENDPROC(atomic64_sub_unchecked)
7114 +
7115 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7116 BACKOFF_SETUP(%o2)
7117 1: ldx [%o1], %g1
7118 - add %g1, %o0, %g7
7119 + addcc %g1, %o0, %g7
7120 +
7121 +#ifdef CONFIG_PAX_REFCOUNT
7122 + tvs %xcc, 6
7123 +#endif
7124 +
7125 casx [%o1], %g1, %g7
7126 cmp %g1, %g7
7127 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7128 @@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7129 2: BACKOFF_SPIN(%o2, %o3, 1b)
7130 ENDPROC(atomic64_add_ret)
7131
7132 +ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7133 + BACKOFF_SETUP(%o2)
7134 +1: ldx [%o1], %g1
7135 + addcc %g1, %o0, %g7
7136 + casx [%o1], %g1, %g7
7137 + cmp %g1, %g7
7138 + bne,pn %xcc, 2f
7139 + add %g7, %o0, %g7
7140 + mov %g7, %o0
7141 + retl
7142 + nop
7143 +2: BACKOFF_SPIN(%o2, %o3, 1b)
7144 +ENDPROC(atomic64_add_ret_unchecked)
7145 +
7146 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
7147 BACKOFF_SETUP(%o2)
7148 1: ldx [%o1], %g1
7149 - sub %g1, %o0, %g7
7150 + subcc %g1, %o0, %g7
7151 +
7152 +#ifdef CONFIG_PAX_REFCOUNT
7153 + tvs %xcc, 6
7154 +#endif
7155 +
7156 casx [%o1], %g1, %g7
7157 cmp %g1, %g7
7158 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7159 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
7160 index 0c4e35e..745d3e4 100644
7161 --- a/arch/sparc/lib/ksyms.c
7162 +++ b/arch/sparc/lib/ksyms.c
7163 @@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
7164
7165 /* Atomic counter implementation. */
7166 EXPORT_SYMBOL(atomic_add);
7167 +EXPORT_SYMBOL(atomic_add_unchecked);
7168 EXPORT_SYMBOL(atomic_add_ret);
7169 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
7170 EXPORT_SYMBOL(atomic_sub);
7171 +EXPORT_SYMBOL(atomic_sub_unchecked);
7172 EXPORT_SYMBOL(atomic_sub_ret);
7173 EXPORT_SYMBOL(atomic64_add);
7174 +EXPORT_SYMBOL(atomic64_add_unchecked);
7175 EXPORT_SYMBOL(atomic64_add_ret);
7176 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
7177 EXPORT_SYMBOL(atomic64_sub);
7178 +EXPORT_SYMBOL(atomic64_sub_unchecked);
7179 EXPORT_SYMBOL(atomic64_sub_ret);
7180 EXPORT_SYMBOL(atomic64_dec_if_positive);
7181
7182 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
7183 index 30c3ecc..736f015 100644
7184 --- a/arch/sparc/mm/Makefile
7185 +++ b/arch/sparc/mm/Makefile
7186 @@ -2,7 +2,7 @@
7187 #
7188
7189 asflags-y := -ansi
7190 -ccflags-y := -Werror
7191 +#ccflags-y := -Werror
7192
7193 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
7194 obj-y += fault_$(BITS).o
7195 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
7196 index e98bfda..ea8d221 100644
7197 --- a/arch/sparc/mm/fault_32.c
7198 +++ b/arch/sparc/mm/fault_32.c
7199 @@ -21,6 +21,9 @@
7200 #include <linux/perf_event.h>
7201 #include <linux/interrupt.h>
7202 #include <linux/kdebug.h>
7203 +#include <linux/slab.h>
7204 +#include <linux/pagemap.h>
7205 +#include <linux/compiler.h>
7206
7207 #include <asm/page.h>
7208 #include <asm/pgtable.h>
7209 @@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
7210 return safe_compute_effective_address(regs, insn);
7211 }
7212
7213 +#ifdef CONFIG_PAX_PAGEEXEC
7214 +#ifdef CONFIG_PAX_DLRESOLVE
7215 +static void pax_emuplt_close(struct vm_area_struct *vma)
7216 +{
7217 + vma->vm_mm->call_dl_resolve = 0UL;
7218 +}
7219 +
7220 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7221 +{
7222 + unsigned int *kaddr;
7223 +
7224 + vmf->page = alloc_page(GFP_HIGHUSER);
7225 + if (!vmf->page)
7226 + return VM_FAULT_OOM;
7227 +
7228 + kaddr = kmap(vmf->page);
7229 + memset(kaddr, 0, PAGE_SIZE);
7230 + kaddr[0] = 0x9DE3BFA8U; /* save */
7231 + flush_dcache_page(vmf->page);
7232 + kunmap(vmf->page);
7233 + return VM_FAULT_MAJOR;
7234 +}
7235 +
7236 +static const struct vm_operations_struct pax_vm_ops = {
7237 + .close = pax_emuplt_close,
7238 + .fault = pax_emuplt_fault
7239 +};
7240 +
7241 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7242 +{
7243 + int ret;
7244 +
7245 + INIT_LIST_HEAD(&vma->anon_vma_chain);
7246 + vma->vm_mm = current->mm;
7247 + vma->vm_start = addr;
7248 + vma->vm_end = addr + PAGE_SIZE;
7249 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7250 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7251 + vma->vm_ops = &pax_vm_ops;
7252 +
7253 + ret = insert_vm_struct(current->mm, vma);
7254 + if (ret)
7255 + return ret;
7256 +
7257 + ++current->mm->total_vm;
7258 + return 0;
7259 +}
7260 +#endif
7261 +
7262 +/*
7263 + * PaX: decide what to do with offenders (regs->pc = fault address)
7264 + *
7265 + * returns 1 when task should be killed
7266 + * 2 when patched PLT trampoline was detected
7267 + * 3 when unpatched PLT trampoline was detected
7268 + */
7269 +static int pax_handle_fetch_fault(struct pt_regs *regs)
7270 +{
7271 +
7272 +#ifdef CONFIG_PAX_EMUPLT
7273 + int err;
7274 +
7275 + do { /* PaX: patched PLT emulation #1 */
7276 + unsigned int sethi1, sethi2, jmpl;
7277 +
7278 + err = get_user(sethi1, (unsigned int *)regs->pc);
7279 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
7280 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
7281 +
7282 + if (err)
7283 + break;
7284 +
7285 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7286 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
7287 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
7288 + {
7289 + unsigned int addr;
7290 +
7291 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7292 + addr = regs->u_regs[UREG_G1];
7293 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7294 + regs->pc = addr;
7295 + regs->npc = addr+4;
7296 + return 2;
7297 + }
7298 + } while (0);
7299 +
7300 + do { /* PaX: patched PLT emulation #2 */
7301 + unsigned int ba;
7302 +
7303 + err = get_user(ba, (unsigned int *)regs->pc);
7304 +
7305 + if (err)
7306 + break;
7307 +
7308 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
7309 + unsigned int addr;
7310 +
7311 + if ((ba & 0xFFC00000U) == 0x30800000U)
7312 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7313 + else
7314 + addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7315 + regs->pc = addr;
7316 + regs->npc = addr+4;
7317 + return 2;
7318 + }
7319 + } while (0);
7320 +
7321 + do { /* PaX: patched PLT emulation #3 */
7322 + unsigned int sethi, bajmpl, nop;
7323 +
7324 + err = get_user(sethi, (unsigned int *)regs->pc);
7325 + err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
7326 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
7327 +
7328 + if (err)
7329 + break;
7330 +
7331 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7332 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
7333 + nop == 0x01000000U)
7334 + {
7335 + unsigned int addr;
7336 +
7337 + addr = (sethi & 0x003FFFFFU) << 10;
7338 + regs->u_regs[UREG_G1] = addr;
7339 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
7340 + addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7341 + else
7342 + addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7343 + regs->pc = addr;
7344 + regs->npc = addr+4;
7345 + return 2;
7346 + }
7347 + } while (0);
7348 +
7349 + do { /* PaX: unpatched PLT emulation step 1 */
7350 + unsigned int sethi, ba, nop;
7351 +
7352 + err = get_user(sethi, (unsigned int *)regs->pc);
7353 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
7354 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
7355 +
7356 + if (err)
7357 + break;
7358 +
7359 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7360 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7361 + nop == 0x01000000U)
7362 + {
7363 + unsigned int addr, save, call;
7364 +
7365 + if ((ba & 0xFFC00000U) == 0x30800000U)
7366 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7367 + else
7368 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7369 +
7370 + err = get_user(save, (unsigned int *)addr);
7371 + err |= get_user(call, (unsigned int *)(addr+4));
7372 + err |= get_user(nop, (unsigned int *)(addr+8));
7373 + if (err)
7374 + break;
7375 +
7376 +#ifdef CONFIG_PAX_DLRESOLVE
7377 + if (save == 0x9DE3BFA8U &&
7378 + (call & 0xC0000000U) == 0x40000000U &&
7379 + nop == 0x01000000U)
7380 + {
7381 + struct vm_area_struct *vma;
7382 + unsigned long call_dl_resolve;
7383 +
7384 + down_read(&current->mm->mmap_sem);
7385 + call_dl_resolve = current->mm->call_dl_resolve;
7386 + up_read(&current->mm->mmap_sem);
7387 + if (likely(call_dl_resolve))
7388 + goto emulate;
7389 +
7390 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7391 +
7392 + down_write(&current->mm->mmap_sem);
7393 + if (current->mm->call_dl_resolve) {
7394 + call_dl_resolve = current->mm->call_dl_resolve;
7395 + up_write(&current->mm->mmap_sem);
7396 + if (vma)
7397 + kmem_cache_free(vm_area_cachep, vma);
7398 + goto emulate;
7399 + }
7400 +
7401 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7402 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7403 + up_write(&current->mm->mmap_sem);
7404 + if (vma)
7405 + kmem_cache_free(vm_area_cachep, vma);
7406 + return 1;
7407 + }
7408 +
7409 + if (pax_insert_vma(vma, call_dl_resolve)) {
7410 + up_write(&current->mm->mmap_sem);
7411 + kmem_cache_free(vm_area_cachep, vma);
7412 + return 1;
7413 + }
7414 +
7415 + current->mm->call_dl_resolve = call_dl_resolve;
7416 + up_write(&current->mm->mmap_sem);
7417 +
7418 +emulate:
7419 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7420 + regs->pc = call_dl_resolve;
7421 + regs->npc = addr+4;
7422 + return 3;
7423 + }
7424 +#endif
7425 +
7426 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7427 + if ((save & 0xFFC00000U) == 0x05000000U &&
7428 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7429 + nop == 0x01000000U)
7430 + {
7431 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7432 + regs->u_regs[UREG_G2] = addr + 4;
7433 + addr = (save & 0x003FFFFFU) << 10;
7434 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7435 + regs->pc = addr;
7436 + regs->npc = addr+4;
7437 + return 3;
7438 + }
7439 + }
7440 + } while (0);
7441 +
7442 + do { /* PaX: unpatched PLT emulation step 2 */
7443 + unsigned int save, call, nop;
7444 +
7445 + err = get_user(save, (unsigned int *)(regs->pc-4));
7446 + err |= get_user(call, (unsigned int *)regs->pc);
7447 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
7448 + if (err)
7449 + break;
7450 +
7451 + if (save == 0x9DE3BFA8U &&
7452 + (call & 0xC0000000U) == 0x40000000U &&
7453 + nop == 0x01000000U)
7454 + {
7455 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
7456 +
7457 + regs->u_regs[UREG_RETPC] = regs->pc;
7458 + regs->pc = dl_resolve;
7459 + regs->npc = dl_resolve+4;
7460 + return 3;
7461 + }
7462 + } while (0);
7463 +#endif
7464 +
7465 + return 1;
7466 +}
7467 +
7468 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7469 +{
7470 + unsigned long i;
7471 +
7472 + printk(KERN_ERR "PAX: bytes at PC: ");
7473 + for (i = 0; i < 8; i++) {
7474 + unsigned int c;
7475 + if (get_user(c, (unsigned int *)pc+i))
7476 + printk(KERN_CONT "???????? ");
7477 + else
7478 + printk(KERN_CONT "%08x ", c);
7479 + }
7480 + printk("\n");
7481 +}
7482 +#endif
7483 +
7484 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
7485 int text_fault)
7486 {
7487 @@ -230,6 +504,24 @@ good_area:
7488 if (!(vma->vm_flags & VM_WRITE))
7489 goto bad_area;
7490 } else {
7491 +
7492 +#ifdef CONFIG_PAX_PAGEEXEC
7493 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
7494 + up_read(&mm->mmap_sem);
7495 + switch (pax_handle_fetch_fault(regs)) {
7496 +
7497 +#ifdef CONFIG_PAX_EMUPLT
7498 + case 2:
7499 + case 3:
7500 + return;
7501 +#endif
7502 +
7503 + }
7504 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
7505 + do_group_exit(SIGKILL);
7506 + }
7507 +#endif
7508 +
7509 /* Allow reads even for write-only mappings */
7510 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
7511 goto bad_area;
7512 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
7513 index 097aee7..5ca6697 100644
7514 --- a/arch/sparc/mm/fault_64.c
7515 +++ b/arch/sparc/mm/fault_64.c
7516 @@ -21,6 +21,9 @@
7517 #include <linux/kprobes.h>
7518 #include <linux/kdebug.h>
7519 #include <linux/percpu.h>
7520 +#include <linux/slab.h>
7521 +#include <linux/pagemap.h>
7522 +#include <linux/compiler.h>
7523
7524 #include <asm/page.h>
7525 #include <asm/pgtable.h>
7526 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
7527 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
7528 regs->tpc);
7529 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
7530 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
7531 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
7532 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
7533 dump_stack();
7534 unhandled_fault(regs->tpc, current, regs);
7535 @@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
7536 show_regs(regs);
7537 }
7538
7539 +#ifdef CONFIG_PAX_PAGEEXEC
7540 +#ifdef CONFIG_PAX_DLRESOLVE
7541 +static void pax_emuplt_close(struct vm_area_struct *vma)
7542 +{
7543 + vma->vm_mm->call_dl_resolve = 0UL;
7544 +}
7545 +
7546 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7547 +{
7548 + unsigned int *kaddr;
7549 +
7550 + vmf->page = alloc_page(GFP_HIGHUSER);
7551 + if (!vmf->page)
7552 + return VM_FAULT_OOM;
7553 +
7554 + kaddr = kmap(vmf->page);
7555 + memset(kaddr, 0, PAGE_SIZE);
7556 + kaddr[0] = 0x9DE3BFA8U; /* save */
7557 + flush_dcache_page(vmf->page);
7558 + kunmap(vmf->page);
7559 + return VM_FAULT_MAJOR;
7560 +}
7561 +
7562 +static const struct vm_operations_struct pax_vm_ops = {
7563 + .close = pax_emuplt_close,
7564 + .fault = pax_emuplt_fault
7565 +};
7566 +
7567 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7568 +{
7569 + int ret;
7570 +
7571 + INIT_LIST_HEAD(&vma->anon_vma_chain);
7572 + vma->vm_mm = current->mm;
7573 + vma->vm_start = addr;
7574 + vma->vm_end = addr + PAGE_SIZE;
7575 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7576 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7577 + vma->vm_ops = &pax_vm_ops;
7578 +
7579 + ret = insert_vm_struct(current->mm, vma);
7580 + if (ret)
7581 + return ret;
7582 +
7583 + ++current->mm->total_vm;
7584 + return 0;
7585 +}
7586 +#endif
7587 +
7588 +/*
7589 + * PaX: decide what to do with offenders (regs->tpc = fault address)
7590 + *
7591 + * returns 1 when task should be killed
7592 + * 2 when patched PLT trampoline was detected
7593 + * 3 when unpatched PLT trampoline was detected
7594 + */
7595 +static int pax_handle_fetch_fault(struct pt_regs *regs)
7596 +{
7597 +
7598 +#ifdef CONFIG_PAX_EMUPLT
7599 + int err;
7600 +
7601 + do { /* PaX: patched PLT emulation #1 */
7602 + unsigned int sethi1, sethi2, jmpl;
7603 +
7604 + err = get_user(sethi1, (unsigned int *)regs->tpc);
7605 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
7606 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
7607 +
7608 + if (err)
7609 + break;
7610 +
7611 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7612 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
7613 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
7614 + {
7615 + unsigned long addr;
7616 +
7617 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7618 + addr = regs->u_regs[UREG_G1];
7619 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7620 +
7621 + if (test_thread_flag(TIF_32BIT))
7622 + addr &= 0xFFFFFFFFUL;
7623 +
7624 + regs->tpc = addr;
7625 + regs->tnpc = addr+4;
7626 + return 2;
7627 + }
7628 + } while (0);
7629 +
7630 + do { /* PaX: patched PLT emulation #2 */
7631 + unsigned int ba;
7632 +
7633 + err = get_user(ba, (unsigned int *)regs->tpc);
7634 +
7635 + if (err)
7636 + break;
7637 +
7638 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
7639 + unsigned long addr;
7640 +
7641 + if ((ba & 0xFFC00000U) == 0x30800000U)
7642 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7643 + else
7644 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7645 +
7646 + if (test_thread_flag(TIF_32BIT))
7647 + addr &= 0xFFFFFFFFUL;
7648 +
7649 + regs->tpc = addr;
7650 + regs->tnpc = addr+4;
7651 + return 2;
7652 + }
7653 + } while (0);
7654 +
7655 + do { /* PaX: patched PLT emulation #3 */
7656 + unsigned int sethi, bajmpl, nop;
7657 +
7658 + err = get_user(sethi, (unsigned int *)regs->tpc);
7659 + err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
7660 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7661 +
7662 + if (err)
7663 + break;
7664 +
7665 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7666 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
7667 + nop == 0x01000000U)
7668 + {
7669 + unsigned long addr;
7670 +
7671 + addr = (sethi & 0x003FFFFFU) << 10;
7672 + regs->u_regs[UREG_G1] = addr;
7673 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
7674 + addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7675 + else
7676 + addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7677 +
7678 + if (test_thread_flag(TIF_32BIT))
7679 + addr &= 0xFFFFFFFFUL;
7680 +
7681 + regs->tpc = addr;
7682 + regs->tnpc = addr+4;
7683 + return 2;
7684 + }
7685 + } while (0);
7686 +
7687 + do { /* PaX: patched PLT emulation #4 */
7688 + unsigned int sethi, mov1, call, mov2;
7689 +
7690 + err = get_user(sethi, (unsigned int *)regs->tpc);
7691 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7692 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
7693 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7694 +
7695 + if (err)
7696 + break;
7697 +
7698 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7699 + mov1 == 0x8210000FU &&
7700 + (call & 0xC0000000U) == 0x40000000U &&
7701 + mov2 == 0x9E100001U)
7702 + {
7703 + unsigned long addr;
7704 +
7705 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7706 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7707 +
7708 + if (test_thread_flag(TIF_32BIT))
7709 + addr &= 0xFFFFFFFFUL;
7710 +
7711 + regs->tpc = addr;
7712 + regs->tnpc = addr+4;
7713 + return 2;
7714 + }
7715 + } while (0);
7716 +
7717 + do { /* PaX: patched PLT emulation #5 */
7718 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7719 +
7720 + err = get_user(sethi, (unsigned int *)regs->tpc);
7721 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7722 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7723 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7724 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7725 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7726 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7727 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7728 +
7729 + if (err)
7730 + break;
7731 +
7732 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7733 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7734 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7735 + (or1 & 0xFFFFE000U) == 0x82106000U &&
7736 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7737 + sllx == 0x83287020U &&
7738 + jmpl == 0x81C04005U &&
7739 + nop == 0x01000000U)
7740 + {
7741 + unsigned long addr;
7742 +
7743 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7744 + regs->u_regs[UREG_G1] <<= 32;
7745 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7746 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7747 + regs->tpc = addr;
7748 + regs->tnpc = addr+4;
7749 + return 2;
7750 + }
7751 + } while (0);
7752 +
7753 + do { /* PaX: patched PLT emulation #6 */
7754 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7755 +
7756 + err = get_user(sethi, (unsigned int *)regs->tpc);
7757 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7758 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7759 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7760 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
7761 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7762 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7763 +
7764 + if (err)
7765 + break;
7766 +
7767 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7768 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7769 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7770 + sllx == 0x83287020U &&
7771 + (or & 0xFFFFE000U) == 0x8A116000U &&
7772 + jmpl == 0x81C04005U &&
7773 + nop == 0x01000000U)
7774 + {
7775 + unsigned long addr;
7776 +
7777 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7778 + regs->u_regs[UREG_G1] <<= 32;
7779 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7780 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7781 + regs->tpc = addr;
7782 + regs->tnpc = addr+4;
7783 + return 2;
7784 + }
7785 + } while (0);
7786 +
7787 + do { /* PaX: unpatched PLT emulation step 1 */
7788 + unsigned int sethi, ba, nop;
7789 +
7790 + err = get_user(sethi, (unsigned int *)regs->tpc);
7791 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7792 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7793 +
7794 + if (err)
7795 + break;
7796 +
7797 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7798 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7799 + nop == 0x01000000U)
7800 + {
7801 + unsigned long addr;
7802 + unsigned int save, call;
7803 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7804 +
7805 + if ((ba & 0xFFC00000U) == 0x30800000U)
7806 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7807 + else
7808 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7809 +
7810 + if (test_thread_flag(TIF_32BIT))
7811 + addr &= 0xFFFFFFFFUL;
7812 +
7813 + err = get_user(save, (unsigned int *)addr);
7814 + err |= get_user(call, (unsigned int *)(addr+4));
7815 + err |= get_user(nop, (unsigned int *)(addr+8));
7816 + if (err)
7817 + break;
7818 +
7819 +#ifdef CONFIG_PAX_DLRESOLVE
7820 + if (save == 0x9DE3BFA8U &&
7821 + (call & 0xC0000000U) == 0x40000000U &&
7822 + nop == 0x01000000U)
7823 + {
7824 + struct vm_area_struct *vma;
7825 + unsigned long call_dl_resolve;
7826 +
7827 + down_read(&current->mm->mmap_sem);
7828 + call_dl_resolve = current->mm->call_dl_resolve;
7829 + up_read(&current->mm->mmap_sem);
7830 + if (likely(call_dl_resolve))
7831 + goto emulate;
7832 +
7833 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7834 +
7835 + down_write(&current->mm->mmap_sem);
7836 + if (current->mm->call_dl_resolve) {
7837 + call_dl_resolve = current->mm->call_dl_resolve;
7838 + up_write(&current->mm->mmap_sem);
7839 + if (vma)
7840 + kmem_cache_free(vm_area_cachep, vma);
7841 + goto emulate;
7842 + }
7843 +
7844 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7845 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7846 + up_write(&current->mm->mmap_sem);
7847 + if (vma)
7848 + kmem_cache_free(vm_area_cachep, vma);
7849 + return 1;
7850 + }
7851 +
7852 + if (pax_insert_vma(vma, call_dl_resolve)) {
7853 + up_write(&current->mm->mmap_sem);
7854 + kmem_cache_free(vm_area_cachep, vma);
7855 + return 1;
7856 + }
7857 +
7858 + current->mm->call_dl_resolve = call_dl_resolve;
7859 + up_write(&current->mm->mmap_sem);
7860 +
7861 +emulate:
7862 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7863 + regs->tpc = call_dl_resolve;
7864 + regs->tnpc = addr+4;
7865 + return 3;
7866 + }
7867 +#endif
7868 +
7869 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7870 + if ((save & 0xFFC00000U) == 0x05000000U &&
7871 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7872 + nop == 0x01000000U)
7873 + {
7874 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7875 + regs->u_regs[UREG_G2] = addr + 4;
7876 + addr = (save & 0x003FFFFFU) << 10;
7877 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7878 +
7879 + if (test_thread_flag(TIF_32BIT))
7880 + addr &= 0xFFFFFFFFUL;
7881 +
7882 + regs->tpc = addr;
7883 + regs->tnpc = addr+4;
7884 + return 3;
7885 + }
7886 +
7887 + /* PaX: 64-bit PLT stub */
7888 + err = get_user(sethi1, (unsigned int *)addr);
7889 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7890 + err |= get_user(or1, (unsigned int *)(addr+8));
7891 + err |= get_user(or2, (unsigned int *)(addr+12));
7892 + err |= get_user(sllx, (unsigned int *)(addr+16));
7893 + err |= get_user(add, (unsigned int *)(addr+20));
7894 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7895 + err |= get_user(nop, (unsigned int *)(addr+28));
7896 + if (err)
7897 + break;
7898 +
7899 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7900 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7901 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7902 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7903 + sllx == 0x89293020U &&
7904 + add == 0x8A010005U &&
7905 + jmpl == 0x89C14000U &&
7906 + nop == 0x01000000U)
7907 + {
7908 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7909 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7910 + regs->u_regs[UREG_G4] <<= 32;
7911 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7912 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7913 + regs->u_regs[UREG_G4] = addr + 24;
7914 + addr = regs->u_regs[UREG_G5];
7915 + regs->tpc = addr;
7916 + regs->tnpc = addr+4;
7917 + return 3;
7918 + }
7919 + }
7920 + } while (0);
7921 +
7922 +#ifdef CONFIG_PAX_DLRESOLVE
7923 + do { /* PaX: unpatched PLT emulation step 2 */
7924 + unsigned int save, call, nop;
7925 +
7926 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7927 + err |= get_user(call, (unsigned int *)regs->tpc);
7928 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7929 + if (err)
7930 + break;
7931 +
7932 + if (save == 0x9DE3BFA8U &&
7933 + (call & 0xC0000000U) == 0x40000000U &&
7934 + nop == 0x01000000U)
7935 + {
7936 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7937 +
7938 + if (test_thread_flag(TIF_32BIT))
7939 + dl_resolve &= 0xFFFFFFFFUL;
7940 +
7941 + regs->u_regs[UREG_RETPC] = regs->tpc;
7942 + regs->tpc = dl_resolve;
7943 + regs->tnpc = dl_resolve+4;
7944 + return 3;
7945 + }
7946 + } while (0);
7947 +#endif
7948 +
7949 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7950 + unsigned int sethi, ba, nop;
7951 +
7952 + err = get_user(sethi, (unsigned int *)regs->tpc);
7953 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7954 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7955 +
7956 + if (err)
7957 + break;
7958 +
7959 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7960 + (ba & 0xFFF00000U) == 0x30600000U &&
7961 + nop == 0x01000000U)
7962 + {
7963 + unsigned long addr;
7964 +
7965 + addr = (sethi & 0x003FFFFFU) << 10;
7966 + regs->u_regs[UREG_G1] = addr;
7967 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7968 +
7969 + if (test_thread_flag(TIF_32BIT))
7970 + addr &= 0xFFFFFFFFUL;
7971 +
7972 + regs->tpc = addr;
7973 + regs->tnpc = addr+4;
7974 + return 2;
7975 + }
7976 + } while (0);
7977 +
7978 +#endif
7979 +
7980 + return 1;
7981 +}
7982 +
7983 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7984 +{
7985 + unsigned long i;
7986 +
7987 + printk(KERN_ERR "PAX: bytes at PC: ");
7988 + for (i = 0; i < 8; i++) {
7989 + unsigned int c;
7990 + if (get_user(c, (unsigned int *)pc+i))
7991 + printk(KERN_CONT "???????? ");
7992 + else
7993 + printk(KERN_CONT "%08x ", c);
7994 + }
7995 + printk("\n");
7996 +}
7997 +#endif
7998 +
7999 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
8000 {
8001 struct mm_struct *mm = current->mm;
8002 @@ -341,6 +804,29 @@ retry:
8003 if (!vma)
8004 goto bad_area;
8005
8006 +#ifdef CONFIG_PAX_PAGEEXEC
8007 + /* PaX: detect ITLB misses on non-exec pages */
8008 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
8009 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
8010 + {
8011 + if (address != regs->tpc)
8012 + goto good_area;
8013 +
8014 + up_read(&mm->mmap_sem);
8015 + switch (pax_handle_fetch_fault(regs)) {
8016 +
8017 +#ifdef CONFIG_PAX_EMUPLT
8018 + case 2:
8019 + case 3:
8020 + return;
8021 +#endif
8022 +
8023 + }
8024 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
8025 + do_group_exit(SIGKILL);
8026 + }
8027 +#endif
8028 +
8029 /* Pure DTLB misses do not tell us whether the fault causing
8030 * load/store/atomic was a write or not, it only says that there
8031 * was no match. So in such a case we (carefully) read the
8032 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
8033 index f76f83d..0f28457 100644
8034 --- a/arch/sparc/mm/hugetlbpage.c
8035 +++ b/arch/sparc/mm/hugetlbpage.c
8036 @@ -67,7 +67,7 @@ full_search:
8037 }
8038 return -ENOMEM;
8039 }
8040 - if (likely(!vma || addr + len <= vma->vm_start)) {
8041 + if (likely(check_heap_stack_gap(vma, addr, len))) {
8042 /*
8043 * Remember the place where we stopped the search:
8044 */
8045 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8046 /* make sure it can fit in the remaining address space */
8047 if (likely(addr > len)) {
8048 vma = find_vma(mm, addr-len);
8049 - if (!vma || addr <= vma->vm_start) {
8050 + if (check_heap_stack_gap(vma, addr - len, len)) {
8051 /* remember the address as a hint for next time */
8052 return (mm->free_area_cache = addr-len);
8053 }
8054 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8055 if (unlikely(mm->mmap_base < len))
8056 goto bottomup;
8057
8058 - addr = (mm->mmap_base-len) & HPAGE_MASK;
8059 + addr = mm->mmap_base - len;
8060
8061 do {
8062 + addr &= HPAGE_MASK;
8063 /*
8064 * Lookup failure means no vma is above this address,
8065 * else if new region fits below vma->vm_start,
8066 * return with success:
8067 */
8068 vma = find_vma(mm, addr);
8069 - if (likely(!vma || addr+len <= vma->vm_start)) {
8070 + if (likely(check_heap_stack_gap(vma, addr, len))) {
8071 /* remember the address as a hint for next time */
8072 return (mm->free_area_cache = addr);
8073 }
8074 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8075 mm->cached_hole_size = vma->vm_start - addr;
8076
8077 /* try just below the current vma->vm_start */
8078 - addr = (vma->vm_start-len) & HPAGE_MASK;
8079 - } while (likely(len < vma->vm_start));
8080 + addr = skip_heap_stack_gap(vma, len);
8081 + } while (!IS_ERR_VALUE(addr));
8082
8083 bottomup:
8084 /*
8085 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
8086 if (addr) {
8087 addr = ALIGN(addr, HPAGE_SIZE);
8088 vma = find_vma(mm, addr);
8089 - if (task_size - len >= addr &&
8090 - (!vma || addr + len <= vma->vm_start))
8091 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
8092 return addr;
8093 }
8094 if (mm->get_unmapped_area == arch_get_unmapped_area)
8095 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
8096 index f4500c6..889656c 100644
8097 --- a/arch/tile/include/asm/atomic_64.h
8098 +++ b/arch/tile/include/asm/atomic_64.h
8099 @@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8100
8101 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8102
8103 +#define atomic64_read_unchecked(v) atomic64_read(v)
8104 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8105 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8106 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8107 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8108 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
8109 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8110 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
8111 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8112 +
8113 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
8114 #define smp_mb__before_atomic_dec() smp_mb()
8115 #define smp_mb__after_atomic_dec() smp_mb()
8116 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
8117 index a9a5299..0fce79e 100644
8118 --- a/arch/tile/include/asm/cache.h
8119 +++ b/arch/tile/include/asm/cache.h
8120 @@ -15,11 +15,12 @@
8121 #ifndef _ASM_TILE_CACHE_H
8122 #define _ASM_TILE_CACHE_H
8123
8124 +#include <linux/const.h>
8125 #include <arch/chip.h>
8126
8127 /* bytes per L1 data cache line */
8128 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
8129 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8130 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8131
8132 /* bytes per L2 cache line */
8133 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
8134 diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
8135 index 9ab078a..d6635c2 100644
8136 --- a/arch/tile/include/asm/uaccess.h
8137 +++ b/arch/tile/include/asm/uaccess.h
8138 @@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
8139 const void __user *from,
8140 unsigned long n)
8141 {
8142 - int sz = __compiletime_object_size(to);
8143 + size_t sz = __compiletime_object_size(to);
8144
8145 - if (likely(sz == -1 || sz >= n))
8146 + if (likely(sz == (size_t)-1 || sz >= n))
8147 n = _copy_from_user(to, from, n);
8148 else
8149 copy_from_user_overflow();
8150 diff --git a/arch/um/Makefile b/arch/um/Makefile
8151 index 133f7de..1d6f2f1 100644
8152 --- a/arch/um/Makefile
8153 +++ b/arch/um/Makefile
8154 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
8155 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
8156 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
8157
8158 +ifdef CONSTIFY_PLUGIN
8159 +USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8160 +endif
8161 +
8162 #This will adjust *FLAGS accordingly to the platform.
8163 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
8164
8165 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
8166 index 19e1bdd..3665b77 100644
8167 --- a/arch/um/include/asm/cache.h
8168 +++ b/arch/um/include/asm/cache.h
8169 @@ -1,6 +1,7 @@
8170 #ifndef __UM_CACHE_H
8171 #define __UM_CACHE_H
8172
8173 +#include <linux/const.h>
8174
8175 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
8176 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8177 @@ -12,6 +13,6 @@
8178 # define L1_CACHE_SHIFT 5
8179 #endif
8180
8181 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8182 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8183
8184 #endif
8185 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
8186 index 2e0a6b1..a64d0f5 100644
8187 --- a/arch/um/include/asm/kmap_types.h
8188 +++ b/arch/um/include/asm/kmap_types.h
8189 @@ -8,6 +8,6 @@
8190
8191 /* No more #include "asm/arch/kmap_types.h" ! */
8192
8193 -#define KM_TYPE_NR 14
8194 +#define KM_TYPE_NR 15
8195
8196 #endif
8197 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
8198 index 5ff53d9..5850cdf 100644
8199 --- a/arch/um/include/asm/page.h
8200 +++ b/arch/um/include/asm/page.h
8201 @@ -14,6 +14,9 @@
8202 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
8203 #define PAGE_MASK (~(PAGE_SIZE-1))
8204
8205 +#define ktla_ktva(addr) (addr)
8206 +#define ktva_ktla(addr) (addr)
8207 +
8208 #ifndef __ASSEMBLY__
8209
8210 struct page;
8211 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
8212 index 0032f92..cd151e0 100644
8213 --- a/arch/um/include/asm/pgtable-3level.h
8214 +++ b/arch/um/include/asm/pgtable-3level.h
8215 @@ -58,6 +58,7 @@
8216 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
8217 #define pud_populate(mm, pud, pmd) \
8218 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
8219 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8220
8221 #ifdef CONFIG_64BIT
8222 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
8223 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
8224 index b6d699c..df7ac1d 100644
8225 --- a/arch/um/kernel/process.c
8226 +++ b/arch/um/kernel/process.c
8227 @@ -387,22 +387,6 @@ int singlestepping(void * t)
8228 return 2;
8229 }
8230
8231 -/*
8232 - * Only x86 and x86_64 have an arch_align_stack().
8233 - * All other arches have "#define arch_align_stack(x) (x)"
8234 - * in their asm/system.h
8235 - * As this is included in UML from asm-um/system-generic.h,
8236 - * we can use it to behave as the subarch does.
8237 - */
8238 -#ifndef arch_align_stack
8239 -unsigned long arch_align_stack(unsigned long sp)
8240 -{
8241 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8242 - sp -= get_random_int() % 8192;
8243 - return sp & ~0xf;
8244 -}
8245 -#endif
8246 -
8247 unsigned long get_wchan(struct task_struct *p)
8248 {
8249 unsigned long stack_page, sp, ip;
8250 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
8251 index ad8f795..2c7eec6 100644
8252 --- a/arch/unicore32/include/asm/cache.h
8253 +++ b/arch/unicore32/include/asm/cache.h
8254 @@ -12,8 +12,10 @@
8255 #ifndef __UNICORE_CACHE_H__
8256 #define __UNICORE_CACHE_H__
8257
8258 -#define L1_CACHE_SHIFT (5)
8259 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8260 +#include <linux/const.h>
8261 +
8262 +#define L1_CACHE_SHIFT 5
8263 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8264
8265 /*
8266 * Memory returned by kmalloc() may be used for DMA, so we must make
8267 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
8268 index 46c3bff..c2286e7 100644
8269 --- a/arch/x86/Kconfig
8270 +++ b/arch/x86/Kconfig
8271 @@ -241,7 +241,7 @@ config X86_HT
8272
8273 config X86_32_LAZY_GS
8274 def_bool y
8275 - depends on X86_32 && !CC_STACKPROTECTOR
8276 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
8277
8278 config ARCH_HWEIGHT_CFLAGS
8279 string
8280 @@ -1056,7 +1056,7 @@ choice
8281
8282 config NOHIGHMEM
8283 bool "off"
8284 - depends on !X86_NUMAQ
8285 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
8286 ---help---
8287 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
8288 However, the address space of 32-bit x86 processors is only 4
8289 @@ -1093,7 +1093,7 @@ config NOHIGHMEM
8290
8291 config HIGHMEM4G
8292 bool "4GB"
8293 - depends on !X86_NUMAQ
8294 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
8295 ---help---
8296 Select this if you have a 32-bit processor and between 1 and 4
8297 gigabytes of physical RAM.
8298 @@ -1147,7 +1147,7 @@ config PAGE_OFFSET
8299 hex
8300 default 0xB0000000 if VMSPLIT_3G_OPT
8301 default 0x80000000 if VMSPLIT_2G
8302 - default 0x78000000 if VMSPLIT_2G_OPT
8303 + default 0x70000000 if VMSPLIT_2G_OPT
8304 default 0x40000000 if VMSPLIT_1G
8305 default 0xC0000000
8306 depends on X86_32
8307 @@ -1548,6 +1548,7 @@ config SECCOMP
8308
8309 config CC_STACKPROTECTOR
8310 bool "Enable -fstack-protector buffer overflow detection"
8311 + depends on X86_64 || !PAX_MEMORY_UDEREF
8312 ---help---
8313 This option turns on the -fstack-protector GCC feature. This
8314 feature puts, at the beginning of functions, a canary value on
8315 @@ -1605,6 +1606,7 @@ config KEXEC_JUMP
8316 config PHYSICAL_START
8317 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
8318 default "0x1000000"
8319 + range 0x400000 0x40000000
8320 ---help---
8321 This gives the physical address where the kernel is loaded.
8322
8323 @@ -1668,6 +1670,7 @@ config X86_NEED_RELOCS
8324 config PHYSICAL_ALIGN
8325 hex "Alignment value to which kernel should be aligned" if X86_32
8326 default "0x1000000"
8327 + range 0x400000 0x1000000 if PAX_KERNEXEC
8328 range 0x2000 0x1000000
8329 ---help---
8330 This value puts the alignment restrictions on physical address
8331 @@ -1699,9 +1702,10 @@ config HOTPLUG_CPU
8332 Say N if you want to disable CPU hotplug.
8333
8334 config COMPAT_VDSO
8335 - def_bool y
8336 + def_bool n
8337 prompt "Compat VDSO support"
8338 depends on X86_32 || IA32_EMULATION
8339 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
8340 ---help---
8341 Map the 32-bit VDSO to the predictable old-style address too.
8342
8343 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
8344 index f3b86d0..17fd30f 100644
8345 --- a/arch/x86/Kconfig.cpu
8346 +++ b/arch/x86/Kconfig.cpu
8347 @@ -335,7 +335,7 @@ config X86_PPRO_FENCE
8348
8349 config X86_F00F_BUG
8350 def_bool y
8351 - depends on M586MMX || M586TSC || M586 || M486 || M386
8352 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
8353
8354 config X86_INVD_BUG
8355 def_bool y
8356 @@ -359,7 +359,7 @@ config X86_POPAD_OK
8357
8358 config X86_ALIGNMENT_16
8359 def_bool y
8360 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8361 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8362
8363 config X86_INTEL_USERCOPY
8364 def_bool y
8365 @@ -405,7 +405,7 @@ config X86_CMPXCHG64
8366 # generates cmov.
8367 config X86_CMOV
8368 def_bool y
8369 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
8370 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
8371
8372 config X86_MINIMUM_CPU_FAMILY
8373 int
8374 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
8375 index b322f12..652d0d9 100644
8376 --- a/arch/x86/Kconfig.debug
8377 +++ b/arch/x86/Kconfig.debug
8378 @@ -84,7 +84,7 @@ config X86_PTDUMP
8379 config DEBUG_RODATA
8380 bool "Write protect kernel read-only data structures"
8381 default y
8382 - depends on DEBUG_KERNEL
8383 + depends on DEBUG_KERNEL && BROKEN
8384 ---help---
8385 Mark the kernel read-only data as write-protected in the pagetables,
8386 in order to catch accidental (and incorrect) writes to such const
8387 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
8388
8389 config DEBUG_SET_MODULE_RONX
8390 bool "Set loadable kernel module data as NX and text as RO"
8391 - depends on MODULES
8392 + depends on MODULES && BROKEN
8393 ---help---
8394 This option helps catch unintended modifications to loadable
8395 kernel module's text and read-only data. It also prevents execution
8396 @@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
8397
8398 config DEBUG_STRICT_USER_COPY_CHECKS
8399 bool "Strict copy size checks"
8400 - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
8401 + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
8402 ---help---
8403 Enabling this option turns a certain set of sanity checks for user
8404 copy operations into compile time failures.
8405 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
8406 index 05afcca..b6ecb51 100644
8407 --- a/arch/x86/Makefile
8408 +++ b/arch/x86/Makefile
8409 @@ -50,6 +50,7 @@ else
8410 UTS_MACHINE := x86_64
8411 CHECKFLAGS += -D__x86_64__ -m64
8412
8413 + biarch := $(call cc-option,-m64)
8414 KBUILD_AFLAGS += -m64
8415 KBUILD_CFLAGS += -m64
8416
8417 @@ -229,3 +230,12 @@ define archhelp
8418 echo ' FDARGS="..." arguments for the booted kernel'
8419 echo ' FDINITRD=file initrd for the booted kernel'
8420 endef
8421 +
8422 +define OLD_LD
8423 +
8424 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
8425 +*** Please upgrade your binutils to 2.18 or newer
8426 +endef
8427 +
8428 +archprepare:
8429 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
8430 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
8431 index ccce0ed..fd9da25 100644
8432 --- a/arch/x86/boot/Makefile
8433 +++ b/arch/x86/boot/Makefile
8434 @@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
8435 $(call cc-option, -fno-stack-protector) \
8436 $(call cc-option, -mpreferred-stack-boundary=2)
8437 KBUILD_CFLAGS += $(call cc-option, -m32)
8438 +ifdef CONSTIFY_PLUGIN
8439 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8440 +endif
8441 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8442 GCOV_PROFILE := n
8443
8444 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
8445 index 878e4b9..20537ab 100644
8446 --- a/arch/x86/boot/bitops.h
8447 +++ b/arch/x86/boot/bitops.h
8448 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8449 u8 v;
8450 const u32 *p = (const u32 *)addr;
8451
8452 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8453 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8454 return v;
8455 }
8456
8457 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8458
8459 static inline void set_bit(int nr, void *addr)
8460 {
8461 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8462 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8463 }
8464
8465 #endif /* BOOT_BITOPS_H */
8466 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
8467 index 18997e5..83d9c67 100644
8468 --- a/arch/x86/boot/boot.h
8469 +++ b/arch/x86/boot/boot.h
8470 @@ -85,7 +85,7 @@ static inline void io_delay(void)
8471 static inline u16 ds(void)
8472 {
8473 u16 seg;
8474 - asm("movw %%ds,%0" : "=rm" (seg));
8475 + asm volatile("movw %%ds,%0" : "=rm" (seg));
8476 return seg;
8477 }
8478
8479 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
8480 static inline int memcmp(const void *s1, const void *s2, size_t len)
8481 {
8482 u8 diff;
8483 - asm("repe; cmpsb; setnz %0"
8484 + asm volatile("repe; cmpsb; setnz %0"
8485 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
8486 return diff;
8487 }
8488 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
8489 index 8a84501..b2d165f 100644
8490 --- a/arch/x86/boot/compressed/Makefile
8491 +++ b/arch/x86/boot/compressed/Makefile
8492 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
8493 KBUILD_CFLAGS += $(cflags-y)
8494 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
8495 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
8496 +ifdef CONSTIFY_PLUGIN
8497 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8498 +endif
8499
8500 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8501 GCOV_PROFILE := n
8502 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
8503 index e87b0ca..2bce457 100644
8504 --- a/arch/x86/boot/compressed/eboot.c
8505 +++ b/arch/x86/boot/compressed/eboot.c
8506 @@ -144,7 +144,6 @@ again:
8507 *addr = max_addr;
8508 }
8509
8510 -free_pool:
8511 efi_call_phys1(sys_table->boottime->free_pool, map);
8512
8513 fail:
8514 @@ -208,7 +207,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
8515 if (i == map_size / desc_size)
8516 status = EFI_NOT_FOUND;
8517
8518 -free_pool:
8519 efi_call_phys1(sys_table->boottime->free_pool, map);
8520 fail:
8521 return status;
8522 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
8523 index aa4aaf1..6656f2f 100644
8524 --- a/arch/x86/boot/compressed/head_32.S
8525 +++ b/arch/x86/boot/compressed/head_32.S
8526 @@ -116,7 +116,7 @@ preferred_addr:
8527 notl %eax
8528 andl %eax, %ebx
8529 #else
8530 - movl $LOAD_PHYSICAL_ADDR, %ebx
8531 + movl $____LOAD_PHYSICAL_ADDR, %ebx
8532 #endif
8533
8534 /* Target address to relocate to for decompression */
8535 @@ -202,7 +202,7 @@ relocated:
8536 * and where it was actually loaded.
8537 */
8538 movl %ebp, %ebx
8539 - subl $LOAD_PHYSICAL_ADDR, %ebx
8540 + subl $____LOAD_PHYSICAL_ADDR, %ebx
8541 jz 2f /* Nothing to be done if loaded at compiled addr. */
8542 /*
8543 * Process relocations.
8544 @@ -210,8 +210,7 @@ relocated:
8545
8546 1: subl $4, %edi
8547 movl (%edi), %ecx
8548 - testl %ecx, %ecx
8549 - jz 2f
8550 + jecxz 2f
8551 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
8552 jmp 1b
8553 2:
8554 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
8555 index 2c4b171..e1fa5b1 100644
8556 --- a/arch/x86/boot/compressed/head_64.S
8557 +++ b/arch/x86/boot/compressed/head_64.S
8558 @@ -91,7 +91,7 @@ ENTRY(startup_32)
8559 notl %eax
8560 andl %eax, %ebx
8561 #else
8562 - movl $LOAD_PHYSICAL_ADDR, %ebx
8563 + movl $____LOAD_PHYSICAL_ADDR, %ebx
8564 #endif
8565
8566 /* Target address to relocate to for decompression */
8567 @@ -273,7 +273,7 @@ preferred_addr:
8568 notq %rax
8569 andq %rax, %rbp
8570 #else
8571 - movq $LOAD_PHYSICAL_ADDR, %rbp
8572 + movq $____LOAD_PHYSICAL_ADDR, %rbp
8573 #endif
8574
8575 /* Target address to relocate to for decompression */
8576 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
8577 index 88f7ff6..ed695dd 100644
8578 --- a/arch/x86/boot/compressed/misc.c
8579 +++ b/arch/x86/boot/compressed/misc.c
8580 @@ -303,7 +303,7 @@ static void parse_elf(void *output)
8581 case PT_LOAD:
8582 #ifdef CONFIG_RELOCATABLE
8583 dest = output;
8584 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
8585 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
8586 #else
8587 dest = (void *)(phdr->p_paddr);
8588 #endif
8589 @@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
8590 error("Destination address too large");
8591 #endif
8592 #ifndef CONFIG_RELOCATABLE
8593 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
8594 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
8595 error("Wrong destination address");
8596 #endif
8597
8598 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8599 index 4d3ff03..e4972ff 100644
8600 --- a/arch/x86/boot/cpucheck.c
8601 +++ b/arch/x86/boot/cpucheck.c
8602 @@ -74,7 +74,7 @@ static int has_fpu(void)
8603 u16 fcw = -1, fsw = -1;
8604 u32 cr0;
8605
8606 - asm("movl %%cr0,%0" : "=r" (cr0));
8607 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
8608 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8609 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8610 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8611 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8612 {
8613 u32 f0, f1;
8614
8615 - asm("pushfl ; "
8616 + asm volatile("pushfl ; "
8617 "pushfl ; "
8618 "popl %0 ; "
8619 "movl %0,%1 ; "
8620 @@ -115,7 +115,7 @@ static void get_flags(void)
8621 set_bit(X86_FEATURE_FPU, cpu.flags);
8622
8623 if (has_eflag(X86_EFLAGS_ID)) {
8624 - asm("cpuid"
8625 + asm volatile("cpuid"
8626 : "=a" (max_intel_level),
8627 "=b" (cpu_vendor[0]),
8628 "=d" (cpu_vendor[1]),
8629 @@ -124,7 +124,7 @@ static void get_flags(void)
8630
8631 if (max_intel_level >= 0x00000001 &&
8632 max_intel_level <= 0x0000ffff) {
8633 - asm("cpuid"
8634 + asm volatile("cpuid"
8635 : "=a" (tfms),
8636 "=c" (cpu.flags[4]),
8637 "=d" (cpu.flags[0])
8638 @@ -136,7 +136,7 @@ static void get_flags(void)
8639 cpu.model += ((tfms >> 16) & 0xf) << 4;
8640 }
8641
8642 - asm("cpuid"
8643 + asm volatile("cpuid"
8644 : "=a" (max_amd_level)
8645 : "a" (0x80000000)
8646 : "ebx", "ecx", "edx");
8647 @@ -144,7 +144,7 @@ static void get_flags(void)
8648 if (max_amd_level >= 0x80000001 &&
8649 max_amd_level <= 0x8000ffff) {
8650 u32 eax = 0x80000001;
8651 - asm("cpuid"
8652 + asm volatile("cpuid"
8653 : "+a" (eax),
8654 "=c" (cpu.flags[6]),
8655 "=d" (cpu.flags[1])
8656 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8657 u32 ecx = MSR_K7_HWCR;
8658 u32 eax, edx;
8659
8660 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8661 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8662 eax &= ~(1 << 15);
8663 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8664 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8665
8666 get_flags(); /* Make sure it really did something */
8667 err = check_flags();
8668 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8669 u32 ecx = MSR_VIA_FCR;
8670 u32 eax, edx;
8671
8672 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8673 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8674 eax |= (1<<1)|(1<<7);
8675 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8676 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8677
8678 set_bit(X86_FEATURE_CX8, cpu.flags);
8679 err = check_flags();
8680 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8681 u32 eax, edx;
8682 u32 level = 1;
8683
8684 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8685 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8686 - asm("cpuid"
8687 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8688 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8689 + asm volatile("cpuid"
8690 : "+a" (level), "=d" (cpu.flags[0])
8691 : : "ecx", "ebx");
8692 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8693 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8694
8695 err = check_flags();
8696 }
8697 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8698 index 8c132a6..13e5c96 100644
8699 --- a/arch/x86/boot/header.S
8700 +++ b/arch/x86/boot/header.S
8701 @@ -387,10 +387,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
8702 # single linked list of
8703 # struct setup_data
8704
8705 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8706 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8707
8708 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8709 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
8710 +#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
8711 +#else
8712 #define VO_INIT_SIZE (VO__end - VO__text)
8713 +#endif
8714 #if ZO_INIT_SIZE > VO_INIT_SIZE
8715 #define INIT_SIZE ZO_INIT_SIZE
8716 #else
8717 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8718 index db75d07..8e6d0af 100644
8719 --- a/arch/x86/boot/memory.c
8720 +++ b/arch/x86/boot/memory.c
8721 @@ -19,7 +19,7 @@
8722
8723 static int detect_memory_e820(void)
8724 {
8725 - int count = 0;
8726 + unsigned int count = 0;
8727 struct biosregs ireg, oreg;
8728 struct e820entry *desc = boot_params.e820_map;
8729 static struct e820entry buf; /* static so it is zeroed */
8730 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8731 index 11e8c6e..fdbb1ed 100644
8732 --- a/arch/x86/boot/video-vesa.c
8733 +++ b/arch/x86/boot/video-vesa.c
8734 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8735
8736 boot_params.screen_info.vesapm_seg = oreg.es;
8737 boot_params.screen_info.vesapm_off = oreg.di;
8738 + boot_params.screen_info.vesapm_size = oreg.cx;
8739 }
8740
8741 /*
8742 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8743 index 43eda28..5ab5fdb 100644
8744 --- a/arch/x86/boot/video.c
8745 +++ b/arch/x86/boot/video.c
8746 @@ -96,7 +96,7 @@ static void store_mode_params(void)
8747 static unsigned int get_entry(void)
8748 {
8749 char entry_buf[4];
8750 - int i, len = 0;
8751 + unsigned int i, len = 0;
8752 int key;
8753 unsigned int v;
8754
8755 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8756 index 5b577d5..3c1fed4 100644
8757 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8758 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8759 @@ -8,6 +8,8 @@
8760 * including this sentence is retained in full.
8761 */
8762
8763 +#include <asm/alternative-asm.h>
8764 +
8765 .extern crypto_ft_tab
8766 .extern crypto_it_tab
8767 .extern crypto_fl_tab
8768 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8769 je B192; \
8770 leaq 32(r9),r9;
8771
8772 +#define ret pax_force_retaddr 0, 1; ret
8773 +
8774 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8775 movq r1,r2; \
8776 movq r3,r4; \
8777 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8778 index 3470624..201259d 100644
8779 --- a/arch/x86/crypto/aesni-intel_asm.S
8780 +++ b/arch/x86/crypto/aesni-intel_asm.S
8781 @@ -31,6 +31,7 @@
8782
8783 #include <linux/linkage.h>
8784 #include <asm/inst.h>
8785 +#include <asm/alternative-asm.h>
8786
8787 #ifdef __x86_64__
8788 .data
8789 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8790 pop %r14
8791 pop %r13
8792 pop %r12
8793 + pax_force_retaddr 0, 1
8794 ret
8795 +ENDPROC(aesni_gcm_dec)
8796
8797
8798 /*****************************************************************************
8799 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8800 pop %r14
8801 pop %r13
8802 pop %r12
8803 + pax_force_retaddr 0, 1
8804 ret
8805 +ENDPROC(aesni_gcm_enc)
8806
8807 #endif
8808
8809 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
8810 pxor %xmm1, %xmm0
8811 movaps %xmm0, (TKEYP)
8812 add $0x10, TKEYP
8813 + pax_force_retaddr_bts
8814 ret
8815
8816 .align 4
8817 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
8818 shufps $0b01001110, %xmm2, %xmm1
8819 movaps %xmm1, 0x10(TKEYP)
8820 add $0x20, TKEYP
8821 + pax_force_retaddr_bts
8822 ret
8823
8824 .align 4
8825 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
8826
8827 movaps %xmm0, (TKEYP)
8828 add $0x10, TKEYP
8829 + pax_force_retaddr_bts
8830 ret
8831
8832 .align 4
8833 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
8834 pxor %xmm1, %xmm2
8835 movaps %xmm2, (TKEYP)
8836 add $0x10, TKEYP
8837 + pax_force_retaddr_bts
8838 ret
8839
8840 /*
8841 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8842 #ifndef __x86_64__
8843 popl KEYP
8844 #endif
8845 + pax_force_retaddr 0, 1
8846 ret
8847 +ENDPROC(aesni_set_key)
8848
8849 /*
8850 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8851 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8852 popl KLEN
8853 popl KEYP
8854 #endif
8855 + pax_force_retaddr 0, 1
8856 ret
8857 +ENDPROC(aesni_enc)
8858
8859 /*
8860 * _aesni_enc1: internal ABI
8861 @@ -1959,6 +1972,7 @@ _aesni_enc1:
8862 AESENC KEY STATE
8863 movaps 0x70(TKEYP), KEY
8864 AESENCLAST KEY STATE
8865 + pax_force_retaddr_bts
8866 ret
8867
8868 /*
8869 @@ -2067,6 +2081,7 @@ _aesni_enc4:
8870 AESENCLAST KEY STATE2
8871 AESENCLAST KEY STATE3
8872 AESENCLAST KEY STATE4
8873 + pax_force_retaddr_bts
8874 ret
8875
8876 /*
8877 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8878 popl KLEN
8879 popl KEYP
8880 #endif
8881 + pax_force_retaddr 0, 1
8882 ret
8883 +ENDPROC(aesni_dec)
8884
8885 /*
8886 * _aesni_dec1: internal ABI
8887 @@ -2146,6 +2163,7 @@ _aesni_dec1:
8888 AESDEC KEY STATE
8889 movaps 0x70(TKEYP), KEY
8890 AESDECLAST KEY STATE
8891 + pax_force_retaddr_bts
8892 ret
8893
8894 /*
8895 @@ -2254,6 +2272,7 @@ _aesni_dec4:
8896 AESDECLAST KEY STATE2
8897 AESDECLAST KEY STATE3
8898 AESDECLAST KEY STATE4
8899 + pax_force_retaddr_bts
8900 ret
8901
8902 /*
8903 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8904 popl KEYP
8905 popl LEN
8906 #endif
8907 + pax_force_retaddr 0, 1
8908 ret
8909 +ENDPROC(aesni_ecb_enc)
8910
8911 /*
8912 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8913 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8914 popl KEYP
8915 popl LEN
8916 #endif
8917 + pax_force_retaddr 0, 1
8918 ret
8919 +ENDPROC(aesni_ecb_dec)
8920
8921 /*
8922 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8923 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8924 popl LEN
8925 popl IVP
8926 #endif
8927 + pax_force_retaddr 0, 1
8928 ret
8929 +ENDPROC(aesni_cbc_enc)
8930
8931 /*
8932 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8933 @@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
8934 popl LEN
8935 popl IVP
8936 #endif
8937 + pax_force_retaddr 0, 1
8938 ret
8939 +ENDPROC(aesni_cbc_dec)
8940
8941 #ifdef __x86_64__
8942 .align 16
8943 @@ -2526,6 +2553,7 @@ _aesni_inc_init:
8944 mov $1, TCTR_LOW
8945 MOVQ_R64_XMM TCTR_LOW INC
8946 MOVQ_R64_XMM CTR TCTR_LOW
8947 + pax_force_retaddr_bts
8948 ret
8949
8950 /*
8951 @@ -2554,6 +2582,7 @@ _aesni_inc:
8952 .Linc_low:
8953 movaps CTR, IV
8954 PSHUFB_XMM BSWAP_MASK IV
8955 + pax_force_retaddr_bts
8956 ret
8957
8958 /*
8959 @@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
8960 .Lctr_enc_ret:
8961 movups IV, (IVP)
8962 .Lctr_enc_just_ret:
8963 + pax_force_retaddr 0, 1
8964 ret
8965 +ENDPROC(aesni_ctr_enc)
8966 #endif
8967 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8968 index 391d245..67f35c2 100644
8969 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8970 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8971 @@ -20,6 +20,8 @@
8972 *
8973 */
8974
8975 +#include <asm/alternative-asm.h>
8976 +
8977 .file "blowfish-x86_64-asm.S"
8978 .text
8979
8980 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
8981 jnz __enc_xor;
8982
8983 write_block();
8984 + pax_force_retaddr 0, 1
8985 ret;
8986 __enc_xor:
8987 xor_block();
8988 + pax_force_retaddr 0, 1
8989 ret;
8990
8991 .align 8
8992 @@ -188,6 +192,7 @@ blowfish_dec_blk:
8993
8994 movq %r11, %rbp;
8995
8996 + pax_force_retaddr 0, 1
8997 ret;
8998
8999 /**********************************************************************
9000 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
9001
9002 popq %rbx;
9003 popq %rbp;
9004 + pax_force_retaddr 0, 1
9005 ret;
9006
9007 __enc_xor4:
9008 @@ -349,6 +355,7 @@ __enc_xor4:
9009
9010 popq %rbx;
9011 popq %rbp;
9012 + pax_force_retaddr 0, 1
9013 ret;
9014
9015 .align 8
9016 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
9017 popq %rbx;
9018 popq %rbp;
9019
9020 + pax_force_retaddr 0, 1
9021 ret;
9022
9023 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
9024 index 0b33743..7a56206 100644
9025 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
9026 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
9027 @@ -20,6 +20,8 @@
9028 *
9029 */
9030
9031 +#include <asm/alternative-asm.h>
9032 +
9033 .file "camellia-x86_64-asm_64.S"
9034 .text
9035
9036 @@ -229,12 +231,14 @@ __enc_done:
9037 enc_outunpack(mov, RT1);
9038
9039 movq RRBP, %rbp;
9040 + pax_force_retaddr 0, 1
9041 ret;
9042
9043 __enc_xor:
9044 enc_outunpack(xor, RT1);
9045
9046 movq RRBP, %rbp;
9047 + pax_force_retaddr 0, 1
9048 ret;
9049
9050 .global camellia_dec_blk;
9051 @@ -275,6 +279,7 @@ __dec_rounds16:
9052 dec_outunpack();
9053
9054 movq RRBP, %rbp;
9055 + pax_force_retaddr 0, 1
9056 ret;
9057
9058 /**********************************************************************
9059 @@ -468,6 +473,7 @@ __enc2_done:
9060
9061 movq RRBP, %rbp;
9062 popq %rbx;
9063 + pax_force_retaddr 0, 1
9064 ret;
9065
9066 __enc2_xor:
9067 @@ -475,6 +481,7 @@ __enc2_xor:
9068
9069 movq RRBP, %rbp;
9070 popq %rbx;
9071 + pax_force_retaddr 0, 1
9072 ret;
9073
9074 .global camellia_dec_blk_2way;
9075 @@ -517,4 +524,5 @@ __dec2_rounds16:
9076
9077 movq RRBP, %rbp;
9078 movq RXOR, %rbx;
9079 + pax_force_retaddr 0, 1
9080 ret;
9081 diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9082 index a41a3aa..bdf5753 100644
9083 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9084 +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9085 @@ -23,6 +23,8 @@
9086 *
9087 */
9088
9089 +#include <asm/alternative-asm.h>
9090 +
9091 .file "cast5-avx-x86_64-asm_64.S"
9092
9093 .extern cast5_s1
9094 @@ -293,6 +295,7 @@ __skip_enc:
9095 leaq 3*(2*4*4)(%r11), %rax;
9096 outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9097
9098 + pax_force_retaddr 0, 1
9099 ret;
9100
9101 __enc_xor16:
9102 @@ -303,6 +306,7 @@ __enc_xor16:
9103 leaq 3*(2*4*4)(%r11), %rax;
9104 outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9105
9106 + pax_force_retaddr 0, 1
9107 ret;
9108
9109 .align 16
9110 @@ -369,6 +373,7 @@ __dec_tail:
9111 leaq 3*(2*4*4)(%r11), %rax;
9112 outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9113
9114 + pax_force_retaddr 0, 1
9115 ret;
9116
9117 __skip_dec:
9118 diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9119 index 218d283..819e6da 100644
9120 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9121 +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9122 @@ -23,6 +23,8 @@
9123 *
9124 */
9125
9126 +#include <asm/alternative-asm.h>
9127 +
9128 .file "cast6-avx-x86_64-asm_64.S"
9129
9130 .extern cast6_s1
9131 @@ -324,12 +326,14 @@ __cast6_enc_blk_8way:
9132 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9133 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9134
9135 + pax_force_retaddr 0, 1
9136 ret;
9137
9138 __enc_xor8:
9139 outunpack_xor_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9140 outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9141
9142 + pax_force_retaddr 0, 1
9143 ret;
9144
9145 .align 16
9146 @@ -380,4 +384,5 @@ cast6_dec_blk_8way:
9147 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9148 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9149
9150 + pax_force_retaddr 0, 1
9151 ret;
9152 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
9153 index 6214a9b..1f4fc9a 100644
9154 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
9155 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
9156 @@ -1,3 +1,5 @@
9157 +#include <asm/alternative-asm.h>
9158 +
9159 # enter ECRYPT_encrypt_bytes
9160 .text
9161 .p2align 5
9162 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
9163 add %r11,%rsp
9164 mov %rdi,%rax
9165 mov %rsi,%rdx
9166 + pax_force_retaddr 0, 1
9167 ret
9168 # bytesatleast65:
9169 ._bytesatleast65:
9170 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
9171 add %r11,%rsp
9172 mov %rdi,%rax
9173 mov %rsi,%rdx
9174 + pax_force_retaddr
9175 ret
9176 # enter ECRYPT_ivsetup
9177 .text
9178 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
9179 add %r11,%rsp
9180 mov %rdi,%rax
9181 mov %rsi,%rdx
9182 + pax_force_retaddr
9183 ret
9184 diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9185 index 504106b..4e50951 100644
9186 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9187 +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9188 @@ -24,6 +24,8 @@
9189 *
9190 */
9191
9192 +#include <asm/alternative-asm.h>
9193 +
9194 .file "serpent-avx-x86_64-asm_64.S"
9195 .text
9196
9197 @@ -638,12 +640,14 @@ __serpent_enc_blk_8way_avx:
9198 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9199 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9200
9201 + pax_force_retaddr
9202 ret;
9203
9204 __enc_xor8:
9205 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9206 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9207
9208 + pax_force_retaddr
9209 ret;
9210
9211 .align 8
9212 @@ -701,4 +705,5 @@ serpent_dec_blk_8way_avx:
9213 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
9214 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
9215
9216 + pax_force_retaddr
9217 ret;
9218 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9219 index 3ee1ff0..cbc568b 100644
9220 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9221 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9222 @@ -24,6 +24,8 @@
9223 *
9224 */
9225
9226 +#include <asm/alternative-asm.h>
9227 +
9228 .file "serpent-sse2-x86_64-asm_64.S"
9229 .text
9230
9231 @@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
9232 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9233 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9234
9235 + pax_force_retaddr
9236 ret;
9237
9238 __enc_xor8:
9239 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9240 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9241
9242 + pax_force_retaddr
9243 ret;
9244
9245 .align 8
9246 @@ -755,4 +759,5 @@ serpent_dec_blk_8way:
9247 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
9248 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
9249
9250 + pax_force_retaddr
9251 ret;
9252 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
9253 index 49d6987..df66bd4 100644
9254 --- a/arch/x86/crypto/sha1_ssse3_asm.S
9255 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
9256 @@ -28,6 +28,8 @@
9257 * (at your option) any later version.
9258 */
9259
9260 +#include <asm/alternative-asm.h>
9261 +
9262 #define CTX %rdi // arg1
9263 #define BUF %rsi // arg2
9264 #define CNT %rdx // arg3
9265 @@ -104,6 +106,7 @@
9266 pop %r12
9267 pop %rbp
9268 pop %rbx
9269 + pax_force_retaddr 0, 1
9270 ret
9271
9272 .size \name, .-\name
9273 diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9274 index 1585abb..4a9af16 100644
9275 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9276 +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9277 @@ -23,6 +23,8 @@
9278 *
9279 */
9280
9281 +#include <asm/alternative-asm.h>
9282 +
9283 .file "twofish-avx-x86_64-asm_64.S"
9284 .text
9285
9286 @@ -303,12 +305,14 @@ __twofish_enc_blk_8way:
9287 outunpack_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9288 outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9289
9290 + pax_force_retaddr
9291 ret;
9292
9293 __enc_xor8:
9294 outunpack_xor_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9295 outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9296
9297 + pax_force_retaddr
9298 ret;
9299
9300 .align 8
9301 @@ -354,4 +358,5 @@ twofish_dec_blk_8way:
9302 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
9303 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
9304
9305 + pax_force_retaddr
9306 ret;
9307 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9308 index 5b012a2..36d5364 100644
9309 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9310 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9311 @@ -20,6 +20,8 @@
9312 *
9313 */
9314
9315 +#include <asm/alternative-asm.h>
9316 +
9317 .file "twofish-x86_64-asm-3way.S"
9318 .text
9319
9320 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
9321 popq %r13;
9322 popq %r14;
9323 popq %r15;
9324 + pax_force_retaddr 0, 1
9325 ret;
9326
9327 __enc_xor3:
9328 @@ -271,6 +274,7 @@ __enc_xor3:
9329 popq %r13;
9330 popq %r14;
9331 popq %r15;
9332 + pax_force_retaddr 0, 1
9333 ret;
9334
9335 .global twofish_dec_blk_3way
9336 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
9337 popq %r13;
9338 popq %r14;
9339 popq %r15;
9340 + pax_force_retaddr 0, 1
9341 ret;
9342
9343 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
9344 index 7bcf3fc..f53832f 100644
9345 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
9346 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
9347 @@ -21,6 +21,7 @@
9348 .text
9349
9350 #include <asm/asm-offsets.h>
9351 +#include <asm/alternative-asm.h>
9352
9353 #define a_offset 0
9354 #define b_offset 4
9355 @@ -268,6 +269,7 @@ twofish_enc_blk:
9356
9357 popq R1
9358 movq $1,%rax
9359 + pax_force_retaddr 0, 1
9360 ret
9361
9362 twofish_dec_blk:
9363 @@ -319,4 +321,5 @@ twofish_dec_blk:
9364
9365 popq R1
9366 movq $1,%rax
9367 + pax_force_retaddr 0, 1
9368 ret
9369 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
9370 index 07b3a68..bd2a388 100644
9371 --- a/arch/x86/ia32/ia32_aout.c
9372 +++ b/arch/x86/ia32/ia32_aout.c
9373 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
9374 unsigned long dump_start, dump_size;
9375 struct user32 dump;
9376
9377 + memset(&dump, 0, sizeof(dump));
9378 +
9379 fs = get_fs();
9380 set_fs(KERNEL_DS);
9381 has_dumped = 1;
9382 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
9383 index efc6a95..95abfe2 100644
9384 --- a/arch/x86/ia32/ia32_signal.c
9385 +++ b/arch/x86/ia32/ia32_signal.c
9386 @@ -163,8 +163,8 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
9387 }
9388 seg = get_fs();
9389 set_fs(KERNEL_DS);
9390 - ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
9391 - (stack_t __force __user *) &uoss, regs->sp);
9392 + ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
9393 + (stack_t __force_user *) &uoss, regs->sp);
9394 set_fs(seg);
9395 if (ret >= 0 && uoss_ptr) {
9396 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
9397 @@ -396,7 +396,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
9398 sp -= frame_size;
9399 /* Align the stack pointer according to the i386 ABI,
9400 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
9401 - sp = ((sp + 4) & -16ul) - 4;
9402 + sp = ((sp - 12) & -16ul) - 4;
9403 return (void __user *) sp;
9404 }
9405
9406 @@ -454,7 +454,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
9407 * These are actually not used anymore, but left because some
9408 * gdb versions depend on them as a marker.
9409 */
9410 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
9411 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
9412 } put_user_catch(err);
9413
9414 if (err)
9415 @@ -496,7 +496,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9416 0xb8,
9417 __NR_ia32_rt_sigreturn,
9418 0x80cd,
9419 - 0,
9420 + 0
9421 };
9422
9423 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
9424 @@ -522,16 +522,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9425
9426 if (ka->sa.sa_flags & SA_RESTORER)
9427 restorer = ka->sa.sa_restorer;
9428 + else if (current->mm->context.vdso)
9429 + /* Return stub is in 32bit vsyscall page */
9430 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
9431 else
9432 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
9433 - rt_sigreturn);
9434 + restorer = &frame->retcode;
9435 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
9436
9437 /*
9438 * Not actually used anymore, but left because some gdb
9439 * versions need it.
9440 */
9441 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
9442 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
9443 } put_user_catch(err);
9444
9445 err |= copy_siginfo_to_user32(&frame->info, info);
9446 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
9447 index 076745f..ae8f6cb 100644
9448 --- a/arch/x86/ia32/ia32entry.S
9449 +++ b/arch/x86/ia32/ia32entry.S
9450 @@ -15,8 +15,10 @@
9451 #include <asm/irqflags.h>
9452 #include <asm/asm.h>
9453 #include <asm/smap.h>
9454 +#include <asm/pgtable.h>
9455 #include <linux/linkage.h>
9456 #include <linux/err.h>
9457 +#include <asm/alternative-asm.h>
9458
9459 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
9460 #include <linux/elf-em.h>
9461 @@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
9462 ENDPROC(native_irq_enable_sysexit)
9463 #endif
9464
9465 + .macro pax_enter_kernel_user
9466 + pax_set_fptr_mask
9467 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9468 + call pax_enter_kernel_user
9469 +#endif
9470 + .endm
9471 +
9472 + .macro pax_exit_kernel_user
9473 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9474 + call pax_exit_kernel_user
9475 +#endif
9476 +#ifdef CONFIG_PAX_RANDKSTACK
9477 + pushq %rax
9478 + pushq %r11
9479 + call pax_randomize_kstack
9480 + popq %r11
9481 + popq %rax
9482 +#endif
9483 + .endm
9484 +
9485 +.macro pax_erase_kstack
9486 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
9487 + call pax_erase_kstack
9488 +#endif
9489 +.endm
9490 +
9491 /*
9492 * 32bit SYSENTER instruction entry.
9493 *
9494 @@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
9495 CFI_REGISTER rsp,rbp
9496 SWAPGS_UNSAFE_STACK
9497 movq PER_CPU_VAR(kernel_stack), %rsp
9498 - addq $(KERNEL_STACK_OFFSET),%rsp
9499 - /*
9500 - * No need to follow this irqs on/off section: the syscall
9501 - * disabled irqs, here we enable it straight after entry:
9502 - */
9503 - ENABLE_INTERRUPTS(CLBR_NONE)
9504 movl %ebp,%ebp /* zero extension */
9505 pushq_cfi $__USER32_DS
9506 /*CFI_REL_OFFSET ss,0*/
9507 @@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
9508 CFI_REL_OFFSET rsp,0
9509 pushfq_cfi
9510 /*CFI_REL_OFFSET rflags,0*/
9511 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
9512 - CFI_REGISTER rip,r10
9513 + orl $X86_EFLAGS_IF,(%rsp)
9514 + GET_THREAD_INFO(%r11)
9515 + movl TI_sysenter_return(%r11), %r11d
9516 + CFI_REGISTER rip,r11
9517 pushq_cfi $__USER32_CS
9518 /*CFI_REL_OFFSET cs,0*/
9519 movl %eax, %eax
9520 - pushq_cfi %r10
9521 + pushq_cfi %r11
9522 CFI_REL_OFFSET rip,0
9523 pushq_cfi %rax
9524 cld
9525 SAVE_ARGS 0,1,0
9526 + pax_enter_kernel_user
9527 +
9528 +#ifdef CONFIG_PAX_RANDKSTACK
9529 + pax_erase_kstack
9530 +#endif
9531 +
9532 + /*
9533 + * No need to follow this irqs on/off section: the syscall
9534 + * disabled irqs, here we enable it straight after entry:
9535 + */
9536 + ENABLE_INTERRUPTS(CLBR_NONE)
9537 /* no need to do an access_ok check here because rbp has been
9538 32bit zero extended */
9539 +
9540 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9541 + mov $PAX_USER_SHADOW_BASE,%r11
9542 + add %r11,%rbp
9543 +#endif
9544 +
9545 ASM_STAC
9546 1: movl (%rbp),%ebp
9547 _ASM_EXTABLE(1b,ia32_badarg)
9548 ASM_CLAC
9549 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9550 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9551 + GET_THREAD_INFO(%r11)
9552 + orl $TS_COMPAT,TI_status(%r11)
9553 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9554 CFI_REMEMBER_STATE
9555 jnz sysenter_tracesys
9556 cmpq $(IA32_NR_syscalls-1),%rax
9557 @@ -162,12 +204,15 @@ sysenter_do_call:
9558 sysenter_dispatch:
9559 call *ia32_sys_call_table(,%rax,8)
9560 movq %rax,RAX-ARGOFFSET(%rsp)
9561 + GET_THREAD_INFO(%r11)
9562 DISABLE_INTERRUPTS(CLBR_NONE)
9563 TRACE_IRQS_OFF
9564 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9565 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9566 jnz sysexit_audit
9567 sysexit_from_sys_call:
9568 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9569 + pax_exit_kernel_user
9570 + pax_erase_kstack
9571 + andl $~TS_COMPAT,TI_status(%r11)
9572 /* clear IF, that popfq doesn't enable interrupts early */
9573 andl $~0x200,EFLAGS-R11(%rsp)
9574 movl RIP-R11(%rsp),%edx /* User %eip */
9575 @@ -193,6 +238,9 @@ sysexit_from_sys_call:
9576 movl %eax,%esi /* 2nd arg: syscall number */
9577 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
9578 call __audit_syscall_entry
9579 +
9580 + pax_erase_kstack
9581 +
9582 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
9583 cmpq $(IA32_NR_syscalls-1),%rax
9584 ja ia32_badsys
9585 @@ -204,7 +252,7 @@ sysexit_from_sys_call:
9586 .endm
9587
9588 .macro auditsys_exit exit
9589 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9590 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9591 jnz ia32_ret_from_sys_call
9592 TRACE_IRQS_ON
9593 sti
9594 @@ -215,11 +263,12 @@ sysexit_from_sys_call:
9595 1: setbe %al /* 1 if error, 0 if not */
9596 movzbl %al,%edi /* zero-extend that into %edi */
9597 call __audit_syscall_exit
9598 + GET_THREAD_INFO(%r11)
9599 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
9600 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
9601 cli
9602 TRACE_IRQS_OFF
9603 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9604 + testl %edi,TI_flags(%r11)
9605 jz \exit
9606 CLEAR_RREGS -ARGOFFSET
9607 jmp int_with_check
9608 @@ -237,7 +286,7 @@ sysexit_audit:
9609
9610 sysenter_tracesys:
9611 #ifdef CONFIG_AUDITSYSCALL
9612 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9613 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9614 jz sysenter_auditsys
9615 #endif
9616 SAVE_REST
9617 @@ -249,6 +298,9 @@ sysenter_tracesys:
9618 RESTORE_REST
9619 cmpq $(IA32_NR_syscalls-1),%rax
9620 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
9621 +
9622 + pax_erase_kstack
9623 +
9624 jmp sysenter_do_call
9625 CFI_ENDPROC
9626 ENDPROC(ia32_sysenter_target)
9627 @@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
9628 ENTRY(ia32_cstar_target)
9629 CFI_STARTPROC32 simple
9630 CFI_SIGNAL_FRAME
9631 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
9632 + CFI_DEF_CFA rsp,0
9633 CFI_REGISTER rip,rcx
9634 /*CFI_REGISTER rflags,r11*/
9635 SWAPGS_UNSAFE_STACK
9636 movl %esp,%r8d
9637 CFI_REGISTER rsp,r8
9638 movq PER_CPU_VAR(kernel_stack),%rsp
9639 + SAVE_ARGS 8*6,0,0
9640 + pax_enter_kernel_user
9641 +
9642 +#ifdef CONFIG_PAX_RANDKSTACK
9643 + pax_erase_kstack
9644 +#endif
9645 +
9646 /*
9647 * No need to follow this irqs on/off section: the syscall
9648 * disabled irqs and here we enable it straight after entry:
9649 */
9650 ENABLE_INTERRUPTS(CLBR_NONE)
9651 - SAVE_ARGS 8,0,0
9652 movl %eax,%eax /* zero extension */
9653 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
9654 movq %rcx,RIP-ARGOFFSET(%rsp)
9655 @@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
9656 /* no need to do an access_ok check here because r8 has been
9657 32bit zero extended */
9658 /* hardware stack frame is complete now */
9659 +
9660 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9661 + mov $PAX_USER_SHADOW_BASE,%r11
9662 + add %r11,%r8
9663 +#endif
9664 +
9665 ASM_STAC
9666 1: movl (%r8),%r9d
9667 _ASM_EXTABLE(1b,ia32_badarg)
9668 ASM_CLAC
9669 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9670 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9671 + GET_THREAD_INFO(%r11)
9672 + orl $TS_COMPAT,TI_status(%r11)
9673 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9674 CFI_REMEMBER_STATE
9675 jnz cstar_tracesys
9676 cmpq $IA32_NR_syscalls-1,%rax
9677 @@ -319,12 +384,15 @@ cstar_do_call:
9678 cstar_dispatch:
9679 call *ia32_sys_call_table(,%rax,8)
9680 movq %rax,RAX-ARGOFFSET(%rsp)
9681 + GET_THREAD_INFO(%r11)
9682 DISABLE_INTERRUPTS(CLBR_NONE)
9683 TRACE_IRQS_OFF
9684 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9685 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9686 jnz sysretl_audit
9687 sysretl_from_sys_call:
9688 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9689 + pax_exit_kernel_user
9690 + pax_erase_kstack
9691 + andl $~TS_COMPAT,TI_status(%r11)
9692 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
9693 movl RIP-ARGOFFSET(%rsp),%ecx
9694 CFI_REGISTER rip,rcx
9695 @@ -352,7 +420,7 @@ sysretl_audit:
9696
9697 cstar_tracesys:
9698 #ifdef CONFIG_AUDITSYSCALL
9699 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9700 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9701 jz cstar_auditsys
9702 #endif
9703 xchgl %r9d,%ebp
9704 @@ -366,6 +434,9 @@ cstar_tracesys:
9705 xchgl %ebp,%r9d
9706 cmpq $(IA32_NR_syscalls-1),%rax
9707 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
9708 +
9709 + pax_erase_kstack
9710 +
9711 jmp cstar_do_call
9712 END(ia32_cstar_target)
9713
9714 @@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
9715 CFI_REL_OFFSET rip,RIP-RIP
9716 PARAVIRT_ADJUST_EXCEPTION_FRAME
9717 SWAPGS
9718 - /*
9719 - * No need to follow this irqs on/off section: the syscall
9720 - * disabled irqs and here we enable it straight after entry:
9721 - */
9722 - ENABLE_INTERRUPTS(CLBR_NONE)
9723 movl %eax,%eax
9724 pushq_cfi %rax
9725 cld
9726 /* note the registers are not zero extended to the sf.
9727 this could be a problem. */
9728 SAVE_ARGS 0,1,0
9729 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9730 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9731 + pax_enter_kernel_user
9732 +
9733 +#ifdef CONFIG_PAX_RANDKSTACK
9734 + pax_erase_kstack
9735 +#endif
9736 +
9737 + /*
9738 + * No need to follow this irqs on/off section: the syscall
9739 + * disabled irqs and here we enable it straight after entry:
9740 + */
9741 + ENABLE_INTERRUPTS(CLBR_NONE)
9742 + GET_THREAD_INFO(%r11)
9743 + orl $TS_COMPAT,TI_status(%r11)
9744 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9745 jnz ia32_tracesys
9746 cmpq $(IA32_NR_syscalls-1),%rax
9747 ja ia32_badsys
9748 @@ -442,6 +520,9 @@ ia32_tracesys:
9749 RESTORE_REST
9750 cmpq $(IA32_NR_syscalls-1),%rax
9751 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
9752 +
9753 + pax_erase_kstack
9754 +
9755 jmp ia32_do_call
9756 END(ia32_syscall)
9757
9758 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
9759 index 86d68d1..f9960fe 100644
9760 --- a/arch/x86/ia32/sys_ia32.c
9761 +++ b/arch/x86/ia32/sys_ia32.c
9762 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9763 */
9764 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
9765 {
9766 - typeof(ubuf->st_uid) uid = 0;
9767 - typeof(ubuf->st_gid) gid = 0;
9768 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
9769 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
9770 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
9771 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
9772 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9773 @@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9774 mm_segment_t old_fs = get_fs();
9775
9776 set_fs(KERNEL_DS);
9777 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9778 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9779 set_fs(old_fs);
9780 if (put_compat_timespec(&t, interval))
9781 return -EFAULT;
9782 @@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9783 mm_segment_t old_fs = get_fs();
9784
9785 set_fs(KERNEL_DS);
9786 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9787 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9788 set_fs(old_fs);
9789 if (!ret) {
9790 switch (_NSIG_WORDS) {
9791 @@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9792 if (copy_siginfo_from_user32(&info, uinfo))
9793 return -EFAULT;
9794 set_fs(KERNEL_DS);
9795 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9796 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9797 set_fs(old_fs);
9798 return ret;
9799 }
9800 @@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9801 return -EFAULT;
9802
9803 set_fs(KERNEL_DS);
9804 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9805 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9806 count);
9807 set_fs(old_fs);
9808
9809 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9810 index 372231c..a5aa1a1 100644
9811 --- a/arch/x86/include/asm/alternative-asm.h
9812 +++ b/arch/x86/include/asm/alternative-asm.h
9813 @@ -18,6 +18,45 @@
9814 .endm
9815 #endif
9816
9817 +#ifdef KERNEXEC_PLUGIN
9818 + .macro pax_force_retaddr_bts rip=0
9819 + btsq $63,\rip(%rsp)
9820 + .endm
9821 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9822 + .macro pax_force_retaddr rip=0, reload=0
9823 + btsq $63,\rip(%rsp)
9824 + .endm
9825 + .macro pax_force_fptr ptr
9826 + btsq $63,\ptr
9827 + .endm
9828 + .macro pax_set_fptr_mask
9829 + .endm
9830 +#endif
9831 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9832 + .macro pax_force_retaddr rip=0, reload=0
9833 + .if \reload
9834 + pax_set_fptr_mask
9835 + .endif
9836 + orq %r10,\rip(%rsp)
9837 + .endm
9838 + .macro pax_force_fptr ptr
9839 + orq %r10,\ptr
9840 + .endm
9841 + .macro pax_set_fptr_mask
9842 + movabs $0x8000000000000000,%r10
9843 + .endm
9844 +#endif
9845 +#else
9846 + .macro pax_force_retaddr rip=0, reload=0
9847 + .endm
9848 + .macro pax_force_fptr ptr
9849 + .endm
9850 + .macro pax_force_retaddr_bts rip=0
9851 + .endm
9852 + .macro pax_set_fptr_mask
9853 + .endm
9854 +#endif
9855 +
9856 .macro altinstruction_entry orig alt feature orig_len alt_len
9857 .long \orig - .
9858 .long \alt - .
9859 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9860 index 58ed6d9..f1cbe58 100644
9861 --- a/arch/x86/include/asm/alternative.h
9862 +++ b/arch/x86/include/asm/alternative.h
9863 @@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9864 ".pushsection .discard,\"aw\",@progbits\n" \
9865 DISCARD_ENTRY(1) \
9866 ".popsection\n" \
9867 - ".pushsection .altinstr_replacement, \"ax\"\n" \
9868 + ".pushsection .altinstr_replacement, \"a\"\n" \
9869 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
9870 ".popsection"
9871
9872 @@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9873 DISCARD_ENTRY(1) \
9874 DISCARD_ENTRY(2) \
9875 ".popsection\n" \
9876 - ".pushsection .altinstr_replacement, \"ax\"\n" \
9877 + ".pushsection .altinstr_replacement, \"a\"\n" \
9878 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
9879 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
9880 ".popsection"
9881 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9882 index 3388034..ba52312 100644
9883 --- a/arch/x86/include/asm/apic.h
9884 +++ b/arch/x86/include/asm/apic.h
9885 @@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9886
9887 #ifdef CONFIG_X86_LOCAL_APIC
9888
9889 -extern unsigned int apic_verbosity;
9890 +extern int apic_verbosity;
9891 extern int local_apic_timer_c2_ok;
9892
9893 extern int disable_apic;
9894 @@ -391,7 +391,7 @@ struct apic {
9895 */
9896 int (*x86_32_numa_cpu_node)(int cpu);
9897 #endif
9898 -};
9899 +} __do_const;
9900
9901 /*
9902 * Pointer to the local APIC driver in use on this system (there's
9903 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9904 index 20370c6..a2eb9b0 100644
9905 --- a/arch/x86/include/asm/apm.h
9906 +++ b/arch/x86/include/asm/apm.h
9907 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9908 __asm__ __volatile__(APM_DO_ZERO_SEGS
9909 "pushl %%edi\n\t"
9910 "pushl %%ebp\n\t"
9911 - "lcall *%%cs:apm_bios_entry\n\t"
9912 + "lcall *%%ss:apm_bios_entry\n\t"
9913 "setc %%al\n\t"
9914 "popl %%ebp\n\t"
9915 "popl %%edi\n\t"
9916 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9917 __asm__ __volatile__(APM_DO_ZERO_SEGS
9918 "pushl %%edi\n\t"
9919 "pushl %%ebp\n\t"
9920 - "lcall *%%cs:apm_bios_entry\n\t"
9921 + "lcall *%%ss:apm_bios_entry\n\t"
9922 "setc %%bl\n\t"
9923 "popl %%ebp\n\t"
9924 "popl %%edi\n\t"
9925 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9926 index b6c3b82..b4c077a 100644
9927 --- a/arch/x86/include/asm/atomic.h
9928 +++ b/arch/x86/include/asm/atomic.h
9929 @@ -22,7 +22,18 @@
9930 */
9931 static inline int atomic_read(const atomic_t *v)
9932 {
9933 - return (*(volatile int *)&(v)->counter);
9934 + return (*(volatile const int *)&(v)->counter);
9935 +}
9936 +
9937 +/**
9938 + * atomic_read_unchecked - read atomic variable
9939 + * @v: pointer of type atomic_unchecked_t
9940 + *
9941 + * Atomically reads the value of @v.
9942 + */
9943 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9944 +{
9945 + return (*(volatile const int *)&(v)->counter);
9946 }
9947
9948 /**
9949 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9950 }
9951
9952 /**
9953 + * atomic_set_unchecked - set atomic variable
9954 + * @v: pointer of type atomic_unchecked_t
9955 + * @i: required value
9956 + *
9957 + * Atomically sets the value of @v to @i.
9958 + */
9959 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9960 +{
9961 + v->counter = i;
9962 +}
9963 +
9964 +/**
9965 * atomic_add - add integer to atomic variable
9966 * @i: integer value to add
9967 * @v: pointer of type atomic_t
9968 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9969 */
9970 static inline void atomic_add(int i, atomic_t *v)
9971 {
9972 - asm volatile(LOCK_PREFIX "addl %1,%0"
9973 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9974 +
9975 +#ifdef CONFIG_PAX_REFCOUNT
9976 + "jno 0f\n"
9977 + LOCK_PREFIX "subl %1,%0\n"
9978 + "int $4\n0:\n"
9979 + _ASM_EXTABLE(0b, 0b)
9980 +#endif
9981 +
9982 + : "+m" (v->counter)
9983 + : "ir" (i));
9984 +}
9985 +
9986 +/**
9987 + * atomic_add_unchecked - add integer to atomic variable
9988 + * @i: integer value to add
9989 + * @v: pointer of type atomic_unchecked_t
9990 + *
9991 + * Atomically adds @i to @v.
9992 + */
9993 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9994 +{
9995 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9996 : "+m" (v->counter)
9997 : "ir" (i));
9998 }
9999 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
10000 */
10001 static inline void atomic_sub(int i, atomic_t *v)
10002 {
10003 - asm volatile(LOCK_PREFIX "subl %1,%0"
10004 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
10005 +
10006 +#ifdef CONFIG_PAX_REFCOUNT
10007 + "jno 0f\n"
10008 + LOCK_PREFIX "addl %1,%0\n"
10009 + "int $4\n0:\n"
10010 + _ASM_EXTABLE(0b, 0b)
10011 +#endif
10012 +
10013 + : "+m" (v->counter)
10014 + : "ir" (i));
10015 +}
10016 +
10017 +/**
10018 + * atomic_sub_unchecked - subtract integer from atomic variable
10019 + * @i: integer value to subtract
10020 + * @v: pointer of type atomic_unchecked_t
10021 + *
10022 + * Atomically subtracts @i from @v.
10023 + */
10024 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
10025 +{
10026 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
10027 : "+m" (v->counter)
10028 : "ir" (i));
10029 }
10030 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10031 {
10032 unsigned char c;
10033
10034 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
10035 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
10036 +
10037 +#ifdef CONFIG_PAX_REFCOUNT
10038 + "jno 0f\n"
10039 + LOCK_PREFIX "addl %2,%0\n"
10040 + "int $4\n0:\n"
10041 + _ASM_EXTABLE(0b, 0b)
10042 +#endif
10043 +
10044 + "sete %1\n"
10045 : "+m" (v->counter), "=qm" (c)
10046 : "ir" (i) : "memory");
10047 return c;
10048 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10049 */
10050 static inline void atomic_inc(atomic_t *v)
10051 {
10052 - asm volatile(LOCK_PREFIX "incl %0"
10053 + asm volatile(LOCK_PREFIX "incl %0\n"
10054 +
10055 +#ifdef CONFIG_PAX_REFCOUNT
10056 + "jno 0f\n"
10057 + LOCK_PREFIX "decl %0\n"
10058 + "int $4\n0:\n"
10059 + _ASM_EXTABLE(0b, 0b)
10060 +#endif
10061 +
10062 + : "+m" (v->counter));
10063 +}
10064 +
10065 +/**
10066 + * atomic_inc_unchecked - increment atomic variable
10067 + * @v: pointer of type atomic_unchecked_t
10068 + *
10069 + * Atomically increments @v by 1.
10070 + */
10071 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10072 +{
10073 + asm volatile(LOCK_PREFIX "incl %0\n"
10074 : "+m" (v->counter));
10075 }
10076
10077 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
10078 */
10079 static inline void atomic_dec(atomic_t *v)
10080 {
10081 - asm volatile(LOCK_PREFIX "decl %0"
10082 + asm volatile(LOCK_PREFIX "decl %0\n"
10083 +
10084 +#ifdef CONFIG_PAX_REFCOUNT
10085 + "jno 0f\n"
10086 + LOCK_PREFIX "incl %0\n"
10087 + "int $4\n0:\n"
10088 + _ASM_EXTABLE(0b, 0b)
10089 +#endif
10090 +
10091 + : "+m" (v->counter));
10092 +}
10093 +
10094 +/**
10095 + * atomic_dec_unchecked - decrement atomic variable
10096 + * @v: pointer of type atomic_unchecked_t
10097 + *
10098 + * Atomically decrements @v by 1.
10099 + */
10100 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10101 +{
10102 + asm volatile(LOCK_PREFIX "decl %0\n"
10103 : "+m" (v->counter));
10104 }
10105
10106 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
10107 {
10108 unsigned char c;
10109
10110 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
10111 + asm volatile(LOCK_PREFIX "decl %0\n"
10112 +
10113 +#ifdef CONFIG_PAX_REFCOUNT
10114 + "jno 0f\n"
10115 + LOCK_PREFIX "incl %0\n"
10116 + "int $4\n0:\n"
10117 + _ASM_EXTABLE(0b, 0b)
10118 +#endif
10119 +
10120 + "sete %1\n"
10121 : "+m" (v->counter), "=qm" (c)
10122 : : "memory");
10123 return c != 0;
10124 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
10125 {
10126 unsigned char c;
10127
10128 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
10129 + asm volatile(LOCK_PREFIX "incl %0\n"
10130 +
10131 +#ifdef CONFIG_PAX_REFCOUNT
10132 + "jno 0f\n"
10133 + LOCK_PREFIX "decl %0\n"
10134 + "int $4\n0:\n"
10135 + _ASM_EXTABLE(0b, 0b)
10136 +#endif
10137 +
10138 + "sete %1\n"
10139 + : "+m" (v->counter), "=qm" (c)
10140 + : : "memory");
10141 + return c != 0;
10142 +}
10143 +
10144 +/**
10145 + * atomic_inc_and_test_unchecked - increment and test
10146 + * @v: pointer of type atomic_unchecked_t
10147 + *
10148 + * Atomically increments @v by 1
10149 + * and returns true if the result is zero, or false for all
10150 + * other cases.
10151 + */
10152 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10153 +{
10154 + unsigned char c;
10155 +
10156 + asm volatile(LOCK_PREFIX "incl %0\n"
10157 + "sete %1\n"
10158 : "+m" (v->counter), "=qm" (c)
10159 : : "memory");
10160 return c != 0;
10161 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10162 {
10163 unsigned char c;
10164
10165 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
10166 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
10167 +
10168 +#ifdef CONFIG_PAX_REFCOUNT
10169 + "jno 0f\n"
10170 + LOCK_PREFIX "subl %2,%0\n"
10171 + "int $4\n0:\n"
10172 + _ASM_EXTABLE(0b, 0b)
10173 +#endif
10174 +
10175 + "sets %1\n"
10176 : "+m" (v->counter), "=qm" (c)
10177 : "ir" (i) : "memory");
10178 return c;
10179 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
10180 goto no_xadd;
10181 #endif
10182 /* Modern 486+ processor */
10183 - return i + xadd(&v->counter, i);
10184 + return i + xadd_check_overflow(&v->counter, i);
10185
10186 #ifdef CONFIG_M386
10187 no_xadd: /* Legacy 386 processor */
10188 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
10189 }
10190
10191 /**
10192 + * atomic_add_return_unchecked - add integer and return
10193 + * @i: integer value to add
10194 + * @v: pointer of type atomic_unchecked_t
10195 + *
10196 + * Atomically adds @i to @v and returns @i + @v
10197 + */
10198 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10199 +{
10200 +#ifdef CONFIG_M386
10201 + int __i;
10202 + unsigned long flags;
10203 + if (unlikely(boot_cpu_data.x86 <= 3))
10204 + goto no_xadd;
10205 +#endif
10206 + /* Modern 486+ processor */
10207 + return i + xadd(&v->counter, i);
10208 +
10209 +#ifdef CONFIG_M386
10210 +no_xadd: /* Legacy 386 processor */
10211 + raw_local_irq_save(flags);
10212 + __i = atomic_read_unchecked(v);
10213 + atomic_set_unchecked(v, i + __i);
10214 + raw_local_irq_restore(flags);
10215 + return i + __i;
10216 +#endif
10217 +}
10218 +
10219 +/**
10220 * atomic_sub_return - subtract integer and return
10221 * @v: pointer of type atomic_t
10222 * @i: integer value to subtract
10223 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10224 }
10225
10226 #define atomic_inc_return(v) (atomic_add_return(1, v))
10227 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10228 +{
10229 + return atomic_add_return_unchecked(1, v);
10230 +}
10231 #define atomic_dec_return(v) (atomic_sub_return(1, v))
10232
10233 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10234 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10235 return cmpxchg(&v->counter, old, new);
10236 }
10237
10238 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10239 +{
10240 + return cmpxchg(&v->counter, old, new);
10241 +}
10242 +
10243 static inline int atomic_xchg(atomic_t *v, int new)
10244 {
10245 return xchg(&v->counter, new);
10246 }
10247
10248 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10249 +{
10250 + return xchg(&v->counter, new);
10251 +}
10252 +
10253 /**
10254 * __atomic_add_unless - add unless the number is already a given value
10255 * @v: pointer of type atomic_t
10256 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
10257 */
10258 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10259 {
10260 - int c, old;
10261 + int c, old, new;
10262 c = atomic_read(v);
10263 for (;;) {
10264 - if (unlikely(c == (u)))
10265 + if (unlikely(c == u))
10266 break;
10267 - old = atomic_cmpxchg((v), c, c + (a));
10268 +
10269 + asm volatile("addl %2,%0\n"
10270 +
10271 +#ifdef CONFIG_PAX_REFCOUNT
10272 + "jno 0f\n"
10273 + "subl %2,%0\n"
10274 + "int $4\n0:\n"
10275 + _ASM_EXTABLE(0b, 0b)
10276 +#endif
10277 +
10278 + : "=r" (new)
10279 + : "0" (c), "ir" (a));
10280 +
10281 + old = atomic_cmpxchg(v, c, new);
10282 if (likely(old == c))
10283 break;
10284 c = old;
10285 @@ -241,6 +458,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10286 }
10287
10288 /**
10289 + * atomic_inc_not_zero_hint - increment if not null
10290 + * @v: pointer of type atomic_t
10291 + * @hint: probable value of the atomic before the increment
10292 + *
10293 + * This version of atomic_inc_not_zero() gives a hint of probable
10294 + * value of the atomic. This helps processor to not read the memory
10295 + * before doing the atomic read/modify/write cycle, lowering
10296 + * number of bus transactions on some arches.
10297 + *
10298 + * Returns: 0 if increment was not done, 1 otherwise.
10299 + */
10300 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
10301 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
10302 +{
10303 + int val, c = hint, new;
10304 +
10305 + /* sanity test, should be removed by compiler if hint is a constant */
10306 + if (!hint)
10307 + return __atomic_add_unless(v, 1, 0);
10308 +
10309 + do {
10310 + asm volatile("incl %0\n"
10311 +
10312 +#ifdef CONFIG_PAX_REFCOUNT
10313 + "jno 0f\n"
10314 + "decl %0\n"
10315 + "int $4\n0:\n"
10316 + _ASM_EXTABLE(0b, 0b)
10317 +#endif
10318 +
10319 + : "=r" (new)
10320 + : "0" (c));
10321 +
10322 + val = atomic_cmpxchg(v, c, new);
10323 + if (val == c)
10324 + return 1;
10325 + c = val;
10326 + } while (c);
10327 +
10328 + return 0;
10329 +}
10330 +
10331 +/**
10332 * atomic_inc_short - increment of a short integer
10333 * @v: pointer to type int
10334 *
10335 @@ -269,14 +529,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
10336 #endif
10337
10338 /* These are x86-specific, used by some header files */
10339 -#define atomic_clear_mask(mask, addr) \
10340 - asm volatile(LOCK_PREFIX "andl %0,%1" \
10341 - : : "r" (~(mask)), "m" (*(addr)) : "memory")
10342 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
10343 +{
10344 + asm volatile(LOCK_PREFIX "andl %1,%0"
10345 + : "+m" (v->counter)
10346 + : "r" (~(mask))
10347 + : "memory");
10348 +}
10349
10350 -#define atomic_set_mask(mask, addr) \
10351 - asm volatile(LOCK_PREFIX "orl %0,%1" \
10352 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \
10353 - : "memory")
10354 +static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
10355 +{
10356 + asm volatile(LOCK_PREFIX "andl %1,%0"
10357 + : "+m" (v->counter)
10358 + : "r" (~(mask))
10359 + : "memory");
10360 +}
10361 +
10362 +static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
10363 +{
10364 + asm volatile(LOCK_PREFIX "orl %1,%0"
10365 + : "+m" (v->counter)
10366 + : "r" (mask)
10367 + : "memory");
10368 +}
10369 +
10370 +static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
10371 +{
10372 + asm volatile(LOCK_PREFIX "orl %1,%0"
10373 + : "+m" (v->counter)
10374 + : "r" (mask)
10375 + : "memory");
10376 +}
10377
10378 /* Atomic operations are already serializing on x86 */
10379 #define smp_mb__before_atomic_dec() barrier()
10380 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
10381 index b154de7..aadebd8 100644
10382 --- a/arch/x86/include/asm/atomic64_32.h
10383 +++ b/arch/x86/include/asm/atomic64_32.h
10384 @@ -12,6 +12,14 @@ typedef struct {
10385 u64 __aligned(8) counter;
10386 } atomic64_t;
10387
10388 +#ifdef CONFIG_PAX_REFCOUNT
10389 +typedef struct {
10390 + u64 __aligned(8) counter;
10391 +} atomic64_unchecked_t;
10392 +#else
10393 +typedef atomic64_t atomic64_unchecked_t;
10394 +#endif
10395 +
10396 #define ATOMIC64_INIT(val) { (val) }
10397
10398 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
10399 @@ -37,21 +45,31 @@ typedef struct {
10400 ATOMIC64_DECL_ONE(sym##_386)
10401
10402 ATOMIC64_DECL_ONE(add_386);
10403 +ATOMIC64_DECL_ONE(add_unchecked_386);
10404 ATOMIC64_DECL_ONE(sub_386);
10405 +ATOMIC64_DECL_ONE(sub_unchecked_386);
10406 ATOMIC64_DECL_ONE(inc_386);
10407 +ATOMIC64_DECL_ONE(inc_unchecked_386);
10408 ATOMIC64_DECL_ONE(dec_386);
10409 +ATOMIC64_DECL_ONE(dec_unchecked_386);
10410 #endif
10411
10412 #define alternative_atomic64(f, out, in...) \
10413 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
10414
10415 ATOMIC64_DECL(read);
10416 +ATOMIC64_DECL(read_unchecked);
10417 ATOMIC64_DECL(set);
10418 +ATOMIC64_DECL(set_unchecked);
10419 ATOMIC64_DECL(xchg);
10420 ATOMIC64_DECL(add_return);
10421 +ATOMIC64_DECL(add_return_unchecked);
10422 ATOMIC64_DECL(sub_return);
10423 +ATOMIC64_DECL(sub_return_unchecked);
10424 ATOMIC64_DECL(inc_return);
10425 +ATOMIC64_DECL(inc_return_unchecked);
10426 ATOMIC64_DECL(dec_return);
10427 +ATOMIC64_DECL(dec_return_unchecked);
10428 ATOMIC64_DECL(dec_if_positive);
10429 ATOMIC64_DECL(inc_not_zero);
10430 ATOMIC64_DECL(add_unless);
10431 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
10432 }
10433
10434 /**
10435 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
10436 + * @p: pointer to type atomic64_unchecked_t
10437 + * @o: expected value
10438 + * @n: new value
10439 + *
10440 + * Atomically sets @v to @n if it was equal to @o and returns
10441 + * the old value.
10442 + */
10443 +
10444 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
10445 +{
10446 + return cmpxchg64(&v->counter, o, n);
10447 +}
10448 +
10449 +/**
10450 * atomic64_xchg - xchg atomic64 variable
10451 * @v: pointer to type atomic64_t
10452 * @n: value to assign
10453 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
10454 }
10455
10456 /**
10457 + * atomic64_set_unchecked - set atomic64 variable
10458 + * @v: pointer to type atomic64_unchecked_t
10459 + * @n: value to assign
10460 + *
10461 + * Atomically sets the value of @v to @n.
10462 + */
10463 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
10464 +{
10465 + unsigned high = (unsigned)(i >> 32);
10466 + unsigned low = (unsigned)i;
10467 + alternative_atomic64(set, /* no output */,
10468 + "S" (v), "b" (low), "c" (high)
10469 + : "eax", "edx", "memory");
10470 +}
10471 +
10472 +/**
10473 * atomic64_read - read atomic64 variable
10474 * @v: pointer to type atomic64_t
10475 *
10476 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
10477 }
10478
10479 /**
10480 + * atomic64_read_unchecked - read atomic64 variable
10481 + * @v: pointer to type atomic64_unchecked_t
10482 + *
10483 + * Atomically reads the value of @v and returns it.
10484 + */
10485 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
10486 +{
10487 + long long r;
10488 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
10489 + return r;
10490 + }
10491 +
10492 +/**
10493 * atomic64_add_return - add and return
10494 * @i: integer value to add
10495 * @v: pointer to type atomic64_t
10496 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
10497 return i;
10498 }
10499
10500 +/**
10501 + * atomic64_add_return_unchecked - add and return
10502 + * @i: integer value to add
10503 + * @v: pointer to type atomic64_unchecked_t
10504 + *
10505 + * Atomically adds @i to @v and returns @i + *@v
10506 + */
10507 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
10508 +{
10509 + alternative_atomic64(add_return_unchecked,
10510 + ASM_OUTPUT2("+A" (i), "+c" (v)),
10511 + ASM_NO_INPUT_CLOBBER("memory"));
10512 + return i;
10513 +}
10514 +
10515 /*
10516 * Other variants with different arithmetic operators:
10517 */
10518 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
10519 return a;
10520 }
10521
10522 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10523 +{
10524 + long long a;
10525 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
10526 + "S" (v) : "memory", "ecx");
10527 + return a;
10528 +}
10529 +
10530 static inline long long atomic64_dec_return(atomic64_t *v)
10531 {
10532 long long a;
10533 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
10534 }
10535
10536 /**
10537 + * atomic64_add_unchecked - add integer to atomic64 variable
10538 + * @i: integer value to add
10539 + * @v: pointer to type atomic64_unchecked_t
10540 + *
10541 + * Atomically adds @i to @v.
10542 + */
10543 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
10544 +{
10545 + __alternative_atomic64(add_unchecked, add_return_unchecked,
10546 + ASM_OUTPUT2("+A" (i), "+c" (v)),
10547 + ASM_NO_INPUT_CLOBBER("memory"));
10548 + return i;
10549 +}
10550 +
10551 +/**
10552 * atomic64_sub - subtract the atomic64 variable
10553 * @i: integer value to subtract
10554 * @v: pointer to type atomic64_t
10555 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
10556 index 0e1cbfc..5623683 100644
10557 --- a/arch/x86/include/asm/atomic64_64.h
10558 +++ b/arch/x86/include/asm/atomic64_64.h
10559 @@ -18,7 +18,19 @@
10560 */
10561 static inline long atomic64_read(const atomic64_t *v)
10562 {
10563 - return (*(volatile long *)&(v)->counter);
10564 + return (*(volatile const long *)&(v)->counter);
10565 +}
10566 +
10567 +/**
10568 + * atomic64_read_unchecked - read atomic64 variable
10569 + * @v: pointer of type atomic64_unchecked_t
10570 + *
10571 + * Atomically reads the value of @v.
10572 + * Doesn't imply a read memory barrier.
10573 + */
10574 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10575 +{
10576 + return (*(volatile const long *)&(v)->counter);
10577 }
10578
10579 /**
10580 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
10581 }
10582
10583 /**
10584 + * atomic64_set_unchecked - set atomic64 variable
10585 + * @v: pointer to type atomic64_unchecked_t
10586 + * @i: required value
10587 + *
10588 + * Atomically sets the value of @v to @i.
10589 + */
10590 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10591 +{
10592 + v->counter = i;
10593 +}
10594 +
10595 +/**
10596 * atomic64_add - add integer to atomic64 variable
10597 * @i: integer value to add
10598 * @v: pointer to type atomic64_t
10599 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
10600 */
10601 static inline void atomic64_add(long i, atomic64_t *v)
10602 {
10603 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
10604 +
10605 +#ifdef CONFIG_PAX_REFCOUNT
10606 + "jno 0f\n"
10607 + LOCK_PREFIX "subq %1,%0\n"
10608 + "int $4\n0:\n"
10609 + _ASM_EXTABLE(0b, 0b)
10610 +#endif
10611 +
10612 + : "=m" (v->counter)
10613 + : "er" (i), "m" (v->counter));
10614 +}
10615 +
10616 +/**
10617 + * atomic64_add_unchecked - add integer to atomic64 variable
10618 + * @i: integer value to add
10619 + * @v: pointer to type atomic64_unchecked_t
10620 + *
10621 + * Atomically adds @i to @v.
10622 + */
10623 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
10624 +{
10625 asm volatile(LOCK_PREFIX "addq %1,%0"
10626 : "=m" (v->counter)
10627 : "er" (i), "m" (v->counter));
10628 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
10629 */
10630 static inline void atomic64_sub(long i, atomic64_t *v)
10631 {
10632 - asm volatile(LOCK_PREFIX "subq %1,%0"
10633 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
10634 +
10635 +#ifdef CONFIG_PAX_REFCOUNT
10636 + "jno 0f\n"
10637 + LOCK_PREFIX "addq %1,%0\n"
10638 + "int $4\n0:\n"
10639 + _ASM_EXTABLE(0b, 0b)
10640 +#endif
10641 +
10642 + : "=m" (v->counter)
10643 + : "er" (i), "m" (v->counter));
10644 +}
10645 +
10646 +/**
10647 + * atomic64_sub_unchecked - subtract the atomic64 variable
10648 + * @i: integer value to subtract
10649 + * @v: pointer to type atomic64_unchecked_t
10650 + *
10651 + * Atomically subtracts @i from @v.
10652 + */
10653 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
10654 +{
10655 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
10656 : "=m" (v->counter)
10657 : "er" (i), "m" (v->counter));
10658 }
10659 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10660 {
10661 unsigned char c;
10662
10663 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
10664 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
10665 +
10666 +#ifdef CONFIG_PAX_REFCOUNT
10667 + "jno 0f\n"
10668 + LOCK_PREFIX "addq %2,%0\n"
10669 + "int $4\n0:\n"
10670 + _ASM_EXTABLE(0b, 0b)
10671 +#endif
10672 +
10673 + "sete %1\n"
10674 : "=m" (v->counter), "=qm" (c)
10675 : "er" (i), "m" (v->counter) : "memory");
10676 return c;
10677 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10678 */
10679 static inline void atomic64_inc(atomic64_t *v)
10680 {
10681 + asm volatile(LOCK_PREFIX "incq %0\n"
10682 +
10683 +#ifdef CONFIG_PAX_REFCOUNT
10684 + "jno 0f\n"
10685 + LOCK_PREFIX "decq %0\n"
10686 + "int $4\n0:\n"
10687 + _ASM_EXTABLE(0b, 0b)
10688 +#endif
10689 +
10690 + : "=m" (v->counter)
10691 + : "m" (v->counter));
10692 +}
10693 +
10694 +/**
10695 + * atomic64_inc_unchecked - increment atomic64 variable
10696 + * @v: pointer to type atomic64_unchecked_t
10697 + *
10698 + * Atomically increments @v by 1.
10699 + */
10700 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10701 +{
10702 asm volatile(LOCK_PREFIX "incq %0"
10703 : "=m" (v->counter)
10704 : "m" (v->counter));
10705 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
10706 */
10707 static inline void atomic64_dec(atomic64_t *v)
10708 {
10709 - asm volatile(LOCK_PREFIX "decq %0"
10710 + asm volatile(LOCK_PREFIX "decq %0\n"
10711 +
10712 +#ifdef CONFIG_PAX_REFCOUNT
10713 + "jno 0f\n"
10714 + LOCK_PREFIX "incq %0\n"
10715 + "int $4\n0:\n"
10716 + _ASM_EXTABLE(0b, 0b)
10717 +#endif
10718 +
10719 + : "=m" (v->counter)
10720 + : "m" (v->counter));
10721 +}
10722 +
10723 +/**
10724 + * atomic64_dec_unchecked - decrement atomic64 variable
10725 + * @v: pointer to type atomic64_t
10726 + *
10727 + * Atomically decrements @v by 1.
10728 + */
10729 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10730 +{
10731 + asm volatile(LOCK_PREFIX "decq %0\n"
10732 : "=m" (v->counter)
10733 : "m" (v->counter));
10734 }
10735 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
10736 {
10737 unsigned char c;
10738
10739 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
10740 + asm volatile(LOCK_PREFIX "decq %0\n"
10741 +
10742 +#ifdef CONFIG_PAX_REFCOUNT
10743 + "jno 0f\n"
10744 + LOCK_PREFIX "incq %0\n"
10745 + "int $4\n0:\n"
10746 + _ASM_EXTABLE(0b, 0b)
10747 +#endif
10748 +
10749 + "sete %1\n"
10750 : "=m" (v->counter), "=qm" (c)
10751 : "m" (v->counter) : "memory");
10752 return c != 0;
10753 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
10754 {
10755 unsigned char c;
10756
10757 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
10758 + asm volatile(LOCK_PREFIX "incq %0\n"
10759 +
10760 +#ifdef CONFIG_PAX_REFCOUNT
10761 + "jno 0f\n"
10762 + LOCK_PREFIX "decq %0\n"
10763 + "int $4\n0:\n"
10764 + _ASM_EXTABLE(0b, 0b)
10765 +#endif
10766 +
10767 + "sete %1\n"
10768 : "=m" (v->counter), "=qm" (c)
10769 : "m" (v->counter) : "memory");
10770 return c != 0;
10771 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10772 {
10773 unsigned char c;
10774
10775 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
10776 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
10777 +
10778 +#ifdef CONFIG_PAX_REFCOUNT
10779 + "jno 0f\n"
10780 + LOCK_PREFIX "subq %2,%0\n"
10781 + "int $4\n0:\n"
10782 + _ASM_EXTABLE(0b, 0b)
10783 +#endif
10784 +
10785 + "sets %1\n"
10786 : "=m" (v->counter), "=qm" (c)
10787 : "er" (i), "m" (v->counter) : "memory");
10788 return c;
10789 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10790 */
10791 static inline long atomic64_add_return(long i, atomic64_t *v)
10792 {
10793 + return i + xadd_check_overflow(&v->counter, i);
10794 +}
10795 +
10796 +/**
10797 + * atomic64_add_return_unchecked - add and return
10798 + * @i: integer value to add
10799 + * @v: pointer to type atomic64_unchecked_t
10800 + *
10801 + * Atomically adds @i to @v and returns @i + @v
10802 + */
10803 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10804 +{
10805 return i + xadd(&v->counter, i);
10806 }
10807
10808 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10809 }
10810
10811 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10812 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10813 +{
10814 + return atomic64_add_return_unchecked(1, v);
10815 +}
10816 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10817
10818 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10819 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10820 return cmpxchg(&v->counter, old, new);
10821 }
10822
10823 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10824 +{
10825 + return cmpxchg(&v->counter, old, new);
10826 +}
10827 +
10828 static inline long atomic64_xchg(atomic64_t *v, long new)
10829 {
10830 return xchg(&v->counter, new);
10831 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
10832 */
10833 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10834 {
10835 - long c, old;
10836 + long c, old, new;
10837 c = atomic64_read(v);
10838 for (;;) {
10839 - if (unlikely(c == (u)))
10840 + if (unlikely(c == u))
10841 break;
10842 - old = atomic64_cmpxchg((v), c, c + (a));
10843 +
10844 + asm volatile("add %2,%0\n"
10845 +
10846 +#ifdef CONFIG_PAX_REFCOUNT
10847 + "jno 0f\n"
10848 + "sub %2,%0\n"
10849 + "int $4\n0:\n"
10850 + _ASM_EXTABLE(0b, 0b)
10851 +#endif
10852 +
10853 + : "=r" (new)
10854 + : "0" (c), "ir" (a));
10855 +
10856 + old = atomic64_cmpxchg(v, c, new);
10857 if (likely(old == c))
10858 break;
10859 c = old;
10860 }
10861 - return c != (u);
10862 + return c != u;
10863 }
10864
10865 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10866 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10867 index 6dfd019..0c6699f 100644
10868 --- a/arch/x86/include/asm/bitops.h
10869 +++ b/arch/x86/include/asm/bitops.h
10870 @@ -40,7 +40,7 @@
10871 * a mask operation on a byte.
10872 */
10873 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10874 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10875 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10876 #define CONST_MASK(nr) (1 << ((nr) & 7))
10877
10878 /**
10879 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10880 index b13fe63..0dab13a 100644
10881 --- a/arch/x86/include/asm/boot.h
10882 +++ b/arch/x86/include/asm/boot.h
10883 @@ -11,10 +11,15 @@
10884 #include <asm/pgtable_types.h>
10885
10886 /* Physical address where kernel should be loaded. */
10887 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10888 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10889 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10890 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10891
10892 +#ifndef __ASSEMBLY__
10893 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
10894 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10895 +#endif
10896 +
10897 /* Minimum kernel alignment, as a power of two */
10898 #ifdef CONFIG_X86_64
10899 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10900 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10901 index 48f99f1..d78ebf9 100644
10902 --- a/arch/x86/include/asm/cache.h
10903 +++ b/arch/x86/include/asm/cache.h
10904 @@ -5,12 +5,13 @@
10905
10906 /* L1 cache line size */
10907 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10908 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10909 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10910
10911 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10912 +#define __read_only __attribute__((__section__(".data..read_only")))
10913
10914 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10915 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10916 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10917
10918 #ifdef CONFIG_X86_VSMP
10919 #ifdef CONFIG_SMP
10920 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10921 index 9863ee3..4a1f8e1 100644
10922 --- a/arch/x86/include/asm/cacheflush.h
10923 +++ b/arch/x86/include/asm/cacheflush.h
10924 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10925 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10926
10927 if (pg_flags == _PGMT_DEFAULT)
10928 - return -1;
10929 + return ~0UL;
10930 else if (pg_flags == _PGMT_WC)
10931 return _PAGE_CACHE_WC;
10932 else if (pg_flags == _PGMT_UC_MINUS)
10933 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10934 index 46fc474..b02b0f9 100644
10935 --- a/arch/x86/include/asm/checksum_32.h
10936 +++ b/arch/x86/include/asm/checksum_32.h
10937 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10938 int len, __wsum sum,
10939 int *src_err_ptr, int *dst_err_ptr);
10940
10941 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10942 + int len, __wsum sum,
10943 + int *src_err_ptr, int *dst_err_ptr);
10944 +
10945 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10946 + int len, __wsum sum,
10947 + int *src_err_ptr, int *dst_err_ptr);
10948 +
10949 /*
10950 * Note: when you get a NULL pointer exception here this means someone
10951 * passed in an incorrect kernel address to one of these functions.
10952 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10953 int *err_ptr)
10954 {
10955 might_sleep();
10956 - return csum_partial_copy_generic((__force void *)src, dst,
10957 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10958 len, sum, err_ptr, NULL);
10959 }
10960
10961 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10962 {
10963 might_sleep();
10964 if (access_ok(VERIFY_WRITE, dst, len))
10965 - return csum_partial_copy_generic(src, (__force void *)dst,
10966 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10967 len, sum, NULL, err_ptr);
10968
10969 if (len)
10970 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10971 index 8d871ea..c1a0dc9 100644
10972 --- a/arch/x86/include/asm/cmpxchg.h
10973 +++ b/arch/x86/include/asm/cmpxchg.h
10974 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10975 __compiletime_error("Bad argument size for cmpxchg");
10976 extern void __xadd_wrong_size(void)
10977 __compiletime_error("Bad argument size for xadd");
10978 +extern void __xadd_check_overflow_wrong_size(void)
10979 + __compiletime_error("Bad argument size for xadd_check_overflow");
10980 extern void __add_wrong_size(void)
10981 __compiletime_error("Bad argument size for add");
10982 +extern void __add_check_overflow_wrong_size(void)
10983 + __compiletime_error("Bad argument size for add_check_overflow");
10984
10985 /*
10986 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10987 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10988 __ret; \
10989 })
10990
10991 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10992 + ({ \
10993 + __typeof__ (*(ptr)) __ret = (arg); \
10994 + switch (sizeof(*(ptr))) { \
10995 + case __X86_CASE_L: \
10996 + asm volatile (lock #op "l %0, %1\n" \
10997 + "jno 0f\n" \
10998 + "mov %0,%1\n" \
10999 + "int $4\n0:\n" \
11000 + _ASM_EXTABLE(0b, 0b) \
11001 + : "+r" (__ret), "+m" (*(ptr)) \
11002 + : : "memory", "cc"); \
11003 + break; \
11004 + case __X86_CASE_Q: \
11005 + asm volatile (lock #op "q %q0, %1\n" \
11006 + "jno 0f\n" \
11007 + "mov %0,%1\n" \
11008 + "int $4\n0:\n" \
11009 + _ASM_EXTABLE(0b, 0b) \
11010 + : "+r" (__ret), "+m" (*(ptr)) \
11011 + : : "memory", "cc"); \
11012 + break; \
11013 + default: \
11014 + __ ## op ## _check_overflow_wrong_size(); \
11015 + } \
11016 + __ret; \
11017 + })
11018 +
11019 /*
11020 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
11021 * Since this is generally used to protect other memory information, we
11022 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
11023 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
11024 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
11025
11026 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
11027 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
11028 +
11029 #define __add(ptr, inc, lock) \
11030 ({ \
11031 __typeof__ (*(ptr)) __ret = (inc); \
11032 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
11033 index 8c297aa..7a90f03 100644
11034 --- a/arch/x86/include/asm/cpufeature.h
11035 +++ b/arch/x86/include/asm/cpufeature.h
11036 @@ -205,7 +205,7 @@
11037 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
11038 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
11039 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
11040 -#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
11041 +#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
11042 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
11043 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
11044 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
11045 @@ -379,7 +379,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
11046 ".section .discard,\"aw\",@progbits\n"
11047 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
11048 ".previous\n"
11049 - ".section .altinstr_replacement,\"ax\"\n"
11050 + ".section .altinstr_replacement,\"a\"\n"
11051 "3: movb $1,%0\n"
11052 "4:\n"
11053 ".previous\n"
11054 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
11055 index 8bf1c06..f723dfd 100644
11056 --- a/arch/x86/include/asm/desc.h
11057 +++ b/arch/x86/include/asm/desc.h
11058 @@ -4,6 +4,7 @@
11059 #include <asm/desc_defs.h>
11060 #include <asm/ldt.h>
11061 #include <asm/mmu.h>
11062 +#include <asm/pgtable.h>
11063
11064 #include <linux/smp.h>
11065 #include <linux/percpu.h>
11066 @@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
11067
11068 desc->type = (info->read_exec_only ^ 1) << 1;
11069 desc->type |= info->contents << 2;
11070 + desc->type |= info->seg_not_present ^ 1;
11071
11072 desc->s = 1;
11073 desc->dpl = 0x3;
11074 @@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
11075 }
11076
11077 extern struct desc_ptr idt_descr;
11078 -extern gate_desc idt_table[];
11079 extern struct desc_ptr nmi_idt_descr;
11080 -extern gate_desc nmi_idt_table[];
11081 -
11082 -struct gdt_page {
11083 - struct desc_struct gdt[GDT_ENTRIES];
11084 -} __attribute__((aligned(PAGE_SIZE)));
11085 -
11086 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
11087 +extern gate_desc idt_table[256];
11088 +extern gate_desc nmi_idt_table[256];
11089
11090 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
11091 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
11092 {
11093 - return per_cpu(gdt_page, cpu).gdt;
11094 + return cpu_gdt_table[cpu];
11095 }
11096
11097 #ifdef CONFIG_X86_64
11098 @@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
11099 unsigned long base, unsigned dpl, unsigned flags,
11100 unsigned short seg)
11101 {
11102 - gate->a = (seg << 16) | (base & 0xffff);
11103 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
11104 + gate->gate.offset_low = base;
11105 + gate->gate.seg = seg;
11106 + gate->gate.reserved = 0;
11107 + gate->gate.type = type;
11108 + gate->gate.s = 0;
11109 + gate->gate.dpl = dpl;
11110 + gate->gate.p = 1;
11111 + gate->gate.offset_high = base >> 16;
11112 }
11113
11114 #endif
11115 @@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
11116
11117 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
11118 {
11119 + pax_open_kernel();
11120 memcpy(&idt[entry], gate, sizeof(*gate));
11121 + pax_close_kernel();
11122 }
11123
11124 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
11125 {
11126 + pax_open_kernel();
11127 memcpy(&ldt[entry], desc, 8);
11128 + pax_close_kernel();
11129 }
11130
11131 static inline void
11132 @@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
11133 default: size = sizeof(*gdt); break;
11134 }
11135
11136 + pax_open_kernel();
11137 memcpy(&gdt[entry], desc, size);
11138 + pax_close_kernel();
11139 }
11140
11141 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
11142 @@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
11143
11144 static inline void native_load_tr_desc(void)
11145 {
11146 + pax_open_kernel();
11147 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
11148 + pax_close_kernel();
11149 }
11150
11151 static inline void native_load_gdt(const struct desc_ptr *dtr)
11152 @@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
11153 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
11154 unsigned int i;
11155
11156 + pax_open_kernel();
11157 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
11158 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
11159 + pax_close_kernel();
11160 }
11161
11162 #define _LDT_empty(info) \
11163 @@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
11164 }
11165
11166 #ifdef CONFIG_X86_64
11167 -static inline void set_nmi_gate(int gate, void *addr)
11168 +static inline void set_nmi_gate(int gate, const void *addr)
11169 {
11170 gate_desc s;
11171
11172 @@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
11173 }
11174 #endif
11175
11176 -static inline void _set_gate(int gate, unsigned type, void *addr,
11177 +static inline void _set_gate(int gate, unsigned type, const void *addr,
11178 unsigned dpl, unsigned ist, unsigned seg)
11179 {
11180 gate_desc s;
11181 @@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
11182 * Pentium F0 0F bugfix can have resulted in the mapped
11183 * IDT being write-protected.
11184 */
11185 -static inline void set_intr_gate(unsigned int n, void *addr)
11186 +static inline void set_intr_gate(unsigned int n, const void *addr)
11187 {
11188 BUG_ON((unsigned)n > 0xFF);
11189 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
11190 @@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
11191 /*
11192 * This routine sets up an interrupt gate at directory privilege level 3.
11193 */
11194 -static inline void set_system_intr_gate(unsigned int n, void *addr)
11195 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
11196 {
11197 BUG_ON((unsigned)n > 0xFF);
11198 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
11199 }
11200
11201 -static inline void set_system_trap_gate(unsigned int n, void *addr)
11202 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
11203 {
11204 BUG_ON((unsigned)n > 0xFF);
11205 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
11206 }
11207
11208 -static inline void set_trap_gate(unsigned int n, void *addr)
11209 +static inline void set_trap_gate(unsigned int n, const void *addr)
11210 {
11211 BUG_ON((unsigned)n > 0xFF);
11212 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
11213 @@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
11214 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
11215 {
11216 BUG_ON((unsigned)n > 0xFF);
11217 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
11218 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
11219 }
11220
11221 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
11222 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
11223 {
11224 BUG_ON((unsigned)n > 0xFF);
11225 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
11226 }
11227
11228 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
11229 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
11230 {
11231 BUG_ON((unsigned)n > 0xFF);
11232 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
11233 }
11234
11235 +#ifdef CONFIG_X86_32
11236 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
11237 +{
11238 + struct desc_struct d;
11239 +
11240 + if (likely(limit))
11241 + limit = (limit - 1UL) >> PAGE_SHIFT;
11242 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
11243 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
11244 +}
11245 +#endif
11246 +
11247 #endif /* _ASM_X86_DESC_H */
11248 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
11249 index 278441f..b95a174 100644
11250 --- a/arch/x86/include/asm/desc_defs.h
11251 +++ b/arch/x86/include/asm/desc_defs.h
11252 @@ -31,6 +31,12 @@ struct desc_struct {
11253 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
11254 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
11255 };
11256 + struct {
11257 + u16 offset_low;
11258 + u16 seg;
11259 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
11260 + unsigned offset_high: 16;
11261 + } gate;
11262 };
11263 } __attribute__((packed));
11264
11265 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
11266 index 3778256..c5d4fce 100644
11267 --- a/arch/x86/include/asm/e820.h
11268 +++ b/arch/x86/include/asm/e820.h
11269 @@ -69,7 +69,7 @@ struct e820map {
11270 #define ISA_START_ADDRESS 0xa0000
11271 #define ISA_END_ADDRESS 0x100000
11272
11273 -#define BIOS_BEGIN 0x000a0000
11274 +#define BIOS_BEGIN 0x000c0000
11275 #define BIOS_END 0x00100000
11276
11277 #define BIOS_ROM_BASE 0xffe00000
11278 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
11279 index 5939f44..f8845f6 100644
11280 --- a/arch/x86/include/asm/elf.h
11281 +++ b/arch/x86/include/asm/elf.h
11282 @@ -243,7 +243,25 @@ extern int force_personality32;
11283 the loader. We need to make sure that it is out of the way of the program
11284 that it will "exec", and that there is sufficient room for the brk. */
11285
11286 +#ifdef CONFIG_PAX_SEGMEXEC
11287 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
11288 +#else
11289 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
11290 +#endif
11291 +
11292 +#ifdef CONFIG_PAX_ASLR
11293 +#ifdef CONFIG_X86_32
11294 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
11295 +
11296 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11297 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11298 +#else
11299 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
11300 +
11301 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11302 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11303 +#endif
11304 +#endif
11305
11306 /* This yields a mask that user programs can use to figure out what
11307 instruction set this CPU supports. This could be done in user space,
11308 @@ -296,16 +314,12 @@ do { \
11309
11310 #define ARCH_DLINFO \
11311 do { \
11312 - if (vdso_enabled) \
11313 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11314 - (unsigned long)current->mm->context.vdso); \
11315 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11316 } while (0)
11317
11318 #define ARCH_DLINFO_X32 \
11319 do { \
11320 - if (vdso_enabled) \
11321 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11322 - (unsigned long)current->mm->context.vdso); \
11323 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11324 } while (0)
11325
11326 #define AT_SYSINFO 32
11327 @@ -320,7 +334,7 @@ else \
11328
11329 #endif /* !CONFIG_X86_32 */
11330
11331 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
11332 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
11333
11334 #define VDSO_ENTRY \
11335 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
11336 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
11337 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
11338 #define compat_arch_setup_additional_pages syscall32_setup_pages
11339
11340 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
11341 -#define arch_randomize_brk arch_randomize_brk
11342 -
11343 /*
11344 * True on X86_32 or when emulating IA32 on X86_64
11345 */
11346 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
11347 index 75ce3f4..882e801 100644
11348 --- a/arch/x86/include/asm/emergency-restart.h
11349 +++ b/arch/x86/include/asm/emergency-restart.h
11350 @@ -13,6 +13,6 @@ enum reboot_type {
11351
11352 extern enum reboot_type reboot_type;
11353
11354 -extern void machine_emergency_restart(void);
11355 +extern void machine_emergency_restart(void) __noreturn;
11356
11357 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
11358 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
11359 index 41ab26e..a88c9e6 100644
11360 --- a/arch/x86/include/asm/fpu-internal.h
11361 +++ b/arch/x86/include/asm/fpu-internal.h
11362 @@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
11363 ({ \
11364 int err; \
11365 asm volatile(ASM_STAC "\n" \
11366 - "1:" #insn "\n\t" \
11367 + "1:" \
11368 + __copyuser_seg \
11369 + #insn "\n\t" \
11370 "2: " ASM_CLAC "\n" \
11371 ".section .fixup,\"ax\"\n" \
11372 "3: movl $-1,%[err]\n" \
11373 @@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
11374 "emms\n\t" /* clear stack tags */
11375 "fildl %P[addr]", /* set F?P to defined value */
11376 X86_FEATURE_FXSAVE_LEAK,
11377 - [addr] "m" (tsk->thread.fpu.has_fpu));
11378 + [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
11379
11380 return fpu_restore_checking(&tsk->thread.fpu);
11381 }
11382 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
11383 index f373046..02653e2 100644
11384 --- a/arch/x86/include/asm/futex.h
11385 +++ b/arch/x86/include/asm/futex.h
11386 @@ -12,6 +12,7 @@
11387 #include <asm/smap.h>
11388
11389 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
11390 + typecheck(u32 __user *, uaddr); \
11391 asm volatile("\t" ASM_STAC "\n" \
11392 "1:\t" insn "\n" \
11393 "2:\t" ASM_CLAC "\n" \
11394 @@ -20,15 +21,16 @@
11395 "\tjmp\t2b\n" \
11396 "\t.previous\n" \
11397 _ASM_EXTABLE(1b, 3b) \
11398 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
11399 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
11400 : "i" (-EFAULT), "0" (oparg), "1" (0))
11401
11402 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
11403 + typecheck(u32 __user *, uaddr); \
11404 asm volatile("\t" ASM_STAC "\n" \
11405 "1:\tmovl %2, %0\n" \
11406 "\tmovl\t%0, %3\n" \
11407 "\t" insn "\n" \
11408 - "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
11409 + "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
11410 "\tjnz\t1b\n" \
11411 "3:\t" ASM_CLAC "\n" \
11412 "\t.section .fixup,\"ax\"\n" \
11413 @@ -38,7 +40,7 @@
11414 _ASM_EXTABLE(1b, 4b) \
11415 _ASM_EXTABLE(2b, 4b) \
11416 : "=&a" (oldval), "=&r" (ret), \
11417 - "+m" (*uaddr), "=&r" (tem) \
11418 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
11419 : "r" (oparg), "i" (-EFAULT), "1" (0))
11420
11421 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11422 @@ -65,10 +67,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11423
11424 switch (op) {
11425 case FUTEX_OP_SET:
11426 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
11427 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
11428 break;
11429 case FUTEX_OP_ADD:
11430 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
11431 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
11432 uaddr, oparg);
11433 break;
11434 case FUTEX_OP_OR:
11435 @@ -128,14 +130,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
11436 return -EFAULT;
11437
11438 asm volatile("\t" ASM_STAC "\n"
11439 - "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
11440 + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
11441 "2:\t" ASM_CLAC "\n"
11442 "\t.section .fixup, \"ax\"\n"
11443 "3:\tmov %3, %0\n"
11444 "\tjmp 2b\n"
11445 "\t.previous\n"
11446 _ASM_EXTABLE(1b, 3b)
11447 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
11448 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
11449 : "i" (-EFAULT), "r" (newval), "1" (oldval)
11450 : "memory"
11451 );
11452 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
11453 index eb92a6e..b98b2f4 100644
11454 --- a/arch/x86/include/asm/hw_irq.h
11455 +++ b/arch/x86/include/asm/hw_irq.h
11456 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
11457 extern void enable_IO_APIC(void);
11458
11459 /* Statistics */
11460 -extern atomic_t irq_err_count;
11461 -extern atomic_t irq_mis_count;
11462 +extern atomic_unchecked_t irq_err_count;
11463 +extern atomic_unchecked_t irq_mis_count;
11464
11465 /* EISA */
11466 extern void eisa_set_level_irq(unsigned int irq);
11467 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
11468 index d8e8eef..15b1179 100644
11469 --- a/arch/x86/include/asm/io.h
11470 +++ b/arch/x86/include/asm/io.h
11471 @@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
11472 return ioremap_nocache(offset, size);
11473 }
11474
11475 -extern void iounmap(volatile void __iomem *addr);
11476 +extern void iounmap(const volatile void __iomem *addr);
11477
11478 extern void set_iounmap_nonlazy(void);
11479
11480 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
11481
11482 #include <linux/vmalloc.h>
11483
11484 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11485 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11486 +{
11487 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11488 +}
11489 +
11490 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11491 +{
11492 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11493 +}
11494 +
11495 /*
11496 * Convert a virtual cached pointer to an uncached pointer
11497 */
11498 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
11499 index bba3cf8..06bc8da 100644
11500 --- a/arch/x86/include/asm/irqflags.h
11501 +++ b/arch/x86/include/asm/irqflags.h
11502 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
11503 sti; \
11504 sysexit
11505
11506 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
11507 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
11508 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
11509 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
11510 +
11511 #else
11512 #define INTERRUPT_RETURN iret
11513 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
11514 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
11515 index d3ddd17..c9fb0cc 100644
11516 --- a/arch/x86/include/asm/kprobes.h
11517 +++ b/arch/x86/include/asm/kprobes.h
11518 @@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
11519 #define RELATIVEJUMP_SIZE 5
11520 #define RELATIVECALL_OPCODE 0xe8
11521 #define RELATIVE_ADDR_SIZE 4
11522 -#define MAX_STACK_SIZE 64
11523 -#define MIN_STACK_SIZE(ADDR) \
11524 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
11525 - THREAD_SIZE - (unsigned long)(ADDR))) \
11526 - ? (MAX_STACK_SIZE) \
11527 - : (((unsigned long)current_thread_info()) + \
11528 - THREAD_SIZE - (unsigned long)(ADDR)))
11529 +#define MAX_STACK_SIZE 64UL
11530 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
11531
11532 #define flush_insn_slot(p) do { } while (0)
11533
11534 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
11535 index b2e11f4..f293e2e 100644
11536 --- a/arch/x86/include/asm/kvm_host.h
11537 +++ b/arch/x86/include/asm/kvm_host.h
11538 @@ -707,7 +707,7 @@ struct kvm_x86_ops {
11539 int (*check_intercept)(struct kvm_vcpu *vcpu,
11540 struct x86_instruction_info *info,
11541 enum x86_intercept_stage stage);
11542 -};
11543 +} __do_const;
11544
11545 struct kvm_arch_async_pf {
11546 u32 token;
11547 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
11548 index c8bed0d..85c03fd 100644
11549 --- a/arch/x86/include/asm/local.h
11550 +++ b/arch/x86/include/asm/local.h
11551 @@ -10,33 +10,97 @@ typedef struct {
11552 atomic_long_t a;
11553 } local_t;
11554
11555 +typedef struct {
11556 + atomic_long_unchecked_t a;
11557 +} local_unchecked_t;
11558 +
11559 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
11560
11561 #define local_read(l) atomic_long_read(&(l)->a)
11562 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
11563 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
11564 +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
11565
11566 static inline void local_inc(local_t *l)
11567 {
11568 - asm volatile(_ASM_INC "%0"
11569 + asm volatile(_ASM_INC "%0\n"
11570 +
11571 +#ifdef CONFIG_PAX_REFCOUNT
11572 + "jno 0f\n"
11573 + _ASM_DEC "%0\n"
11574 + "int $4\n0:\n"
11575 + _ASM_EXTABLE(0b, 0b)
11576 +#endif
11577 +
11578 + : "+m" (l->a.counter));
11579 +}
11580 +
11581 +static inline void local_inc_unchecked(local_unchecked_t *l)
11582 +{
11583 + asm volatile(_ASM_INC "%0\n"
11584 : "+m" (l->a.counter));
11585 }
11586
11587 static inline void local_dec(local_t *l)
11588 {
11589 - asm volatile(_ASM_DEC "%0"
11590 + asm volatile(_ASM_DEC "%0\n"
11591 +
11592 +#ifdef CONFIG_PAX_REFCOUNT
11593 + "jno 0f\n"
11594 + _ASM_INC "%0\n"
11595 + "int $4\n0:\n"
11596 + _ASM_EXTABLE(0b, 0b)
11597 +#endif
11598 +
11599 + : "+m" (l->a.counter));
11600 +}
11601 +
11602 +static inline void local_dec_unchecked(local_unchecked_t *l)
11603 +{
11604 + asm volatile(_ASM_DEC "%0\n"
11605 : "+m" (l->a.counter));
11606 }
11607
11608 static inline void local_add(long i, local_t *l)
11609 {
11610 - asm volatile(_ASM_ADD "%1,%0"
11611 + asm volatile(_ASM_ADD "%1,%0\n"
11612 +
11613 +#ifdef CONFIG_PAX_REFCOUNT
11614 + "jno 0f\n"
11615 + _ASM_SUB "%1,%0\n"
11616 + "int $4\n0:\n"
11617 + _ASM_EXTABLE(0b, 0b)
11618 +#endif
11619 +
11620 + : "+m" (l->a.counter)
11621 + : "ir" (i));
11622 +}
11623 +
11624 +static inline void local_add_unchecked(long i, local_unchecked_t *l)
11625 +{
11626 + asm volatile(_ASM_ADD "%1,%0\n"
11627 : "+m" (l->a.counter)
11628 : "ir" (i));
11629 }
11630
11631 static inline void local_sub(long i, local_t *l)
11632 {
11633 - asm volatile(_ASM_SUB "%1,%0"
11634 + asm volatile(_ASM_SUB "%1,%0\n"
11635 +
11636 +#ifdef CONFIG_PAX_REFCOUNT
11637 + "jno 0f\n"
11638 + _ASM_ADD "%1,%0\n"
11639 + "int $4\n0:\n"
11640 + _ASM_EXTABLE(0b, 0b)
11641 +#endif
11642 +
11643 + : "+m" (l->a.counter)
11644 + : "ir" (i));
11645 +}
11646 +
11647 +static inline void local_sub_unchecked(long i, local_unchecked_t *l)
11648 +{
11649 + asm volatile(_ASM_SUB "%1,%0\n"
11650 : "+m" (l->a.counter)
11651 : "ir" (i));
11652 }
11653 @@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
11654 {
11655 unsigned char c;
11656
11657 - asm volatile(_ASM_SUB "%2,%0; sete %1"
11658 + asm volatile(_ASM_SUB "%2,%0\n"
11659 +
11660 +#ifdef CONFIG_PAX_REFCOUNT
11661 + "jno 0f\n"
11662 + _ASM_ADD "%2,%0\n"
11663 + "int $4\n0:\n"
11664 + _ASM_EXTABLE(0b, 0b)
11665 +#endif
11666 +
11667 + "sete %1\n"
11668 : "+m" (l->a.counter), "=qm" (c)
11669 : "ir" (i) : "memory");
11670 return c;
11671 @@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
11672 {
11673 unsigned char c;
11674
11675 - asm volatile(_ASM_DEC "%0; sete %1"
11676 + asm volatile(_ASM_DEC "%0\n"
11677 +
11678 +#ifdef CONFIG_PAX_REFCOUNT
11679 + "jno 0f\n"
11680 + _ASM_INC "%0\n"
11681 + "int $4\n0:\n"
11682 + _ASM_EXTABLE(0b, 0b)
11683 +#endif
11684 +
11685 + "sete %1\n"
11686 : "+m" (l->a.counter), "=qm" (c)
11687 : : "memory");
11688 return c != 0;
11689 @@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
11690 {
11691 unsigned char c;
11692
11693 - asm volatile(_ASM_INC "%0; sete %1"
11694 + asm volatile(_ASM_INC "%0\n"
11695 +
11696 +#ifdef CONFIG_PAX_REFCOUNT
11697 + "jno 0f\n"
11698 + _ASM_DEC "%0\n"
11699 + "int $4\n0:\n"
11700 + _ASM_EXTABLE(0b, 0b)
11701 +#endif
11702 +
11703 + "sete %1\n"
11704 : "+m" (l->a.counter), "=qm" (c)
11705 : : "memory");
11706 return c != 0;
11707 @@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
11708 {
11709 unsigned char c;
11710
11711 - asm volatile(_ASM_ADD "%2,%0; sets %1"
11712 + asm volatile(_ASM_ADD "%2,%0\n"
11713 +
11714 +#ifdef CONFIG_PAX_REFCOUNT
11715 + "jno 0f\n"
11716 + _ASM_SUB "%2,%0\n"
11717 + "int $4\n0:\n"
11718 + _ASM_EXTABLE(0b, 0b)
11719 +#endif
11720 +
11721 + "sets %1\n"
11722 : "+m" (l->a.counter), "=qm" (c)
11723 : "ir" (i) : "memory");
11724 return c;
11725 @@ -132,7 +232,15 @@ static inline long local_add_return(long i, local_t *l)
11726 #endif
11727 /* Modern 486+ processor */
11728 __i = i;
11729 - asm volatile(_ASM_XADD "%0, %1;"
11730 + asm volatile(_ASM_XADD "%0, %1\n"
11731 +
11732 +#ifdef CONFIG_PAX_REFCOUNT
11733 + "jno 0f\n"
11734 + _ASM_MOV "%0,%1\n"
11735 + "int $4\n0:\n"
11736 + _ASM_EXTABLE(0b, 0b)
11737 +#endif
11738 +
11739 : "+r" (i), "+m" (l->a.counter)
11740 : : "memory");
11741 return i + __i;
11742 @@ -147,6 +255,38 @@ no_xadd: /* Legacy 386 processor */
11743 #endif
11744 }
11745
11746 +/**
11747 + * local_add_return_unchecked - add and return
11748 + * @i: integer value to add
11749 + * @l: pointer to type local_unchecked_t
11750 + *
11751 + * Atomically adds @i to @l and returns @i + @l
11752 + */
11753 +static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
11754 +{
11755 + long __i;
11756 +#ifdef CONFIG_M386
11757 + unsigned long flags;
11758 + if (unlikely(boot_cpu_data.x86 <= 3))
11759 + goto no_xadd;
11760 +#endif
11761 + /* Modern 486+ processor */
11762 + __i = i;
11763 + asm volatile(_ASM_XADD "%0, %1\n"
11764 + : "+r" (i), "+m" (l->a.counter)
11765 + : : "memory");
11766 + return i + __i;
11767 +
11768 +#ifdef CONFIG_M386
11769 +no_xadd: /* Legacy 386 processor */
11770 + local_irq_save(flags);
11771 + __i = local_read_unchecked(l);
11772 + local_set_unchecked(l, i + __i);
11773 + local_irq_restore(flags);
11774 + return i + __i;
11775 +#endif
11776 +}
11777 +
11778 static inline long local_sub_return(long i, local_t *l)
11779 {
11780 return local_add_return(-i, l);
11781 @@ -157,6 +297,8 @@ static inline long local_sub_return(long i, local_t *l)
11782
11783 #define local_cmpxchg(l, o, n) \
11784 (cmpxchg_local(&((l)->a.counter), (o), (n)))
11785 +#define local_cmpxchg_unchecked(l, o, n) \
11786 + (cmpxchg_local(&((l)->a.counter), (o), (n)))
11787 /* Always has a lock prefix */
11788 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
11789
11790 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
11791 index 593e51d..fa69c9a 100644
11792 --- a/arch/x86/include/asm/mman.h
11793 +++ b/arch/x86/include/asm/mman.h
11794 @@ -5,4 +5,14 @@
11795
11796 #include <asm-generic/mman.h>
11797
11798 +#ifdef __KERNEL__
11799 +#ifndef __ASSEMBLY__
11800 +#ifdef CONFIG_X86_32
11801 +#define arch_mmap_check i386_mmap_check
11802 +int i386_mmap_check(unsigned long addr, unsigned long len,
11803 + unsigned long flags);
11804 +#endif
11805 +#endif
11806 +#endif
11807 +
11808 #endif /* _ASM_X86_MMAN_H */
11809 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
11810 index 5f55e69..e20bfb1 100644
11811 --- a/arch/x86/include/asm/mmu.h
11812 +++ b/arch/x86/include/asm/mmu.h
11813 @@ -9,7 +9,7 @@
11814 * we put the segment information here.
11815 */
11816 typedef struct {
11817 - void *ldt;
11818 + struct desc_struct *ldt;
11819 int size;
11820
11821 #ifdef CONFIG_X86_64
11822 @@ -18,7 +18,19 @@ typedef struct {
11823 #endif
11824
11825 struct mutex lock;
11826 - void *vdso;
11827 + unsigned long vdso;
11828 +
11829 +#ifdef CONFIG_X86_32
11830 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
11831 + unsigned long user_cs_base;
11832 + unsigned long user_cs_limit;
11833 +
11834 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11835 + cpumask_t cpu_user_cs_mask;
11836 +#endif
11837 +
11838 +#endif
11839 +#endif
11840 } mm_context_t;
11841
11842 #ifdef CONFIG_SMP
11843 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
11844 index cdbf367..adb37ac 100644
11845 --- a/arch/x86/include/asm/mmu_context.h
11846 +++ b/arch/x86/include/asm/mmu_context.h
11847 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
11848
11849 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11850 {
11851 +
11852 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11853 + unsigned int i;
11854 + pgd_t *pgd;
11855 +
11856 + pax_open_kernel();
11857 + pgd = get_cpu_pgd(smp_processor_id());
11858 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
11859 + set_pgd_batched(pgd+i, native_make_pgd(0));
11860 + pax_close_kernel();
11861 +#endif
11862 +
11863 #ifdef CONFIG_SMP
11864 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11865 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
11866 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11867 struct task_struct *tsk)
11868 {
11869 unsigned cpu = smp_processor_id();
11870 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11871 + int tlbstate = TLBSTATE_OK;
11872 +#endif
11873
11874 if (likely(prev != next)) {
11875 #ifdef CONFIG_SMP
11876 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11877 + tlbstate = this_cpu_read(cpu_tlbstate.state);
11878 +#endif
11879 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11880 this_cpu_write(cpu_tlbstate.active_mm, next);
11881 #endif
11882 cpumask_set_cpu(cpu, mm_cpumask(next));
11883
11884 /* Re-load page tables */
11885 +#ifdef CONFIG_PAX_PER_CPU_PGD
11886 + pax_open_kernel();
11887 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11888 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11889 + pax_close_kernel();
11890 + load_cr3(get_cpu_pgd(cpu));
11891 +#else
11892 load_cr3(next->pgd);
11893 +#endif
11894
11895 /* stop flush ipis for the previous mm */
11896 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11897 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11898 */
11899 if (unlikely(prev->context.ldt != next->context.ldt))
11900 load_LDT_nolock(&next->context);
11901 - }
11902 +
11903 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11904 + if (!(__supported_pte_mask & _PAGE_NX)) {
11905 + smp_mb__before_clear_bit();
11906 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11907 + smp_mb__after_clear_bit();
11908 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11909 + }
11910 +#endif
11911 +
11912 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11913 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11914 + prev->context.user_cs_limit != next->context.user_cs_limit))
11915 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11916 #ifdef CONFIG_SMP
11917 + else if (unlikely(tlbstate != TLBSTATE_OK))
11918 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11919 +#endif
11920 +#endif
11921 +
11922 + }
11923 else {
11924 +
11925 +#ifdef CONFIG_PAX_PER_CPU_PGD
11926 + pax_open_kernel();
11927 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11928 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11929 + pax_close_kernel();
11930 + load_cr3(get_cpu_pgd(cpu));
11931 +#endif
11932 +
11933 +#ifdef CONFIG_SMP
11934 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11935 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
11936
11937 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11938 * tlb flush IPI delivery. We must reload CR3
11939 * to make sure to use no freed page tables.
11940 */
11941 +
11942 +#ifndef CONFIG_PAX_PER_CPU_PGD
11943 load_cr3(next->pgd);
11944 +#endif
11945 +
11946 load_LDT_nolock(&next->context);
11947 +
11948 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11949 + if (!(__supported_pte_mask & _PAGE_NX))
11950 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11951 +#endif
11952 +
11953 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11954 +#ifdef CONFIG_PAX_PAGEEXEC
11955 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
11956 +#endif
11957 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11958 +#endif
11959 +
11960 }
11961 +#endif
11962 }
11963 -#endif
11964 }
11965
11966 #define activate_mm(prev, next) \
11967 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11968 index 9eae775..c914fea 100644
11969 --- a/arch/x86/include/asm/module.h
11970 +++ b/arch/x86/include/asm/module.h
11971 @@ -5,6 +5,7 @@
11972
11973 #ifdef CONFIG_X86_64
11974 /* X86_64 does not define MODULE_PROC_FAMILY */
11975 +#define MODULE_PROC_FAMILY ""
11976 #elif defined CONFIG_M386
11977 #define MODULE_PROC_FAMILY "386 "
11978 #elif defined CONFIG_M486
11979 @@ -59,8 +60,20 @@
11980 #error unknown processor family
11981 #endif
11982
11983 -#ifdef CONFIG_X86_32
11984 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
11985 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11986 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11987 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11988 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11989 +#else
11990 +#define MODULE_PAX_KERNEXEC ""
11991 #endif
11992
11993 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11994 +#define MODULE_PAX_UDEREF "UDEREF "
11995 +#else
11996 +#define MODULE_PAX_UDEREF ""
11997 +#endif
11998 +
11999 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
12000 +
12001 #endif /* _ASM_X86_MODULE_H */
12002 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
12003 index 320f7bb..e89f8f8 100644
12004 --- a/arch/x86/include/asm/page_64_types.h
12005 +++ b/arch/x86/include/asm/page_64_types.h
12006 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
12007
12008 /* duplicated to the one in bootmem.h */
12009 extern unsigned long max_pfn;
12010 -extern unsigned long phys_base;
12011 +extern const unsigned long phys_base;
12012
12013 extern unsigned long __phys_addr(unsigned long);
12014 #define __phys_reloc_hide(x) (x)
12015 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
12016 index a0facf3..c017b15 100644
12017 --- a/arch/x86/include/asm/paravirt.h
12018 +++ b/arch/x86/include/asm/paravirt.h
12019 @@ -632,6 +632,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
12020 val);
12021 }
12022
12023 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12024 +{
12025 + pgdval_t val = native_pgd_val(pgd);
12026 +
12027 + if (sizeof(pgdval_t) > sizeof(long))
12028 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
12029 + val, (u64)val >> 32);
12030 + else
12031 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
12032 + val);
12033 +}
12034 +
12035 static inline void pgd_clear(pgd_t *pgdp)
12036 {
12037 set_pgd(pgdp, __pgd(0));
12038 @@ -713,6 +725,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
12039 pv_mmu_ops.set_fixmap(idx, phys, flags);
12040 }
12041
12042 +#ifdef CONFIG_PAX_KERNEXEC
12043 +static inline unsigned long pax_open_kernel(void)
12044 +{
12045 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
12046 +}
12047 +
12048 +static inline unsigned long pax_close_kernel(void)
12049 +{
12050 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
12051 +}
12052 +#else
12053 +static inline unsigned long pax_open_kernel(void) { return 0; }
12054 +static inline unsigned long pax_close_kernel(void) { return 0; }
12055 +#endif
12056 +
12057 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
12058
12059 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
12060 @@ -929,7 +956,7 @@ extern void default_banner(void);
12061
12062 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
12063 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
12064 -#define PARA_INDIRECT(addr) *%cs:addr
12065 +#define PARA_INDIRECT(addr) *%ss:addr
12066 #endif
12067
12068 #define INTERRUPT_RETURN \
12069 @@ -1004,6 +1031,21 @@ extern void default_banner(void);
12070 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
12071 CLBR_NONE, \
12072 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
12073 +
12074 +#define GET_CR0_INTO_RDI \
12075 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
12076 + mov %rax,%rdi
12077 +
12078 +#define SET_RDI_INTO_CR0 \
12079 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12080 +
12081 +#define GET_CR3_INTO_RDI \
12082 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
12083 + mov %rax,%rdi
12084 +
12085 +#define SET_RDI_INTO_CR3 \
12086 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
12087 +
12088 #endif /* CONFIG_X86_32 */
12089
12090 #endif /* __ASSEMBLY__ */
12091 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
12092 index 142236e..6a6b4a6 100644
12093 --- a/arch/x86/include/asm/paravirt_types.h
12094 +++ b/arch/x86/include/asm/paravirt_types.h
12095 @@ -84,20 +84,20 @@ struct pv_init_ops {
12096 */
12097 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
12098 unsigned long addr, unsigned len);
12099 -};
12100 +} __no_const;
12101
12102
12103 struct pv_lazy_ops {
12104 /* Set deferred update mode, used for batching operations. */
12105 void (*enter)(void);
12106 void (*leave)(void);
12107 -};
12108 +} __no_const;
12109
12110 struct pv_time_ops {
12111 unsigned long long (*sched_clock)(void);
12112 unsigned long long (*steal_clock)(int cpu);
12113 unsigned long (*get_tsc_khz)(void);
12114 -};
12115 +} __no_const;
12116
12117 struct pv_cpu_ops {
12118 /* hooks for various privileged instructions */
12119 @@ -191,7 +191,7 @@ struct pv_cpu_ops {
12120
12121 void (*start_context_switch)(struct task_struct *prev);
12122 void (*end_context_switch)(struct task_struct *next);
12123 -};
12124 +} __no_const;
12125
12126 struct pv_irq_ops {
12127 /*
12128 @@ -222,7 +222,7 @@ struct pv_apic_ops {
12129 unsigned long start_eip,
12130 unsigned long start_esp);
12131 #endif
12132 -};
12133 +} __no_const;
12134
12135 struct pv_mmu_ops {
12136 unsigned long (*read_cr2)(void);
12137 @@ -312,6 +312,7 @@ struct pv_mmu_ops {
12138 struct paravirt_callee_save make_pud;
12139
12140 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
12141 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
12142 #endif /* PAGETABLE_LEVELS == 4 */
12143 #endif /* PAGETABLE_LEVELS >= 3 */
12144
12145 @@ -323,6 +324,12 @@ struct pv_mmu_ops {
12146 an mfn. We can tell which is which from the index. */
12147 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
12148 phys_addr_t phys, pgprot_t flags);
12149 +
12150 +#ifdef CONFIG_PAX_KERNEXEC
12151 + unsigned long (*pax_open_kernel)(void);
12152 + unsigned long (*pax_close_kernel)(void);
12153 +#endif
12154 +
12155 };
12156
12157 struct arch_spinlock;
12158 @@ -333,7 +340,7 @@ struct pv_lock_ops {
12159 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
12160 int (*spin_trylock)(struct arch_spinlock *lock);
12161 void (*spin_unlock)(struct arch_spinlock *lock);
12162 -};
12163 +} __no_const;
12164
12165 /* This contains all the paravirt structures: we get a convenient
12166 * number for each function using the offset which we use to indicate
12167 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
12168 index b4389a4..7024269 100644
12169 --- a/arch/x86/include/asm/pgalloc.h
12170 +++ b/arch/x86/include/asm/pgalloc.h
12171 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
12172 pmd_t *pmd, pte_t *pte)
12173 {
12174 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12175 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
12176 +}
12177 +
12178 +static inline void pmd_populate_user(struct mm_struct *mm,
12179 + pmd_t *pmd, pte_t *pte)
12180 +{
12181 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12182 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
12183 }
12184
12185 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
12186
12187 #ifdef CONFIG_X86_PAE
12188 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
12189 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
12190 +{
12191 + pud_populate(mm, pudp, pmd);
12192 +}
12193 #else /* !CONFIG_X86_PAE */
12194 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
12195 {
12196 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
12197 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
12198 }
12199 +
12200 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
12201 +{
12202 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
12203 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
12204 +}
12205 #endif /* CONFIG_X86_PAE */
12206
12207 #if PAGETABLE_LEVELS > 3
12208 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
12209 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
12210 }
12211
12212 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
12213 +{
12214 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
12215 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
12216 +}
12217 +
12218 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
12219 {
12220 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
12221 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
12222 index f2b489c..4f7e2e5 100644
12223 --- a/arch/x86/include/asm/pgtable-2level.h
12224 +++ b/arch/x86/include/asm/pgtable-2level.h
12225 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
12226
12227 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12228 {
12229 + pax_open_kernel();
12230 *pmdp = pmd;
12231 + pax_close_kernel();
12232 }
12233
12234 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12235 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
12236 index 4cc9f2b..5fd9226 100644
12237 --- a/arch/x86/include/asm/pgtable-3level.h
12238 +++ b/arch/x86/include/asm/pgtable-3level.h
12239 @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12240
12241 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12242 {
12243 + pax_open_kernel();
12244 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
12245 + pax_close_kernel();
12246 }
12247
12248 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12249 {
12250 + pax_open_kernel();
12251 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
12252 + pax_close_kernel();
12253 }
12254
12255 /*
12256 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
12257 index a1f780d..5f38ced4 100644
12258 --- a/arch/x86/include/asm/pgtable.h
12259 +++ b/arch/x86/include/asm/pgtable.h
12260 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
12261
12262 #ifndef __PAGETABLE_PUD_FOLDED
12263 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
12264 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
12265 #define pgd_clear(pgd) native_pgd_clear(pgd)
12266 #endif
12267
12268 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
12269
12270 #define arch_end_context_switch(prev) do {} while(0)
12271
12272 +#define pax_open_kernel() native_pax_open_kernel()
12273 +#define pax_close_kernel() native_pax_close_kernel()
12274 #endif /* CONFIG_PARAVIRT */
12275
12276 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
12277 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
12278 +
12279 +#ifdef CONFIG_PAX_KERNEXEC
12280 +static inline unsigned long native_pax_open_kernel(void)
12281 +{
12282 + unsigned long cr0;
12283 +
12284 + preempt_disable();
12285 + barrier();
12286 + cr0 = read_cr0() ^ X86_CR0_WP;
12287 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
12288 + write_cr0(cr0);
12289 + return cr0 ^ X86_CR0_WP;
12290 +}
12291 +
12292 +static inline unsigned long native_pax_close_kernel(void)
12293 +{
12294 + unsigned long cr0;
12295 +
12296 + cr0 = read_cr0() ^ X86_CR0_WP;
12297 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
12298 + write_cr0(cr0);
12299 + barrier();
12300 + preempt_enable_no_resched();
12301 + return cr0 ^ X86_CR0_WP;
12302 +}
12303 +#else
12304 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
12305 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
12306 +#endif
12307 +
12308 /*
12309 * The following only work if pte_present() is true.
12310 * Undefined behaviour if not..
12311 */
12312 +static inline int pte_user(pte_t pte)
12313 +{
12314 + return pte_val(pte) & _PAGE_USER;
12315 +}
12316 +
12317 static inline int pte_dirty(pte_t pte)
12318 {
12319 return pte_flags(pte) & _PAGE_DIRTY;
12320 @@ -195,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
12321 return pte_clear_flags(pte, _PAGE_RW);
12322 }
12323
12324 +static inline pte_t pte_mkread(pte_t pte)
12325 +{
12326 + return __pte(pte_val(pte) | _PAGE_USER);
12327 +}
12328 +
12329 static inline pte_t pte_mkexec(pte_t pte)
12330 {
12331 - return pte_clear_flags(pte, _PAGE_NX);
12332 +#ifdef CONFIG_X86_PAE
12333 + if (__supported_pte_mask & _PAGE_NX)
12334 + return pte_clear_flags(pte, _PAGE_NX);
12335 + else
12336 +#endif
12337 + return pte_set_flags(pte, _PAGE_USER);
12338 +}
12339 +
12340 +static inline pte_t pte_exprotect(pte_t pte)
12341 +{
12342 +#ifdef CONFIG_X86_PAE
12343 + if (__supported_pte_mask & _PAGE_NX)
12344 + return pte_set_flags(pte, _PAGE_NX);
12345 + else
12346 +#endif
12347 + return pte_clear_flags(pte, _PAGE_USER);
12348 }
12349
12350 static inline pte_t pte_mkdirty(pte_t pte)
12351 @@ -389,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
12352 #endif
12353
12354 #ifndef __ASSEMBLY__
12355 +
12356 +#ifdef CONFIG_PAX_PER_CPU_PGD
12357 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
12358 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
12359 +{
12360 + return cpu_pgd[cpu];
12361 +}
12362 +#endif
12363 +
12364 #include <linux/mm_types.h>
12365
12366 static inline int pte_none(pte_t pte)
12367 @@ -565,7 +634,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
12368
12369 static inline int pgd_bad(pgd_t pgd)
12370 {
12371 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
12372 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
12373 }
12374
12375 static inline int pgd_none(pgd_t pgd)
12376 @@ -588,7 +657,12 @@ static inline int pgd_none(pgd_t pgd)
12377 * pgd_offset() returns a (pgd_t *)
12378 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
12379 */
12380 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
12381 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
12382 +
12383 +#ifdef CONFIG_PAX_PER_CPU_PGD
12384 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
12385 +#endif
12386 +
12387 /*
12388 * a shortcut which implies the use of the kernel's pgd, instead
12389 * of a process's
12390 @@ -599,6 +673,20 @@ static inline int pgd_none(pgd_t pgd)
12391 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
12392 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
12393
12394 +#ifdef CONFIG_X86_32
12395 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
12396 +#else
12397 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
12398 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
12399 +
12400 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12401 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
12402 +#else
12403 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
12404 +#endif
12405 +
12406 +#endif
12407 +
12408 #ifndef __ASSEMBLY__
12409
12410 extern int direct_gbpages;
12411 @@ -763,11 +851,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
12412 * dst and src can be on the same page, but the range must not overlap,
12413 * and must not cross a page boundary.
12414 */
12415 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
12416 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
12417 {
12418 - memcpy(dst, src, count * sizeof(pgd_t));
12419 + pax_open_kernel();
12420 + while (count--)
12421 + *dst++ = *src++;
12422 + pax_close_kernel();
12423 }
12424
12425 +#ifdef CONFIG_PAX_PER_CPU_PGD
12426 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
12427 +#endif
12428 +
12429 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12430 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
12431 +#else
12432 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
12433 +#endif
12434
12435 #include <asm-generic/pgtable.h>
12436 #endif /* __ASSEMBLY__ */
12437 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
12438 index 8faa215..a8a17ea 100644
12439 --- a/arch/x86/include/asm/pgtable_32.h
12440 +++ b/arch/x86/include/asm/pgtable_32.h
12441 @@ -25,9 +25,6 @@
12442 struct mm_struct;
12443 struct vm_area_struct;
12444
12445 -extern pgd_t swapper_pg_dir[1024];
12446 -extern pgd_t initial_page_table[1024];
12447 -
12448 static inline void pgtable_cache_init(void) { }
12449 static inline void check_pgt_cache(void) { }
12450 void paging_init(void);
12451 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12452 # include <asm/pgtable-2level.h>
12453 #endif
12454
12455 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
12456 +extern pgd_t initial_page_table[PTRS_PER_PGD];
12457 +#ifdef CONFIG_X86_PAE
12458 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
12459 +#endif
12460 +
12461 #if defined(CONFIG_HIGHPTE)
12462 #define pte_offset_map(dir, address) \
12463 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
12464 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12465 /* Clear a kernel PTE and flush it from the TLB */
12466 #define kpte_clear_flush(ptep, vaddr) \
12467 do { \
12468 + pax_open_kernel(); \
12469 pte_clear(&init_mm, (vaddr), (ptep)); \
12470 + pax_close_kernel(); \
12471 __flush_tlb_one((vaddr)); \
12472 } while (0)
12473
12474 @@ -75,6 +80,9 @@ do { \
12475
12476 #endif /* !__ASSEMBLY__ */
12477
12478 +#define HAVE_ARCH_UNMAPPED_AREA
12479 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
12480 +
12481 /*
12482 * kern_addr_valid() is (1) for FLATMEM and (0) for
12483 * SPARSEMEM and DISCONTIGMEM
12484 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
12485 index ed5903b..c7fe163 100644
12486 --- a/arch/x86/include/asm/pgtable_32_types.h
12487 +++ b/arch/x86/include/asm/pgtable_32_types.h
12488 @@ -8,7 +8,7 @@
12489 */
12490 #ifdef CONFIG_X86_PAE
12491 # include <asm/pgtable-3level_types.h>
12492 -# define PMD_SIZE (1UL << PMD_SHIFT)
12493 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
12494 # define PMD_MASK (~(PMD_SIZE - 1))
12495 #else
12496 # include <asm/pgtable-2level_types.h>
12497 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
12498 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
12499 #endif
12500
12501 +#ifdef CONFIG_PAX_KERNEXEC
12502 +#ifndef __ASSEMBLY__
12503 +extern unsigned char MODULES_EXEC_VADDR[];
12504 +extern unsigned char MODULES_EXEC_END[];
12505 +#endif
12506 +#include <asm/boot.h>
12507 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
12508 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
12509 +#else
12510 +#define ktla_ktva(addr) (addr)
12511 +#define ktva_ktla(addr) (addr)
12512 +#endif
12513 +
12514 #define MODULES_VADDR VMALLOC_START
12515 #define MODULES_END VMALLOC_END
12516 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
12517 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
12518 index 47356f9..deb94a2 100644
12519 --- a/arch/x86/include/asm/pgtable_64.h
12520 +++ b/arch/x86/include/asm/pgtable_64.h
12521 @@ -16,10 +16,14 @@
12522
12523 extern pud_t level3_kernel_pgt[512];
12524 extern pud_t level3_ident_pgt[512];
12525 +extern pud_t level3_vmalloc_start_pgt[512];
12526 +extern pud_t level3_vmalloc_end_pgt[512];
12527 +extern pud_t level3_vmemmap_pgt[512];
12528 +extern pud_t level2_vmemmap_pgt[512];
12529 extern pmd_t level2_kernel_pgt[512];
12530 extern pmd_t level2_fixmap_pgt[512];
12531 -extern pmd_t level2_ident_pgt[512];
12532 -extern pgd_t init_level4_pgt[];
12533 +extern pmd_t level2_ident_pgt[512*2];
12534 +extern pgd_t init_level4_pgt[512];
12535
12536 #define swapper_pg_dir init_level4_pgt
12537
12538 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12539
12540 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12541 {
12542 + pax_open_kernel();
12543 *pmdp = pmd;
12544 + pax_close_kernel();
12545 }
12546
12547 static inline void native_pmd_clear(pmd_t *pmd)
12548 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
12549
12550 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12551 {
12552 + pax_open_kernel();
12553 *pudp = pud;
12554 + pax_close_kernel();
12555 }
12556
12557 static inline void native_pud_clear(pud_t *pud)
12558 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
12559
12560 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
12561 {
12562 + pax_open_kernel();
12563 + *pgdp = pgd;
12564 + pax_close_kernel();
12565 +}
12566 +
12567 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12568 +{
12569 *pgdp = pgd;
12570 }
12571
12572 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
12573 index 766ea16..5b96cb3 100644
12574 --- a/arch/x86/include/asm/pgtable_64_types.h
12575 +++ b/arch/x86/include/asm/pgtable_64_types.h
12576 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
12577 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
12578 #define MODULES_END _AC(0xffffffffff000000, UL)
12579 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
12580 +#define MODULES_EXEC_VADDR MODULES_VADDR
12581 +#define MODULES_EXEC_END MODULES_END
12582 +
12583 +#define ktla_ktva(addr) (addr)
12584 +#define ktva_ktla(addr) (addr)
12585
12586 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
12587 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
12588 index ec8a1fc..7ccb593 100644
12589 --- a/arch/x86/include/asm/pgtable_types.h
12590 +++ b/arch/x86/include/asm/pgtable_types.h
12591 @@ -16,13 +16,12 @@
12592 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
12593 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
12594 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
12595 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
12596 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
12597 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
12598 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
12599 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
12600 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
12601 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
12602 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
12603 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
12604 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
12605 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
12606
12607 /* If _PAGE_BIT_PRESENT is clear, we use these: */
12608 @@ -40,7 +39,6 @@
12609 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
12610 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
12611 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
12612 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
12613 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
12614 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
12615 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
12616 @@ -57,8 +55,10 @@
12617
12618 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
12619 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
12620 -#else
12621 +#elif defined(CONFIG_KMEMCHECK)
12622 #define _PAGE_NX (_AT(pteval_t, 0))
12623 +#else
12624 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
12625 #endif
12626
12627 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
12628 @@ -96,6 +96,9 @@
12629 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
12630 _PAGE_ACCESSED)
12631
12632 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
12633 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
12634 +
12635 #define __PAGE_KERNEL_EXEC \
12636 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
12637 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
12638 @@ -106,7 +109,7 @@
12639 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
12640 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
12641 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
12642 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
12643 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
12644 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
12645 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
12646 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
12647 @@ -168,8 +171,8 @@
12648 * bits are combined, this will alow user to access the high address mapped
12649 * VDSO in the presence of CONFIG_COMPAT_VDSO
12650 */
12651 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
12652 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
12653 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12654 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12655 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
12656 #endif
12657
12658 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
12659 {
12660 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
12661 }
12662 +#endif
12663
12664 +#if PAGETABLE_LEVELS == 3
12665 +#include <asm-generic/pgtable-nopud.h>
12666 +#endif
12667 +
12668 +#if PAGETABLE_LEVELS == 2
12669 +#include <asm-generic/pgtable-nopmd.h>
12670 +#endif
12671 +
12672 +#ifndef __ASSEMBLY__
12673 #if PAGETABLE_LEVELS > 3
12674 typedef struct { pudval_t pud; } pud_t;
12675
12676 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
12677 return pud.pud;
12678 }
12679 #else
12680 -#include <asm-generic/pgtable-nopud.h>
12681 -
12682 static inline pudval_t native_pud_val(pud_t pud)
12683 {
12684 return native_pgd_val(pud.pgd);
12685 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
12686 return pmd.pmd;
12687 }
12688 #else
12689 -#include <asm-generic/pgtable-nopmd.h>
12690 -
12691 static inline pmdval_t native_pmd_val(pmd_t pmd)
12692 {
12693 return native_pgd_val(pmd.pud.pgd);
12694 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
12695
12696 extern pteval_t __supported_pte_mask;
12697 extern void set_nx(void);
12698 -extern int nx_enabled;
12699
12700 #define pgprot_writecombine pgprot_writecombine
12701 extern pgprot_t pgprot_writecombine(pgprot_t prot);
12702 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
12703 index ad1fc85..0b15fe1 100644
12704 --- a/arch/x86/include/asm/processor.h
12705 +++ b/arch/x86/include/asm/processor.h
12706 @@ -289,7 +289,7 @@ struct tss_struct {
12707
12708 } ____cacheline_aligned;
12709
12710 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
12711 +extern struct tss_struct init_tss[NR_CPUS];
12712
12713 /*
12714 * Save the original ist values for checking stack pointers during debugging
12715 @@ -818,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
12716 */
12717 #define TASK_SIZE PAGE_OFFSET
12718 #define TASK_SIZE_MAX TASK_SIZE
12719 +
12720 +#ifdef CONFIG_PAX_SEGMEXEC
12721 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
12722 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
12723 +#else
12724 #define STACK_TOP TASK_SIZE
12725 -#define STACK_TOP_MAX STACK_TOP
12726 +#endif
12727 +
12728 +#define STACK_TOP_MAX TASK_SIZE
12729
12730 #define INIT_THREAD { \
12731 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
12732 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12733 .vm86_info = NULL, \
12734 .sysenter_cs = __KERNEL_CS, \
12735 .io_bitmap_ptr = NULL, \
12736 @@ -836,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
12737 */
12738 #define INIT_TSS { \
12739 .x86_tss = { \
12740 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
12741 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12742 .ss0 = __KERNEL_DS, \
12743 .ss1 = __KERNEL_CS, \
12744 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
12745 @@ -847,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
12746 extern unsigned long thread_saved_pc(struct task_struct *tsk);
12747
12748 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
12749 -#define KSTK_TOP(info) \
12750 -({ \
12751 - unsigned long *__ptr = (unsigned long *)(info); \
12752 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
12753 -})
12754 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
12755
12756 /*
12757 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
12758 @@ -866,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12759 #define task_pt_regs(task) \
12760 ({ \
12761 struct pt_regs *__regs__; \
12762 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
12763 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
12764 __regs__ - 1; \
12765 })
12766
12767 @@ -876,13 +879,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12768 /*
12769 * User space process size. 47bits minus one guard page.
12770 */
12771 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
12772 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
12773
12774 /* This decides where the kernel will search for a free chunk of vm
12775 * space during mmap's.
12776 */
12777 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
12778 - 0xc0000000 : 0xFFFFe000)
12779 + 0xc0000000 : 0xFFFFf000)
12780
12781 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
12782 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
12783 @@ -893,11 +896,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12784 #define STACK_TOP_MAX TASK_SIZE_MAX
12785
12786 #define INIT_THREAD { \
12787 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12788 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12789 }
12790
12791 #define INIT_TSS { \
12792 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12793 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12794 }
12795
12796 /*
12797 @@ -925,6 +928,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
12798 */
12799 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
12800
12801 +#ifdef CONFIG_PAX_SEGMEXEC
12802 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
12803 +#endif
12804 +
12805 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
12806
12807 /* Get/set a process' ability to use the timestamp counter instruction */
12808 @@ -985,12 +992,12 @@ extern bool cpu_has_amd_erratum(const int *);
12809 #define cpu_has_amd_erratum(x) (false)
12810 #endif /* CONFIG_CPU_SUP_AMD */
12811
12812 -extern unsigned long arch_align_stack(unsigned long sp);
12813 +#define arch_align_stack(x) ((x) & ~0xfUL)
12814 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12815
12816 void default_idle(void);
12817 bool set_pm_idle_to_default(void);
12818
12819 -void stop_this_cpu(void *dummy);
12820 +void stop_this_cpu(void *dummy) __noreturn;
12821
12822 #endif /* _ASM_X86_PROCESSOR_H */
12823 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
12824 index 19f16eb..b50624b 100644
12825 --- a/arch/x86/include/asm/ptrace.h
12826 +++ b/arch/x86/include/asm/ptrace.h
12827 @@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
12828 }
12829
12830 /*
12831 - * user_mode_vm(regs) determines whether a register set came from user mode.
12832 + * user_mode(regs) determines whether a register set came from user mode.
12833 * This is true if V8086 mode was enabled OR if the register set was from
12834 * protected mode with RPL-3 CS value. This tricky test checks that with
12835 * one comparison. Many places in the kernel can bypass this full check
12836 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
12837 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
12838 + * be used.
12839 */
12840 -static inline int user_mode(struct pt_regs *regs)
12841 +static inline int user_mode_novm(struct pt_regs *regs)
12842 {
12843 #ifdef CONFIG_X86_32
12844 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
12845 #else
12846 - return !!(regs->cs & 3);
12847 + return !!(regs->cs & SEGMENT_RPL_MASK);
12848 #endif
12849 }
12850
12851 -static inline int user_mode_vm(struct pt_regs *regs)
12852 +static inline int user_mode(struct pt_regs *regs)
12853 {
12854 #ifdef CONFIG_X86_32
12855 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
12856 USER_RPL;
12857 #else
12858 - return user_mode(regs);
12859 + return user_mode_novm(regs);
12860 #endif
12861 }
12862
12863 @@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
12864 #ifdef CONFIG_X86_64
12865 static inline bool user_64bit_mode(struct pt_regs *regs)
12866 {
12867 + unsigned long cs = regs->cs & 0xffff;
12868 #ifndef CONFIG_PARAVIRT
12869 /*
12870 * On non-paravirt systems, this is the only long mode CPL 3
12871 * selector. We do not allow long mode selectors in the LDT.
12872 */
12873 - return regs->cs == __USER_CS;
12874 + return cs == __USER_CS;
12875 #else
12876 /* Headers are too twisted for this to go in paravirt.h. */
12877 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
12878 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
12879 #endif
12880 }
12881 #endif
12882 diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
12883 index fe1ec5b..dc5c3fe 100644
12884 --- a/arch/x86/include/asm/realmode.h
12885 +++ b/arch/x86/include/asm/realmode.h
12886 @@ -22,16 +22,14 @@ struct real_mode_header {
12887 #endif
12888 /* APM/BIOS reboot */
12889 u32 machine_real_restart_asm;
12890 -#ifdef CONFIG_X86_64
12891 u32 machine_real_restart_seg;
12892 -#endif
12893 };
12894
12895 /* This must match data at trampoline_32/64.S */
12896 struct trampoline_header {
12897 #ifdef CONFIG_X86_32
12898 u32 start;
12899 - u16 gdt_pad;
12900 + u16 boot_cs;
12901 u16 gdt_limit;
12902 u32 gdt_base;
12903 #else
12904 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12905 index a82c4f1..ac45053 100644
12906 --- a/arch/x86/include/asm/reboot.h
12907 +++ b/arch/x86/include/asm/reboot.h
12908 @@ -6,13 +6,13 @@
12909 struct pt_regs;
12910
12911 struct machine_ops {
12912 - void (*restart)(char *cmd);
12913 - void (*halt)(void);
12914 - void (*power_off)(void);
12915 + void (* __noreturn restart)(char *cmd);
12916 + void (* __noreturn halt)(void);
12917 + void (* __noreturn power_off)(void);
12918 void (*shutdown)(void);
12919 void (*crash_shutdown)(struct pt_regs *);
12920 - void (*emergency_restart)(void);
12921 -};
12922 + void (* __noreturn emergency_restart)(void);
12923 +} __no_const;
12924
12925 extern struct machine_ops machine_ops;
12926
12927 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12928 index 2dbe4a7..ce1db00 100644
12929 --- a/arch/x86/include/asm/rwsem.h
12930 +++ b/arch/x86/include/asm/rwsem.h
12931 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12932 {
12933 asm volatile("# beginning down_read\n\t"
12934 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12935 +
12936 +#ifdef CONFIG_PAX_REFCOUNT
12937 + "jno 0f\n"
12938 + LOCK_PREFIX _ASM_DEC "(%1)\n"
12939 + "int $4\n0:\n"
12940 + _ASM_EXTABLE(0b, 0b)
12941 +#endif
12942 +
12943 /* adds 0x00000001 */
12944 " jns 1f\n"
12945 " call call_rwsem_down_read_failed\n"
12946 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12947 "1:\n\t"
12948 " mov %1,%2\n\t"
12949 " add %3,%2\n\t"
12950 +
12951 +#ifdef CONFIG_PAX_REFCOUNT
12952 + "jno 0f\n"
12953 + "sub %3,%2\n"
12954 + "int $4\n0:\n"
12955 + _ASM_EXTABLE(0b, 0b)
12956 +#endif
12957 +
12958 " jle 2f\n\t"
12959 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12960 " jnz 1b\n\t"
12961 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12962 long tmp;
12963 asm volatile("# beginning down_write\n\t"
12964 LOCK_PREFIX " xadd %1,(%2)\n\t"
12965 +
12966 +#ifdef CONFIG_PAX_REFCOUNT
12967 + "jno 0f\n"
12968 + "mov %1,(%2)\n"
12969 + "int $4\n0:\n"
12970 + _ASM_EXTABLE(0b, 0b)
12971 +#endif
12972 +
12973 /* adds 0xffff0001, returns the old value */
12974 " test %1,%1\n\t"
12975 /* was the count 0 before? */
12976 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12977 long tmp;
12978 asm volatile("# beginning __up_read\n\t"
12979 LOCK_PREFIX " xadd %1,(%2)\n\t"
12980 +
12981 +#ifdef CONFIG_PAX_REFCOUNT
12982 + "jno 0f\n"
12983 + "mov %1,(%2)\n"
12984 + "int $4\n0:\n"
12985 + _ASM_EXTABLE(0b, 0b)
12986 +#endif
12987 +
12988 /* subtracts 1, returns the old value */
12989 " jns 1f\n\t"
12990 " call call_rwsem_wake\n" /* expects old value in %edx */
12991 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12992 long tmp;
12993 asm volatile("# beginning __up_write\n\t"
12994 LOCK_PREFIX " xadd %1,(%2)\n\t"
12995 +
12996 +#ifdef CONFIG_PAX_REFCOUNT
12997 + "jno 0f\n"
12998 + "mov %1,(%2)\n"
12999 + "int $4\n0:\n"
13000 + _ASM_EXTABLE(0b, 0b)
13001 +#endif
13002 +
13003 /* subtracts 0xffff0001, returns the old value */
13004 " jns 1f\n\t"
13005 " call call_rwsem_wake\n" /* expects old value in %edx */
13006 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
13007 {
13008 asm volatile("# beginning __downgrade_write\n\t"
13009 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
13010 +
13011 +#ifdef CONFIG_PAX_REFCOUNT
13012 + "jno 0f\n"
13013 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
13014 + "int $4\n0:\n"
13015 + _ASM_EXTABLE(0b, 0b)
13016 +#endif
13017 +
13018 /*
13019 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
13020 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
13021 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
13022 */
13023 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
13024 {
13025 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
13026 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
13027 +
13028 +#ifdef CONFIG_PAX_REFCOUNT
13029 + "jno 0f\n"
13030 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
13031 + "int $4\n0:\n"
13032 + _ASM_EXTABLE(0b, 0b)
13033 +#endif
13034 +
13035 : "+m" (sem->count)
13036 : "er" (delta));
13037 }
13038 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
13039 */
13040 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
13041 {
13042 - return delta + xadd(&sem->count, delta);
13043 + return delta + xadd_check_overflow(&sem->count, delta);
13044 }
13045
13046 #endif /* __KERNEL__ */
13047 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
13048 index c48a950..c6d7468 100644
13049 --- a/arch/x86/include/asm/segment.h
13050 +++ b/arch/x86/include/asm/segment.h
13051 @@ -64,10 +64,15 @@
13052 * 26 - ESPFIX small SS
13053 * 27 - per-cpu [ offset to per-cpu data area ]
13054 * 28 - stack_canary-20 [ for stack protector ]
13055 - * 29 - unused
13056 - * 30 - unused
13057 + * 29 - PCI BIOS CS
13058 + * 30 - PCI BIOS DS
13059 * 31 - TSS for double fault handler
13060 */
13061 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
13062 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
13063 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
13064 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
13065 +
13066 #define GDT_ENTRY_TLS_MIN 6
13067 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
13068
13069 @@ -79,6 +84,8 @@
13070
13071 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
13072
13073 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
13074 +
13075 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
13076
13077 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
13078 @@ -104,6 +111,12 @@
13079 #define __KERNEL_STACK_CANARY 0
13080 #endif
13081
13082 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
13083 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
13084 +
13085 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
13086 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
13087 +
13088 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
13089
13090 /*
13091 @@ -141,7 +154,7 @@
13092 */
13093
13094 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
13095 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
13096 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
13097
13098
13099 #else
13100 @@ -165,6 +178,8 @@
13101 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
13102 #define __USER32_DS __USER_DS
13103
13104 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
13105 +
13106 #define GDT_ENTRY_TSS 8 /* needs two entries */
13107 #define GDT_ENTRY_LDT 10 /* needs two entries */
13108 #define GDT_ENTRY_TLS_MIN 12
13109 @@ -185,6 +200,7 @@
13110 #endif
13111
13112 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
13113 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
13114 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
13115 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
13116 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
13117 @@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
13118 {
13119 unsigned long __limit;
13120 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
13121 - return __limit + 1;
13122 + return __limit;
13123 }
13124
13125 #endif /* !__ASSEMBLY__ */
13126 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
13127 index 4f19a15..9e14f27 100644
13128 --- a/arch/x86/include/asm/smp.h
13129 +++ b/arch/x86/include/asm/smp.h
13130 @@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
13131 /* cpus sharing the last level cache: */
13132 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
13133 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
13134 -DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
13135 +DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
13136
13137 static inline struct cpumask *cpu_sibling_mask(int cpu)
13138 {
13139 @@ -79,7 +79,7 @@ struct smp_ops {
13140
13141 void (*send_call_func_ipi)(const struct cpumask *mask);
13142 void (*send_call_func_single_ipi)(int cpu);
13143 -};
13144 +} __no_const;
13145
13146 /* Globals due to paravirt */
13147 extern void set_cpu_sibling_map(int cpu);
13148 @@ -190,14 +190,8 @@ extern unsigned disabled_cpus __cpuinitdata;
13149 extern int safe_smp_processor_id(void);
13150
13151 #elif defined(CONFIG_X86_64_SMP)
13152 -#define raw_smp_processor_id() (this_cpu_read(cpu_number))
13153 -
13154 -#define stack_smp_processor_id() \
13155 -({ \
13156 - struct thread_info *ti; \
13157 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
13158 - ti->cpu; \
13159 -})
13160 +#define raw_smp_processor_id() (this_cpu_read(cpu_number))
13161 +#define stack_smp_processor_id() raw_smp_processor_id()
13162 #define safe_smp_processor_id() smp_processor_id()
13163
13164 #endif
13165 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
13166 index 33692ea..350a534 100644
13167 --- a/arch/x86/include/asm/spinlock.h
13168 +++ b/arch/x86/include/asm/spinlock.h
13169 @@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
13170 static inline void arch_read_lock(arch_rwlock_t *rw)
13171 {
13172 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
13173 +
13174 +#ifdef CONFIG_PAX_REFCOUNT
13175 + "jno 0f\n"
13176 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
13177 + "int $4\n0:\n"
13178 + _ASM_EXTABLE(0b, 0b)
13179 +#endif
13180 +
13181 "jns 1f\n"
13182 "call __read_lock_failed\n\t"
13183 "1:\n"
13184 @@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
13185 static inline void arch_write_lock(arch_rwlock_t *rw)
13186 {
13187 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
13188 +
13189 +#ifdef CONFIG_PAX_REFCOUNT
13190 + "jno 0f\n"
13191 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
13192 + "int $4\n0:\n"
13193 + _ASM_EXTABLE(0b, 0b)
13194 +#endif
13195 +
13196 "jz 1f\n"
13197 "call __write_lock_failed\n\t"
13198 "1:\n"
13199 @@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
13200
13201 static inline void arch_read_unlock(arch_rwlock_t *rw)
13202 {
13203 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
13204 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
13205 +
13206 +#ifdef CONFIG_PAX_REFCOUNT
13207 + "jno 0f\n"
13208 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
13209 + "int $4\n0:\n"
13210 + _ASM_EXTABLE(0b, 0b)
13211 +#endif
13212 +
13213 :"+m" (rw->lock) : : "memory");
13214 }
13215
13216 static inline void arch_write_unlock(arch_rwlock_t *rw)
13217 {
13218 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
13219 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
13220 +
13221 +#ifdef CONFIG_PAX_REFCOUNT
13222 + "jno 0f\n"
13223 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
13224 + "int $4\n0:\n"
13225 + _ASM_EXTABLE(0b, 0b)
13226 +#endif
13227 +
13228 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
13229 }
13230
13231 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
13232 index 6a99859..03cb807 100644
13233 --- a/arch/x86/include/asm/stackprotector.h
13234 +++ b/arch/x86/include/asm/stackprotector.h
13235 @@ -47,7 +47,7 @@
13236 * head_32 for boot CPU and setup_per_cpu_areas() for others.
13237 */
13238 #define GDT_STACK_CANARY_INIT \
13239 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
13240 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
13241
13242 /*
13243 * Initialize the stackprotector canary value.
13244 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
13245
13246 static inline void load_stack_canary_segment(void)
13247 {
13248 -#ifdef CONFIG_X86_32
13249 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
13250 asm volatile ("mov %0, %%gs" : : "r" (0));
13251 #endif
13252 }
13253 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
13254 index 70bbe39..4ae2bd4 100644
13255 --- a/arch/x86/include/asm/stacktrace.h
13256 +++ b/arch/x86/include/asm/stacktrace.h
13257 @@ -11,28 +11,20 @@
13258
13259 extern int kstack_depth_to_print;
13260
13261 -struct thread_info;
13262 +struct task_struct;
13263 struct stacktrace_ops;
13264
13265 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
13266 - unsigned long *stack,
13267 - unsigned long bp,
13268 - const struct stacktrace_ops *ops,
13269 - void *data,
13270 - unsigned long *end,
13271 - int *graph);
13272 +typedef unsigned long walk_stack_t(struct task_struct *task,
13273 + void *stack_start,
13274 + unsigned long *stack,
13275 + unsigned long bp,
13276 + const struct stacktrace_ops *ops,
13277 + void *data,
13278 + unsigned long *end,
13279 + int *graph);
13280
13281 -extern unsigned long
13282 -print_context_stack(struct thread_info *tinfo,
13283 - unsigned long *stack, unsigned long bp,
13284 - const struct stacktrace_ops *ops, void *data,
13285 - unsigned long *end, int *graph);
13286 -
13287 -extern unsigned long
13288 -print_context_stack_bp(struct thread_info *tinfo,
13289 - unsigned long *stack, unsigned long bp,
13290 - const struct stacktrace_ops *ops, void *data,
13291 - unsigned long *end, int *graph);
13292 +extern walk_stack_t print_context_stack;
13293 +extern walk_stack_t print_context_stack_bp;
13294
13295 /* Generic stack tracer with callbacks */
13296
13297 @@ -40,7 +32,7 @@ struct stacktrace_ops {
13298 void (*address)(void *data, unsigned long address, int reliable);
13299 /* On negative return stop dumping */
13300 int (*stack)(void *data, char *name);
13301 - walk_stack_t walk_stack;
13302 + walk_stack_t *walk_stack;
13303 };
13304
13305 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
13306 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
13307 index 4ec45b3..a4f0a8a 100644
13308 --- a/arch/x86/include/asm/switch_to.h
13309 +++ b/arch/x86/include/asm/switch_to.h
13310 @@ -108,7 +108,7 @@ do { \
13311 "call __switch_to\n\t" \
13312 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
13313 __switch_canary \
13314 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
13315 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
13316 "movq %%rax,%%rdi\n\t" \
13317 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
13318 "jnz ret_from_fork\n\t" \
13319 @@ -119,7 +119,7 @@ do { \
13320 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
13321 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
13322 [_tif_fork] "i" (_TIF_FORK), \
13323 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
13324 + [thread_info] "m" (current_tinfo), \
13325 [current_task] "m" (current_task) \
13326 __switch_canary_iparam \
13327 : "memory", "cc" __EXTRA_CLOBBER)
13328 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
13329 index 2d946e6..e453ec4 100644
13330 --- a/arch/x86/include/asm/thread_info.h
13331 +++ b/arch/x86/include/asm/thread_info.h
13332 @@ -10,6 +10,7 @@
13333 #include <linux/compiler.h>
13334 #include <asm/page.h>
13335 #include <asm/types.h>
13336 +#include <asm/percpu.h>
13337
13338 /*
13339 * low level task data that entry.S needs immediate access to
13340 @@ -24,7 +25,6 @@ struct exec_domain;
13341 #include <linux/atomic.h>
13342
13343 struct thread_info {
13344 - struct task_struct *task; /* main task structure */
13345 struct exec_domain *exec_domain; /* execution domain */
13346 __u32 flags; /* low level flags */
13347 __u32 status; /* thread synchronous flags */
13348 @@ -34,19 +34,13 @@ struct thread_info {
13349 mm_segment_t addr_limit;
13350 struct restart_block restart_block;
13351 void __user *sysenter_return;
13352 -#ifdef CONFIG_X86_32
13353 - unsigned long previous_esp; /* ESP of the previous stack in
13354 - case of nested (IRQ) stacks
13355 - */
13356 - __u8 supervisor_stack[0];
13357 -#endif
13358 + unsigned long lowest_stack;
13359 unsigned int sig_on_uaccess_error:1;
13360 unsigned int uaccess_err:1; /* uaccess failed */
13361 };
13362
13363 -#define INIT_THREAD_INFO(tsk) \
13364 +#define INIT_THREAD_INFO \
13365 { \
13366 - .task = &tsk, \
13367 .exec_domain = &default_exec_domain, \
13368 .flags = 0, \
13369 .cpu = 0, \
13370 @@ -57,7 +51,7 @@ struct thread_info {
13371 }, \
13372 }
13373
13374 -#define init_thread_info (init_thread_union.thread_info)
13375 +#define init_thread_info (init_thread_union.stack)
13376 #define init_stack (init_thread_union.stack)
13377
13378 #else /* !__ASSEMBLY__ */
13379 @@ -98,6 +92,7 @@ struct thread_info {
13380 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
13381 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
13382 #define TIF_X32 30 /* 32-bit native x86-64 binary */
13383 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
13384
13385 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
13386 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
13387 @@ -122,17 +117,18 @@ struct thread_info {
13388 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
13389 #define _TIF_ADDR32 (1 << TIF_ADDR32)
13390 #define _TIF_X32 (1 << TIF_X32)
13391 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
13392
13393 /* work to do in syscall_trace_enter() */
13394 #define _TIF_WORK_SYSCALL_ENTRY \
13395 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
13396 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
13397 - _TIF_NOHZ)
13398 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
13399
13400 /* work to do in syscall_trace_leave() */
13401 #define _TIF_WORK_SYSCALL_EXIT \
13402 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
13403 - _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
13404 + _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
13405
13406 /* work to do on interrupt/exception return */
13407 #define _TIF_WORK_MASK \
13408 @@ -143,7 +139,7 @@ struct thread_info {
13409 /* work to do on any return to user space */
13410 #define _TIF_ALLWORK_MASK \
13411 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
13412 - _TIF_NOHZ)
13413 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
13414
13415 /* Only used for 64 bit */
13416 #define _TIF_DO_NOTIFY_MASK \
13417 @@ -159,45 +155,40 @@ struct thread_info {
13418
13419 #define PREEMPT_ACTIVE 0x10000000
13420
13421 -#ifdef CONFIG_X86_32
13422 -
13423 -#define STACK_WARN (THREAD_SIZE/8)
13424 -/*
13425 - * macros/functions for gaining access to the thread information structure
13426 - *
13427 - * preempt_count needs to be 1 initially, until the scheduler is functional.
13428 - */
13429 -#ifndef __ASSEMBLY__
13430 -
13431 -
13432 -/* how to get the current stack pointer from C */
13433 -register unsigned long current_stack_pointer asm("esp") __used;
13434 -
13435 -/* how to get the thread information struct from C */
13436 -static inline struct thread_info *current_thread_info(void)
13437 -{
13438 - return (struct thread_info *)
13439 - (current_stack_pointer & ~(THREAD_SIZE - 1));
13440 -}
13441 -
13442 -#else /* !__ASSEMBLY__ */
13443 -
13444 +#ifdef __ASSEMBLY__
13445 /* how to get the thread information struct from ASM */
13446 #define GET_THREAD_INFO(reg) \
13447 - movl $-THREAD_SIZE, reg; \
13448 - andl %esp, reg
13449 + mov PER_CPU_VAR(current_tinfo), reg
13450
13451 /* use this one if reg already contains %esp */
13452 -#define GET_THREAD_INFO_WITH_ESP(reg) \
13453 - andl $-THREAD_SIZE, reg
13454 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
13455 +#else
13456 +/* how to get the thread information struct from C */
13457 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
13458 +
13459 +static __always_inline struct thread_info *current_thread_info(void)
13460 +{
13461 + return this_cpu_read_stable(current_tinfo);
13462 +}
13463 +#endif
13464 +
13465 +#ifdef CONFIG_X86_32
13466 +
13467 +#define STACK_WARN (THREAD_SIZE/8)
13468 +/*
13469 + * macros/functions for gaining access to the thread information structure
13470 + *
13471 + * preempt_count needs to be 1 initially, until the scheduler is functional.
13472 + */
13473 +#ifndef __ASSEMBLY__
13474 +
13475 +/* how to get the current stack pointer from C */
13476 +register unsigned long current_stack_pointer asm("esp") __used;
13477
13478 #endif
13479
13480 #else /* X86_32 */
13481
13482 -#include <asm/percpu.h>
13483 -#define KERNEL_STACK_OFFSET (5*8)
13484 -
13485 /*
13486 * macros/functions for gaining access to the thread information structure
13487 * preempt_count needs to be 1 initially, until the scheduler is functional.
13488 @@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
13489 #ifndef __ASSEMBLY__
13490 DECLARE_PER_CPU(unsigned long, kernel_stack);
13491
13492 -static inline struct thread_info *current_thread_info(void)
13493 -{
13494 - struct thread_info *ti;
13495 - ti = (void *)(this_cpu_read_stable(kernel_stack) +
13496 - KERNEL_STACK_OFFSET - THREAD_SIZE);
13497 - return ti;
13498 -}
13499 -
13500 -#else /* !__ASSEMBLY__ */
13501 -
13502 -/* how to get the thread information struct from ASM */
13503 -#define GET_THREAD_INFO(reg) \
13504 - movq PER_CPU_VAR(kernel_stack),reg ; \
13505 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
13506 -
13507 -/*
13508 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
13509 - * a certain register (to be used in assembler memory operands).
13510 - */
13511 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
13512 -
13513 +/* how to get the current stack pointer from C */
13514 +register unsigned long current_stack_pointer asm("rsp") __used;
13515 #endif
13516
13517 #endif /* !X86_32 */
13518 @@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
13519 extern void arch_task_cache_init(void);
13520 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
13521 extern void arch_release_task_struct(struct task_struct *tsk);
13522 +
13523 +#define __HAVE_THREAD_FUNCTIONS
13524 +#define task_thread_info(task) (&(task)->tinfo)
13525 +#define task_stack_page(task) ((task)->stack)
13526 +#define setup_thread_stack(p, org) do {} while (0)
13527 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
13528 +
13529 #endif
13530 #endif /* _ASM_X86_THREAD_INFO_H */
13531 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
13532 index 7ccf8d1..7cdca12 100644
13533 --- a/arch/x86/include/asm/uaccess.h
13534 +++ b/arch/x86/include/asm/uaccess.h
13535 @@ -7,6 +7,7 @@
13536 #include <linux/compiler.h>
13537 #include <linux/thread_info.h>
13538 #include <linux/string.h>
13539 +#include <linux/sched.h>
13540 #include <asm/asm.h>
13541 #include <asm/page.h>
13542 #include <asm/smap.h>
13543 @@ -14,6 +15,8 @@
13544 #define VERIFY_READ 0
13545 #define VERIFY_WRITE 1
13546
13547 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
13548 +
13549 /*
13550 * The fs value determines whether argument validity checking should be
13551 * performed or not. If get_fs() == USER_DS, checking is performed, with
13552 @@ -29,7 +32,12 @@
13553
13554 #define get_ds() (KERNEL_DS)
13555 #define get_fs() (current_thread_info()->addr_limit)
13556 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13557 +void __set_fs(mm_segment_t x);
13558 +void set_fs(mm_segment_t x);
13559 +#else
13560 #define set_fs(x) (current_thread_info()->addr_limit = (x))
13561 +#endif
13562
13563 #define segment_eq(a, b) ((a).seg == (b).seg)
13564
13565 @@ -77,8 +85,33 @@
13566 * checks that the pointer is in the user space range - after calling
13567 * this function, memory access functions may still return -EFAULT.
13568 */
13569 -#define access_ok(type, addr, size) \
13570 - (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
13571 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
13572 +#define access_ok(type, addr, size) \
13573 +({ \
13574 + long __size = size; \
13575 + unsigned long __addr = (unsigned long)addr; \
13576 + unsigned long __addr_ao = __addr & PAGE_MASK; \
13577 + unsigned long __end_ao = __addr + __size - 1; \
13578 + bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
13579 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
13580 + while(__addr_ao <= __end_ao) { \
13581 + char __c_ao; \
13582 + __addr_ao += PAGE_SIZE; \
13583 + if (__size > PAGE_SIZE) \
13584 + cond_resched(); \
13585 + if (__get_user(__c_ao, (char __user *)__addr)) \
13586 + break; \
13587 + if (type != VERIFY_WRITE) { \
13588 + __addr = __addr_ao; \
13589 + continue; \
13590 + } \
13591 + if (__put_user(__c_ao, (char __user *)__addr)) \
13592 + break; \
13593 + __addr = __addr_ao; \
13594 + } \
13595 + } \
13596 + __ret_ao; \
13597 +})
13598
13599 /*
13600 * The exception table consists of pairs of addresses relative to the
13601 @@ -189,13 +222,21 @@ extern int __get_user_bad(void);
13602 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
13603 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
13604
13605 -
13606 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13607 +#define __copyuser_seg "gs;"
13608 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
13609 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
13610 +#else
13611 +#define __copyuser_seg
13612 +#define __COPYUSER_SET_ES
13613 +#define __COPYUSER_RESTORE_ES
13614 +#endif
13615
13616 #ifdef CONFIG_X86_32
13617 #define __put_user_asm_u64(x, addr, err, errret) \
13618 asm volatile(ASM_STAC "\n" \
13619 - "1: movl %%eax,0(%2)\n" \
13620 - "2: movl %%edx,4(%2)\n" \
13621 + "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
13622 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
13623 "3: " ASM_CLAC "\n" \
13624 ".section .fixup,\"ax\"\n" \
13625 "4: movl %3,%0\n" \
13626 @@ -208,8 +249,8 @@ extern int __get_user_bad(void);
13627
13628 #define __put_user_asm_ex_u64(x, addr) \
13629 asm volatile(ASM_STAC "\n" \
13630 - "1: movl %%eax,0(%1)\n" \
13631 - "2: movl %%edx,4(%1)\n" \
13632 + "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
13633 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
13634 "3: " ASM_CLAC "\n" \
13635 _ASM_EXTABLE_EX(1b, 2b) \
13636 _ASM_EXTABLE_EX(2b, 3b) \
13637 @@ -261,7 +302,7 @@ extern void __put_user_8(void);
13638 __typeof__(*(ptr)) __pu_val; \
13639 __chk_user_ptr(ptr); \
13640 might_fault(); \
13641 - __pu_val = x; \
13642 + __pu_val = (x); \
13643 switch (sizeof(*(ptr))) { \
13644 case 1: \
13645 __put_user_x(1, __pu_val, ptr, __ret_pu); \
13646 @@ -383,7 +424,7 @@ do { \
13647
13648 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13649 asm volatile(ASM_STAC "\n" \
13650 - "1: mov"itype" %2,%"rtype"1\n" \
13651 + "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
13652 "2: " ASM_CLAC "\n" \
13653 ".section .fixup,\"ax\"\n" \
13654 "3: mov %3,%0\n" \
13655 @@ -391,7 +432,7 @@ do { \
13656 " jmp 2b\n" \
13657 ".previous\n" \
13658 _ASM_EXTABLE(1b, 3b) \
13659 - : "=r" (err), ltype(x) \
13660 + : "=r" (err), ltype (x) \
13661 : "m" (__m(addr)), "i" (errret), "0" (err))
13662
13663 #define __get_user_size_ex(x, ptr, size) \
13664 @@ -416,7 +457,7 @@ do { \
13665 } while (0)
13666
13667 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
13668 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
13669 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
13670 "2:\n" \
13671 _ASM_EXTABLE_EX(1b, 2b) \
13672 : ltype(x) : "m" (__m(addr)))
13673 @@ -433,13 +474,24 @@ do { \
13674 int __gu_err; \
13675 unsigned long __gu_val; \
13676 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
13677 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
13678 + (x) = (__typeof__(*(ptr)))__gu_val; \
13679 __gu_err; \
13680 })
13681
13682 /* FIXME: this hack is definitely wrong -AK */
13683 struct __large_struct { unsigned long buf[100]; };
13684 -#define __m(x) (*(struct __large_struct __user *)(x))
13685 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13686 +#define ____m(x) \
13687 +({ \
13688 + unsigned long ____x = (unsigned long)(x); \
13689 + if (____x < PAX_USER_SHADOW_BASE) \
13690 + ____x += PAX_USER_SHADOW_BASE; \
13691 + (void __user *)____x; \
13692 +})
13693 +#else
13694 +#define ____m(x) (x)
13695 +#endif
13696 +#define __m(x) (*(struct __large_struct __user *)____m(x))
13697
13698 /*
13699 * Tell gcc we read from memory instead of writing: this is because
13700 @@ -448,7 +500,7 @@ struct __large_struct { unsigned long buf[100]; };
13701 */
13702 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13703 asm volatile(ASM_STAC "\n" \
13704 - "1: mov"itype" %"rtype"1,%2\n" \
13705 + "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
13706 "2: " ASM_CLAC "\n" \
13707 ".section .fixup,\"ax\"\n" \
13708 "3: mov %3,%0\n" \
13709 @@ -456,10 +508,10 @@ struct __large_struct { unsigned long buf[100]; };
13710 ".previous\n" \
13711 _ASM_EXTABLE(1b, 3b) \
13712 : "=r"(err) \
13713 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
13714 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
13715
13716 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
13717 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
13718 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
13719 "2:\n" \
13720 _ASM_EXTABLE_EX(1b, 2b) \
13721 : : ltype(x), "m" (__m(addr)))
13722 @@ -498,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
13723 * On error, the variable @x is set to zero.
13724 */
13725
13726 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13727 +#define __get_user(x, ptr) get_user((x), (ptr))
13728 +#else
13729 #define __get_user(x, ptr) \
13730 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
13731 +#endif
13732
13733 /**
13734 * __put_user: - Write a simple value into user space, with less checking.
13735 @@ -521,8 +577,12 @@ struct __large_struct { unsigned long buf[100]; };
13736 * Returns zero on success, or -EFAULT on error.
13737 */
13738
13739 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13740 +#define __put_user(x, ptr) put_user((x), (ptr))
13741 +#else
13742 #define __put_user(x, ptr) \
13743 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
13744 +#endif
13745
13746 #define __get_user_unaligned __get_user
13747 #define __put_user_unaligned __put_user
13748 @@ -540,7 +600,7 @@ struct __large_struct { unsigned long buf[100]; };
13749 #define get_user_ex(x, ptr) do { \
13750 unsigned long __gue_val; \
13751 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
13752 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
13753 + (x) = (__typeof__(*(ptr)))__gue_val; \
13754 } while (0)
13755
13756 #ifdef CONFIG_X86_WP_WORKS_OK
13757 @@ -574,8 +634,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
13758 extern __must_check long strlen_user(const char __user *str);
13759 extern __must_check long strnlen_user(const char __user *str, long n);
13760
13761 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13762 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13763 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13764 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13765
13766 /*
13767 * movsl can be slow when source and dest are not both 8-byte aligned
13768 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
13769 index 7f760a9..00f93c0 100644
13770 --- a/arch/x86/include/asm/uaccess_32.h
13771 +++ b/arch/x86/include/asm/uaccess_32.h
13772 @@ -11,15 +11,15 @@
13773 #include <asm/page.h>
13774
13775 unsigned long __must_check __copy_to_user_ll
13776 - (void __user *to, const void *from, unsigned long n);
13777 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
13778 unsigned long __must_check __copy_from_user_ll
13779 - (void *to, const void __user *from, unsigned long n);
13780 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13781 unsigned long __must_check __copy_from_user_ll_nozero
13782 - (void *to, const void __user *from, unsigned long n);
13783 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13784 unsigned long __must_check __copy_from_user_ll_nocache
13785 - (void *to, const void __user *from, unsigned long n);
13786 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13787 unsigned long __must_check __copy_from_user_ll_nocache_nozero
13788 - (void *to, const void __user *from, unsigned long n);
13789 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13790
13791 /**
13792 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
13793 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
13794 static __always_inline unsigned long __must_check
13795 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13796 {
13797 + if ((long)n < 0)
13798 + return n;
13799 +
13800 if (__builtin_constant_p(n)) {
13801 unsigned long ret;
13802
13803 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13804 return ret;
13805 }
13806 }
13807 + if (!__builtin_constant_p(n))
13808 + check_object_size(from, n, true);
13809 return __copy_to_user_ll(to, from, n);
13810 }
13811
13812 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
13813 __copy_to_user(void __user *to, const void *from, unsigned long n)
13814 {
13815 might_fault();
13816 +
13817 return __copy_to_user_inatomic(to, from, n);
13818 }
13819
13820 static __always_inline unsigned long
13821 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13822 {
13823 + if ((long)n < 0)
13824 + return n;
13825 +
13826 /* Avoid zeroing the tail if the copy fails..
13827 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
13828 * but as the zeroing behaviour is only significant when n is not
13829 @@ -137,6 +146,10 @@ static __always_inline unsigned long
13830 __copy_from_user(void *to, const void __user *from, unsigned long n)
13831 {
13832 might_fault();
13833 +
13834 + if ((long)n < 0)
13835 + return n;
13836 +
13837 if (__builtin_constant_p(n)) {
13838 unsigned long ret;
13839
13840 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
13841 return ret;
13842 }
13843 }
13844 + if (!__builtin_constant_p(n))
13845 + check_object_size(to, n, false);
13846 return __copy_from_user_ll(to, from, n);
13847 }
13848
13849 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
13850 const void __user *from, unsigned long n)
13851 {
13852 might_fault();
13853 +
13854 + if ((long)n < 0)
13855 + return n;
13856 +
13857 if (__builtin_constant_p(n)) {
13858 unsigned long ret;
13859
13860 @@ -181,15 +200,19 @@ static __always_inline unsigned long
13861 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
13862 unsigned long n)
13863 {
13864 - return __copy_from_user_ll_nocache_nozero(to, from, n);
13865 + if ((long)n < 0)
13866 + return n;
13867 +
13868 + return __copy_from_user_ll_nocache_nozero(to, from, n);
13869 }
13870
13871 -unsigned long __must_check copy_to_user(void __user *to,
13872 - const void *from, unsigned long n);
13873 -unsigned long __must_check _copy_from_user(void *to,
13874 - const void __user *from,
13875 - unsigned long n);
13876 -
13877 +extern void copy_to_user_overflow(void)
13878 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13879 + __compiletime_error("copy_to_user() buffer size is not provably correct")
13880 +#else
13881 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
13882 +#endif
13883 +;
13884
13885 extern void copy_from_user_overflow(void)
13886 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13887 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
13888 #endif
13889 ;
13890
13891 -static inline unsigned long __must_check copy_from_user(void *to,
13892 - const void __user *from,
13893 - unsigned long n)
13894 +/**
13895 + * copy_to_user: - Copy a block of data into user space.
13896 + * @to: Destination address, in user space.
13897 + * @from: Source address, in kernel space.
13898 + * @n: Number of bytes to copy.
13899 + *
13900 + * Context: User context only. This function may sleep.
13901 + *
13902 + * Copy data from kernel space to user space.
13903 + *
13904 + * Returns number of bytes that could not be copied.
13905 + * On success, this will be zero.
13906 + */
13907 +static inline unsigned long __must_check
13908 +copy_to_user(void __user *to, const void *from, unsigned long n)
13909 {
13910 - int sz = __compiletime_object_size(to);
13911 + size_t sz = __compiletime_object_size(from);
13912
13913 - if (likely(sz == -1 || sz >= n))
13914 - n = _copy_from_user(to, from, n);
13915 - else
13916 + if (unlikely(sz != (size_t)-1 && sz < n))
13917 + copy_to_user_overflow();
13918 + else if (access_ok(VERIFY_WRITE, to, n))
13919 + n = __copy_to_user(to, from, n);
13920 + return n;
13921 +}
13922 +
13923 +/**
13924 + * copy_from_user: - Copy a block of data from user space.
13925 + * @to: Destination address, in kernel space.
13926 + * @from: Source address, in user space.
13927 + * @n: Number of bytes to copy.
13928 + *
13929 + * Context: User context only. This function may sleep.
13930 + *
13931 + * Copy data from user space to kernel space.
13932 + *
13933 + * Returns number of bytes that could not be copied.
13934 + * On success, this will be zero.
13935 + *
13936 + * If some data could not be copied, this function will pad the copied
13937 + * data to the requested size using zero bytes.
13938 + */
13939 +static inline unsigned long __must_check
13940 +copy_from_user(void *to, const void __user *from, unsigned long n)
13941 +{
13942 + size_t sz = __compiletime_object_size(to);
13943 +
13944 + if (unlikely(sz != (size_t)-1 && sz < n))
13945 copy_from_user_overflow();
13946 -
13947 + else if (access_ok(VERIFY_READ, from, n))
13948 + n = __copy_from_user(to, from, n);
13949 + else if ((long)n > 0) {
13950 + if (!__builtin_constant_p(n))
13951 + check_object_size(to, n, false);
13952 + memset(to, 0, n);
13953 + }
13954 return n;
13955 }
13956
13957 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13958 index 142810c..4389299 100644
13959 --- a/arch/x86/include/asm/uaccess_64.h
13960 +++ b/arch/x86/include/asm/uaccess_64.h
13961 @@ -10,6 +10,9 @@
13962 #include <asm/alternative.h>
13963 #include <asm/cpufeature.h>
13964 #include <asm/page.h>
13965 +#include <asm/pgtable.h>
13966 +
13967 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
13968
13969 /*
13970 * Copy To/From Userspace
13971 @@ -17,13 +20,13 @@
13972
13973 /* Handles exceptions in both to and from, but doesn't do access_ok */
13974 __must_check unsigned long
13975 -copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
13976 +copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
13977 __must_check unsigned long
13978 -copy_user_generic_string(void *to, const void *from, unsigned len);
13979 +copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
13980 __must_check unsigned long
13981 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13982 +copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
13983
13984 -static __always_inline __must_check unsigned long
13985 +static __always_inline __must_check __size_overflow(3) unsigned long
13986 copy_user_generic(void *to, const void *from, unsigned len)
13987 {
13988 unsigned ret;
13989 @@ -41,142 +44,205 @@ copy_user_generic(void *to, const void *from, unsigned len)
13990 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13991 "=d" (len)),
13992 "1" (to), "2" (from), "3" (len)
13993 - : "memory", "rcx", "r8", "r9", "r10", "r11");
13994 + : "memory", "rcx", "r8", "r9", "r11");
13995 return ret;
13996 }
13997
13998 +static __always_inline __must_check unsigned long
13999 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
14000 +static __always_inline __must_check unsigned long
14001 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
14002 __must_check unsigned long
14003 -_copy_to_user(void __user *to, const void *from, unsigned len);
14004 -__must_check unsigned long
14005 -_copy_from_user(void *to, const void __user *from, unsigned len);
14006 -__must_check unsigned long
14007 -copy_in_user(void __user *to, const void __user *from, unsigned len);
14008 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
14009 +
14010 +extern void copy_to_user_overflow(void)
14011 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14012 + __compiletime_error("copy_to_user() buffer size is not provably correct")
14013 +#else
14014 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
14015 +#endif
14016 +;
14017 +
14018 +extern void copy_from_user_overflow(void)
14019 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14020 + __compiletime_error("copy_from_user() buffer size is not provably correct")
14021 +#else
14022 + __compiletime_warning("copy_from_user() buffer size is not provably correct")
14023 +#endif
14024 +;
14025
14026 static inline unsigned long __must_check copy_from_user(void *to,
14027 const void __user *from,
14028 unsigned long n)
14029 {
14030 - int sz = __compiletime_object_size(to);
14031 -
14032 might_fault();
14033 - if (likely(sz == -1 || sz >= n))
14034 - n = _copy_from_user(to, from, n);
14035 -#ifdef CONFIG_DEBUG_VM
14036 - else
14037 - WARN(1, "Buffer overflow detected!\n");
14038 -#endif
14039 +
14040 + if (access_ok(VERIFY_READ, from, n))
14041 + n = __copy_from_user(to, from, n);
14042 + else if (n < INT_MAX) {
14043 + if (!__builtin_constant_p(n))
14044 + check_object_size(to, n, false);
14045 + memset(to, 0, n);
14046 + }
14047 return n;
14048 }
14049
14050 static __always_inline __must_check
14051 -int copy_to_user(void __user *dst, const void *src, unsigned size)
14052 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
14053 {
14054 might_fault();
14055
14056 - return _copy_to_user(dst, src, size);
14057 + if (access_ok(VERIFY_WRITE, dst, size))
14058 + size = __copy_to_user(dst, src, size);
14059 + return size;
14060 }
14061
14062 static __always_inline __must_check
14063 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
14064 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
14065 {
14066 - int ret = 0;
14067 + size_t sz = __compiletime_object_size(dst);
14068 + unsigned ret = 0;
14069
14070 might_fault();
14071 - if (!__builtin_constant_p(size))
14072 - return copy_user_generic(dst, (__force void *)src, size);
14073 +
14074 + if (size > INT_MAX)
14075 + return size;
14076 +
14077 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14078 + if (!__access_ok(VERIFY_READ, src, size))
14079 + return size;
14080 +#endif
14081 +
14082 + if (unlikely(sz != (size_t)-1 && sz < size)) {
14083 + copy_from_user_overflow();
14084 + return size;
14085 + }
14086 +
14087 + if (!__builtin_constant_p(size)) {
14088 + check_object_size(dst, size, false);
14089 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14090 + }
14091 switch (size) {
14092 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
14093 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
14094 ret, "b", "b", "=q", 1);
14095 return ret;
14096 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
14097 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
14098 ret, "w", "w", "=r", 2);
14099 return ret;
14100 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
14101 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
14102 ret, "l", "k", "=r", 4);
14103 return ret;
14104 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
14105 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14106 ret, "q", "", "=r", 8);
14107 return ret;
14108 case 10:
14109 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
14110 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14111 ret, "q", "", "=r", 10);
14112 if (unlikely(ret))
14113 return ret;
14114 __get_user_asm(*(u16 *)(8 + (char *)dst),
14115 - (u16 __user *)(8 + (char __user *)src),
14116 + (const u16 __user *)(8 + (const char __user *)src),
14117 ret, "w", "w", "=r", 2);
14118 return ret;
14119 case 16:
14120 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
14121 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14122 ret, "q", "", "=r", 16);
14123 if (unlikely(ret))
14124 return ret;
14125 __get_user_asm(*(u64 *)(8 + (char *)dst),
14126 - (u64 __user *)(8 + (char __user *)src),
14127 + (const u64 __user *)(8 + (const char __user *)src),
14128 ret, "q", "", "=r", 8);
14129 return ret;
14130 default:
14131 - return copy_user_generic(dst, (__force void *)src, size);
14132 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14133 }
14134 }
14135
14136 static __always_inline __must_check
14137 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
14138 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
14139 {
14140 - int ret = 0;
14141 + size_t sz = __compiletime_object_size(src);
14142 + unsigned ret = 0;
14143
14144 might_fault();
14145 - if (!__builtin_constant_p(size))
14146 - return copy_user_generic((__force void *)dst, src, size);
14147 +
14148 + if (size > INT_MAX)
14149 + return size;
14150 +
14151 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14152 + if (!__access_ok(VERIFY_WRITE, dst, size))
14153 + return size;
14154 +#endif
14155 +
14156 + if (unlikely(sz != (size_t)-1 && sz < size)) {
14157 + copy_to_user_overflow();
14158 + return size;
14159 + }
14160 +
14161 + if (!__builtin_constant_p(size)) {
14162 + check_object_size(src, size, true);
14163 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14164 + }
14165 switch (size) {
14166 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
14167 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
14168 ret, "b", "b", "iq", 1);
14169 return ret;
14170 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
14171 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
14172 ret, "w", "w", "ir", 2);
14173 return ret;
14174 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
14175 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
14176 ret, "l", "k", "ir", 4);
14177 return ret;
14178 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
14179 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14180 ret, "q", "", "er", 8);
14181 return ret;
14182 case 10:
14183 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
14184 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14185 ret, "q", "", "er", 10);
14186 if (unlikely(ret))
14187 return ret;
14188 asm("":::"memory");
14189 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
14190 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
14191 ret, "w", "w", "ir", 2);
14192 return ret;
14193 case 16:
14194 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
14195 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14196 ret, "q", "", "er", 16);
14197 if (unlikely(ret))
14198 return ret;
14199 asm("":::"memory");
14200 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
14201 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
14202 ret, "q", "", "er", 8);
14203 return ret;
14204 default:
14205 - return copy_user_generic((__force void *)dst, src, size);
14206 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14207 }
14208 }
14209
14210 static __always_inline __must_check
14211 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14212 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
14213 {
14214 - int ret = 0;
14215 + unsigned ret = 0;
14216
14217 might_fault();
14218 +
14219 + if (size > INT_MAX)
14220 + return size;
14221 +
14222 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14223 + if (!__access_ok(VERIFY_READ, src, size))
14224 + return size;
14225 + if (!__access_ok(VERIFY_WRITE, dst, size))
14226 + return size;
14227 +#endif
14228 +
14229 if (!__builtin_constant_p(size))
14230 - return copy_user_generic((__force void *)dst,
14231 - (__force void *)src, size);
14232 + return copy_user_generic((__force_kernel void *)____m(dst),
14233 + (__force_kernel const void *)____m(src), size);
14234 switch (size) {
14235 case 1: {
14236 u8 tmp;
14237 - __get_user_asm(tmp, (u8 __user *)src,
14238 + __get_user_asm(tmp, (const u8 __user *)src,
14239 ret, "b", "b", "=q", 1);
14240 if (likely(!ret))
14241 __put_user_asm(tmp, (u8 __user *)dst,
14242 @@ -185,7 +251,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14243 }
14244 case 2: {
14245 u16 tmp;
14246 - __get_user_asm(tmp, (u16 __user *)src,
14247 + __get_user_asm(tmp, (const u16 __user *)src,
14248 ret, "w", "w", "=r", 2);
14249 if (likely(!ret))
14250 __put_user_asm(tmp, (u16 __user *)dst,
14251 @@ -195,7 +261,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14252
14253 case 4: {
14254 u32 tmp;
14255 - __get_user_asm(tmp, (u32 __user *)src,
14256 + __get_user_asm(tmp, (const u32 __user *)src,
14257 ret, "l", "k", "=r", 4);
14258 if (likely(!ret))
14259 __put_user_asm(tmp, (u32 __user *)dst,
14260 @@ -204,7 +270,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14261 }
14262 case 8: {
14263 u64 tmp;
14264 - __get_user_asm(tmp, (u64 __user *)src,
14265 + __get_user_asm(tmp, (const u64 __user *)src,
14266 ret, "q", "", "=r", 8);
14267 if (likely(!ret))
14268 __put_user_asm(tmp, (u64 __user *)dst,
14269 @@ -212,41 +278,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14270 return ret;
14271 }
14272 default:
14273 - return copy_user_generic((__force void *)dst,
14274 - (__force void *)src, size);
14275 + return copy_user_generic((__force_kernel void *)____m(dst),
14276 + (__force_kernel const void *)____m(src), size);
14277 }
14278 }
14279
14280 static __must_check __always_inline int
14281 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
14282 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
14283 {
14284 - return copy_user_generic(dst, (__force const void *)src, size);
14285 + if (size > INT_MAX)
14286 + return size;
14287 +
14288 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14289 + if (!__access_ok(VERIFY_READ, src, size))
14290 + return size;
14291 +#endif
14292 +
14293 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14294 }
14295
14296 -static __must_check __always_inline int
14297 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
14298 +static __must_check __always_inline unsigned long
14299 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
14300 {
14301 - return copy_user_generic((__force void *)dst, src, size);
14302 + if (size > INT_MAX)
14303 + return size;
14304 +
14305 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14306 + if (!__access_ok(VERIFY_WRITE, dst, size))
14307 + return size;
14308 +#endif
14309 +
14310 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14311 }
14312
14313 -extern long __copy_user_nocache(void *dst, const void __user *src,
14314 - unsigned size, int zerorest);
14315 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
14316 + unsigned long size, int zerorest) __size_overflow(3);
14317
14318 -static inline int
14319 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
14320 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
14321 {
14322 might_sleep();
14323 +
14324 + if (size > INT_MAX)
14325 + return size;
14326 +
14327 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14328 + if (!__access_ok(VERIFY_READ, src, size))
14329 + return size;
14330 +#endif
14331 +
14332 return __copy_user_nocache(dst, src, size, 1);
14333 }
14334
14335 -static inline int
14336 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14337 - unsigned size)
14338 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14339 + unsigned long size)
14340 {
14341 + if (size > INT_MAX)
14342 + return size;
14343 +
14344 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14345 + if (!__access_ok(VERIFY_READ, src, size))
14346 + return size;
14347 +#endif
14348 +
14349 return __copy_user_nocache(dst, src, size, 0);
14350 }
14351
14352 -unsigned long
14353 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
14354 +extern unsigned long
14355 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
14356
14357 #endif /* _ASM_X86_UACCESS_64_H */
14358 diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
14359 index 5b238981..77fdd78 100644
14360 --- a/arch/x86/include/asm/word-at-a-time.h
14361 +++ b/arch/x86/include/asm/word-at-a-time.h
14362 @@ -11,7 +11,7 @@
14363 * and shift, for example.
14364 */
14365 struct word_at_a_time {
14366 - const unsigned long one_bits, high_bits;
14367 + unsigned long one_bits, high_bits;
14368 };
14369
14370 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
14371 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
14372 index 5769349..d49a4dd 100644
14373 --- a/arch/x86/include/asm/x86_init.h
14374 +++ b/arch/x86/include/asm/x86_init.h
14375 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
14376 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
14377 void (*find_smp_config)(void);
14378 void (*get_smp_config)(unsigned int early);
14379 -};
14380 +} __no_const;
14381
14382 /**
14383 * struct x86_init_resources - platform specific resource related ops
14384 @@ -43,7 +43,7 @@ struct x86_init_resources {
14385 void (*probe_roms)(void);
14386 void (*reserve_resources)(void);
14387 char *(*memory_setup)(void);
14388 -};
14389 +} __no_const;
14390
14391 /**
14392 * struct x86_init_irqs - platform specific interrupt setup
14393 @@ -56,7 +56,7 @@ struct x86_init_irqs {
14394 void (*pre_vector_init)(void);
14395 void (*intr_init)(void);
14396 void (*trap_init)(void);
14397 -};
14398 +} __no_const;
14399
14400 /**
14401 * struct x86_init_oem - oem platform specific customizing functions
14402 @@ -66,7 +66,7 @@ struct x86_init_irqs {
14403 struct x86_init_oem {
14404 void (*arch_setup)(void);
14405 void (*banner)(void);
14406 -};
14407 +} __no_const;
14408
14409 /**
14410 * struct x86_init_mapping - platform specific initial kernel pagetable setup
14411 @@ -77,7 +77,7 @@ struct x86_init_oem {
14412 */
14413 struct x86_init_mapping {
14414 void (*pagetable_reserve)(u64 start, u64 end);
14415 -};
14416 +} __no_const;
14417
14418 /**
14419 * struct x86_init_paging - platform specific paging functions
14420 @@ -88,7 +88,7 @@ struct x86_init_mapping {
14421 */
14422 struct x86_init_paging {
14423 void (*pagetable_init)(void);
14424 -};
14425 +} __no_const;
14426
14427 /**
14428 * struct x86_init_timers - platform specific timer setup
14429 @@ -103,7 +103,7 @@ struct x86_init_timers {
14430 void (*tsc_pre_init)(void);
14431 void (*timer_init)(void);
14432 void (*wallclock_init)(void);
14433 -};
14434 +} __no_const;
14435
14436 /**
14437 * struct x86_init_iommu - platform specific iommu setup
14438 @@ -111,7 +111,7 @@ struct x86_init_timers {
14439 */
14440 struct x86_init_iommu {
14441 int (*iommu_init)(void);
14442 -};
14443 +} __no_const;
14444
14445 /**
14446 * struct x86_init_pci - platform specific pci init functions
14447 @@ -125,7 +125,7 @@ struct x86_init_pci {
14448 int (*init)(void);
14449 void (*init_irq)(void);
14450 void (*fixup_irqs)(void);
14451 -};
14452 +} __no_const;
14453
14454 /**
14455 * struct x86_init_ops - functions for platform specific setup
14456 @@ -141,7 +141,7 @@ struct x86_init_ops {
14457 struct x86_init_timers timers;
14458 struct x86_init_iommu iommu;
14459 struct x86_init_pci pci;
14460 -};
14461 +} __no_const;
14462
14463 /**
14464 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
14465 @@ -152,7 +152,7 @@ struct x86_cpuinit_ops {
14466 void (*setup_percpu_clockev)(void);
14467 void (*early_percpu_clock_init)(void);
14468 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
14469 -};
14470 +} __no_const;
14471
14472 /**
14473 * struct x86_platform_ops - platform specific runtime functions
14474 @@ -178,7 +178,7 @@ struct x86_platform_ops {
14475 void (*save_sched_clock_state)(void);
14476 void (*restore_sched_clock_state)(void);
14477 void (*apic_post_init)(void);
14478 -};
14479 +} __no_const;
14480
14481 struct pci_dev;
14482
14483 @@ -187,14 +187,14 @@ struct x86_msi_ops {
14484 void (*teardown_msi_irq)(unsigned int irq);
14485 void (*teardown_msi_irqs)(struct pci_dev *dev);
14486 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
14487 -};
14488 +} __no_const;
14489
14490 struct x86_io_apic_ops {
14491 void (*init) (void);
14492 unsigned int (*read) (unsigned int apic, unsigned int reg);
14493 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
14494 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
14495 -};
14496 +} __no_const;
14497
14498 extern struct x86_init_ops x86_init;
14499 extern struct x86_cpuinit_ops x86_cpuinit;
14500 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
14501 index 0415cda..b43d877 100644
14502 --- a/arch/x86/include/asm/xsave.h
14503 +++ b/arch/x86/include/asm/xsave.h
14504 @@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14505 return -EFAULT;
14506
14507 __asm__ __volatile__(ASM_STAC "\n"
14508 - "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
14509 + "1:"
14510 + __copyuser_seg
14511 + ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
14512 "2: " ASM_CLAC "\n"
14513 ".section .fixup,\"ax\"\n"
14514 "3: movl $-1,%[err]\n"
14515 @@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14516 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
14517 {
14518 int err;
14519 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
14520 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
14521 u32 lmask = mask;
14522 u32 hmask = mask >> 32;
14523
14524 __asm__ __volatile__(ASM_STAC "\n"
14525 - "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14526 + "1:"
14527 + __copyuser_seg
14528 + ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14529 "2: " ASM_CLAC "\n"
14530 ".section .fixup,\"ax\"\n"
14531 "3: movl $-1,%[err]\n"
14532 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
14533 index 91ce48f..a48ea05 100644
14534 --- a/arch/x86/kernel/Makefile
14535 +++ b/arch/x86/kernel/Makefile
14536 @@ -23,7 +23,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
14537 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
14538 obj-$(CONFIG_IRQ_WORK) += irq_work.o
14539 obj-y += probe_roms.o
14540 -obj-$(CONFIG_X86_32) += i386_ksyms_32.o
14541 +obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
14542 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
14543 obj-y += syscall_$(BITS).o
14544 obj-$(CONFIG_X86_64) += vsyscall_64.o
14545 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
14546 index 11676cf..a8cf3ec 100644
14547 --- a/arch/x86/kernel/acpi/sleep.c
14548 +++ b/arch/x86/kernel/acpi/sleep.c
14549 @@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
14550 #else /* CONFIG_64BIT */
14551 #ifdef CONFIG_SMP
14552 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
14553 +
14554 + pax_open_kernel();
14555 early_gdt_descr.address =
14556 (unsigned long)get_cpu_gdt_table(smp_processor_id());
14557 + pax_close_kernel();
14558 +
14559 initial_gs = per_cpu_offset(smp_processor_id());
14560 #endif
14561 initial_code = (unsigned long)wakeup_long64;
14562 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
14563 index 13ab720..95d5442 100644
14564 --- a/arch/x86/kernel/acpi/wakeup_32.S
14565 +++ b/arch/x86/kernel/acpi/wakeup_32.S
14566 @@ -30,13 +30,11 @@ wakeup_pmode_return:
14567 # and restore the stack ... but you need gdt for this to work
14568 movl saved_context_esp, %esp
14569
14570 - movl %cs:saved_magic, %eax
14571 - cmpl $0x12345678, %eax
14572 + cmpl $0x12345678, saved_magic
14573 jne bogus_magic
14574
14575 # jump to place where we left off
14576 - movl saved_eip, %eax
14577 - jmp *%eax
14578 + jmp *(saved_eip)
14579
14580 bogus_magic:
14581 jmp bogus_magic
14582 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
14583 index ef5ccca..bd83949 100644
14584 --- a/arch/x86/kernel/alternative.c
14585 +++ b/arch/x86/kernel/alternative.c
14586 @@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
14587 */
14588 for (a = start; a < end; a++) {
14589 instr = (u8 *)&a->instr_offset + a->instr_offset;
14590 +
14591 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14592 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14593 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
14594 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14595 +#endif
14596 +
14597 replacement = (u8 *)&a->repl_offset + a->repl_offset;
14598 BUG_ON(a->replacementlen > a->instrlen);
14599 BUG_ON(a->instrlen > sizeof(insnbuf));
14600 @@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
14601 for (poff = start; poff < end; poff++) {
14602 u8 *ptr = (u8 *)poff + *poff;
14603
14604 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14605 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14606 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
14607 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14608 +#endif
14609 +
14610 if (!*poff || ptr < text || ptr >= text_end)
14611 continue;
14612 /* turn DS segment override prefix into lock prefix */
14613 - if (*ptr == 0x3e)
14614 + if (*ktla_ktva(ptr) == 0x3e)
14615 text_poke(ptr, ((unsigned char []){0xf0}), 1);
14616 }
14617 mutex_unlock(&text_mutex);
14618 @@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
14619 for (poff = start; poff < end; poff++) {
14620 u8 *ptr = (u8 *)poff + *poff;
14621
14622 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14623 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14624 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
14625 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14626 +#endif
14627 +
14628 if (!*poff || ptr < text || ptr >= text_end)
14629 continue;
14630 /* turn lock prefix into DS segment override prefix */
14631 - if (*ptr == 0xf0)
14632 + if (*ktla_ktva(ptr) == 0xf0)
14633 text_poke(ptr, ((unsigned char []){0x3E}), 1);
14634 }
14635 mutex_unlock(&text_mutex);
14636 @@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
14637
14638 BUG_ON(p->len > MAX_PATCH_LEN);
14639 /* prep the buffer with the original instructions */
14640 - memcpy(insnbuf, p->instr, p->len);
14641 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
14642 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
14643 (unsigned long)p->instr, p->len);
14644
14645 @@ -515,7 +534,7 @@ void __init alternative_instructions(void)
14646 if (!uniproc_patched || num_possible_cpus() == 1)
14647 free_init_pages("SMP alternatives",
14648 (unsigned long)__smp_locks,
14649 - (unsigned long)__smp_locks_end);
14650 + PAGE_ALIGN((unsigned long)__smp_locks_end));
14651 #endif
14652
14653 apply_paravirt(__parainstructions, __parainstructions_end);
14654 @@ -535,13 +554,17 @@ void __init alternative_instructions(void)
14655 * instructions. And on the local CPU you need to be protected again NMI or MCE
14656 * handlers seeing an inconsistent instruction while you patch.
14657 */
14658 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
14659 +void *__kprobes text_poke_early(void *addr, const void *opcode,
14660 size_t len)
14661 {
14662 unsigned long flags;
14663 local_irq_save(flags);
14664 - memcpy(addr, opcode, len);
14665 +
14666 + pax_open_kernel();
14667 + memcpy(ktla_ktva(addr), opcode, len);
14668 sync_core();
14669 + pax_close_kernel();
14670 +
14671 local_irq_restore(flags);
14672 /* Could also do a CLFLUSH here to speed up CPU recovery; but
14673 that causes hangs on some VIA CPUs. */
14674 @@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
14675 */
14676 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
14677 {
14678 - unsigned long flags;
14679 - char *vaddr;
14680 + unsigned char *vaddr = ktla_ktva(addr);
14681 struct page *pages[2];
14682 - int i;
14683 + size_t i;
14684
14685 if (!core_kernel_text((unsigned long)addr)) {
14686 - pages[0] = vmalloc_to_page(addr);
14687 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
14688 + pages[0] = vmalloc_to_page(vaddr);
14689 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
14690 } else {
14691 - pages[0] = virt_to_page(addr);
14692 + pages[0] = virt_to_page(vaddr);
14693 WARN_ON(!PageReserved(pages[0]));
14694 - pages[1] = virt_to_page(addr + PAGE_SIZE);
14695 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
14696 }
14697 BUG_ON(!pages[0]);
14698 - local_irq_save(flags);
14699 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
14700 - if (pages[1])
14701 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
14702 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
14703 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
14704 - clear_fixmap(FIX_TEXT_POKE0);
14705 - if (pages[1])
14706 - clear_fixmap(FIX_TEXT_POKE1);
14707 - local_flush_tlb();
14708 - sync_core();
14709 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
14710 - that causes hangs on some VIA CPUs. */
14711 + text_poke_early(addr, opcode, len);
14712 for (i = 0; i < len; i++)
14713 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
14714 - local_irq_restore(flags);
14715 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
14716 return addr;
14717 }
14718
14719 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
14720 index b17416e..be6e5dc 100644
14721 --- a/arch/x86/kernel/apic/apic.c
14722 +++ b/arch/x86/kernel/apic/apic.c
14723 @@ -185,7 +185,7 @@ int first_system_vector = 0xfe;
14724 /*
14725 * Debug level, exported for io_apic.c
14726 */
14727 -unsigned int apic_verbosity;
14728 +int apic_verbosity;
14729
14730 int pic_mode;
14731
14732 @@ -1923,7 +1923,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14733 apic_write(APIC_ESR, 0);
14734 v1 = apic_read(APIC_ESR);
14735 ack_APIC_irq();
14736 - atomic_inc(&irq_err_count);
14737 + atomic_inc_unchecked(&irq_err_count);
14738
14739 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
14740 smp_processor_id(), v0 , v1);
14741 @@ -2155,7 +2155,9 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
14742 for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
14743 /* Should happen once for each apic */
14744 WARN_ON((*drv)->eoi_write == eoi_write);
14745 - (*drv)->eoi_write = eoi_write;
14746 + pax_open_kernel();
14747 + *(void **)&(*drv)->eoi_write = eoi_write;
14748 + pax_close_kernel();
14749 }
14750 }
14751
14752 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
14753 index 1817fa9..7bff097 100644
14754 --- a/arch/x86/kernel/apic/io_apic.c
14755 +++ b/arch/x86/kernel/apic/io_apic.c
14756 @@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14757 }
14758 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14759
14760 -void lock_vector_lock(void)
14761 +void lock_vector_lock(void) __acquires(vector_lock)
14762 {
14763 /* Used to the online set of cpus does not change
14764 * during assign_irq_vector.
14765 @@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
14766 raw_spin_lock(&vector_lock);
14767 }
14768
14769 -void unlock_vector_lock(void)
14770 +void unlock_vector_lock(void) __releases(vector_lock)
14771 {
14772 raw_spin_unlock(&vector_lock);
14773 }
14774 @@ -2411,7 +2411,7 @@ static void ack_apic_edge(struct irq_data *data)
14775 ack_APIC_irq();
14776 }
14777
14778 -atomic_t irq_mis_count;
14779 +atomic_unchecked_t irq_mis_count;
14780
14781 #ifdef CONFIG_GENERIC_PENDING_IRQ
14782 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
14783 @@ -2552,7 +2552,7 @@ static void ack_apic_level(struct irq_data *data)
14784 * at the cpu.
14785 */
14786 if (!(v & (1 << (i & 0x1f)))) {
14787 - atomic_inc(&irq_mis_count);
14788 + atomic_inc_unchecked(&irq_mis_count);
14789
14790 eoi_ioapic_irq(irq, cfg);
14791 }
14792 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14793 index d65464e..1035d31 100644
14794 --- a/arch/x86/kernel/apm_32.c
14795 +++ b/arch/x86/kernel/apm_32.c
14796 @@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
14797 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14798 * even though they are called in protected mode.
14799 */
14800 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14801 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14802 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14803
14804 static const char driver_version[] = "1.16ac"; /* no spaces */
14805 @@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
14806 BUG_ON(cpu != 0);
14807 gdt = get_cpu_gdt_table(cpu);
14808 save_desc_40 = gdt[0x40 / 8];
14809 +
14810 + pax_open_kernel();
14811 gdt[0x40 / 8] = bad_bios_desc;
14812 + pax_close_kernel();
14813
14814 apm_irq_save(flags);
14815 APM_DO_SAVE_SEGS;
14816 @@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
14817 &call->esi);
14818 APM_DO_RESTORE_SEGS;
14819 apm_irq_restore(flags);
14820 +
14821 + pax_open_kernel();
14822 gdt[0x40 / 8] = save_desc_40;
14823 + pax_close_kernel();
14824 +
14825 put_cpu();
14826
14827 return call->eax & 0xff;
14828 @@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
14829 BUG_ON(cpu != 0);
14830 gdt = get_cpu_gdt_table(cpu);
14831 save_desc_40 = gdt[0x40 / 8];
14832 +
14833 + pax_open_kernel();
14834 gdt[0x40 / 8] = bad_bios_desc;
14835 + pax_close_kernel();
14836
14837 apm_irq_save(flags);
14838 APM_DO_SAVE_SEGS;
14839 @@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
14840 &call->eax);
14841 APM_DO_RESTORE_SEGS;
14842 apm_irq_restore(flags);
14843 +
14844 + pax_open_kernel();
14845 gdt[0x40 / 8] = save_desc_40;
14846 + pax_close_kernel();
14847 +
14848 put_cpu();
14849 return error;
14850 }
14851 @@ -2345,12 +2359,15 @@ static int __init apm_init(void)
14852 * code to that CPU.
14853 */
14854 gdt = get_cpu_gdt_table(0);
14855 +
14856 + pax_open_kernel();
14857 set_desc_base(&gdt[APM_CS >> 3],
14858 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14859 set_desc_base(&gdt[APM_CS_16 >> 3],
14860 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14861 set_desc_base(&gdt[APM_DS >> 3],
14862 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14863 + pax_close_kernel();
14864
14865 proc_create("apm", 0, NULL, &apm_file_ops);
14866
14867 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
14868 index 2861082..6d4718e 100644
14869 --- a/arch/x86/kernel/asm-offsets.c
14870 +++ b/arch/x86/kernel/asm-offsets.c
14871 @@ -33,6 +33,8 @@ void common(void) {
14872 OFFSET(TI_status, thread_info, status);
14873 OFFSET(TI_addr_limit, thread_info, addr_limit);
14874 OFFSET(TI_preempt_count, thread_info, preempt_count);
14875 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14876 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14877
14878 BLANK();
14879 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14880 @@ -53,8 +55,26 @@ void common(void) {
14881 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14882 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14883 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14884 +
14885 +#ifdef CONFIG_PAX_KERNEXEC
14886 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14887 #endif
14888
14889 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14890 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14891 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14892 +#ifdef CONFIG_X86_64
14893 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14894 +#endif
14895 +#endif
14896 +
14897 +#endif
14898 +
14899 + BLANK();
14900 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14901 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14902 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14903 +
14904 #ifdef CONFIG_XEN
14905 BLANK();
14906 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14907 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14908 index 1b4754f..fbb4227 100644
14909 --- a/arch/x86/kernel/asm-offsets_64.c
14910 +++ b/arch/x86/kernel/asm-offsets_64.c
14911 @@ -76,6 +76,7 @@ int main(void)
14912 BLANK();
14913 #undef ENTRY
14914
14915 + DEFINE(TSS_size, sizeof(struct tss_struct));
14916 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14917 BLANK();
14918
14919 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14920 index a0e067d..9c7db16 100644
14921 --- a/arch/x86/kernel/cpu/Makefile
14922 +++ b/arch/x86/kernel/cpu/Makefile
14923 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14924 CFLAGS_REMOVE_perf_event.o = -pg
14925 endif
14926
14927 -# Make sure load_percpu_segment has no stackprotector
14928 -nostackp := $(call cc-option, -fno-stack-protector)
14929 -CFLAGS_common.o := $(nostackp)
14930 -
14931 obj-y := intel_cacheinfo.o scattered.o topology.o
14932 obj-y += proc.o capflags.o powerflags.o common.o
14933 obj-y += vmware.o hypervisor.o mshyperv.o
14934 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14935 index 1b7d165..b9e2627 100644
14936 --- a/arch/x86/kernel/cpu/amd.c
14937 +++ b/arch/x86/kernel/cpu/amd.c
14938 @@ -738,7 +738,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14939 unsigned int size)
14940 {
14941 /* AMD errata T13 (order #21922) */
14942 - if ((c->x86 == 6)) {
14943 + if (c->x86 == 6) {
14944 /* Duron Rev A0 */
14945 if (c->x86_model == 3 && c->x86_mask == 0)
14946 size = 64;
14947 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14948 index 7505f7b..d59dac0 100644
14949 --- a/arch/x86/kernel/cpu/common.c
14950 +++ b/arch/x86/kernel/cpu/common.c
14951 @@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14952
14953 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14954
14955 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14956 -#ifdef CONFIG_X86_64
14957 - /*
14958 - * We need valid kernel segments for data and code in long mode too
14959 - * IRET will check the segment types kkeil 2000/10/28
14960 - * Also sysret mandates a special GDT layout
14961 - *
14962 - * TLS descriptors are currently at a different place compared to i386.
14963 - * Hopefully nobody expects them at a fixed place (Wine?)
14964 - */
14965 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14966 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14967 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14968 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14969 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14970 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14971 -#else
14972 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14973 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14974 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14975 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14976 - /*
14977 - * Segments used for calling PnP BIOS have byte granularity.
14978 - * They code segments and data segments have fixed 64k limits,
14979 - * the transfer segment sizes are set at run time.
14980 - */
14981 - /* 32-bit code */
14982 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14983 - /* 16-bit code */
14984 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14985 - /* 16-bit data */
14986 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14987 - /* 16-bit data */
14988 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14989 - /* 16-bit data */
14990 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14991 - /*
14992 - * The APM segments have byte granularity and their bases
14993 - * are set at run time. All have 64k limits.
14994 - */
14995 - /* 32-bit code */
14996 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14997 - /* 16-bit code */
14998 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14999 - /* data */
15000 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
15001 -
15002 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15003 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15004 - GDT_STACK_CANARY_INIT
15005 -#endif
15006 -} };
15007 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
15008 -
15009 static int __init x86_xsave_setup(char *s)
15010 {
15011 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
15012 @@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
15013 {
15014 struct desc_ptr gdt_descr;
15015
15016 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
15017 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15018 gdt_descr.size = GDT_SIZE - 1;
15019 load_gdt(&gdt_descr);
15020 /* Reload the per-cpu base */
15021 @@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
15022 /* Filter out anything that depends on CPUID levels we don't have */
15023 filter_cpuid_features(c, true);
15024
15025 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15026 + setup_clear_cpu_cap(X86_FEATURE_SEP);
15027 +#endif
15028 +
15029 /* If the model name is still unset, do table lookup. */
15030 if (!c->x86_model_id[0]) {
15031 const char *p;
15032 @@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
15033 }
15034 __setup("clearcpuid=", setup_disablecpuid);
15035
15036 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
15037 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
15038 +
15039 #ifdef CONFIG_X86_64
15040 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
15041 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
15042 - (unsigned long) nmi_idt_table };
15043 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
15044
15045 DEFINE_PER_CPU_FIRST(union irq_stack_union,
15046 irq_stack_union) __aligned(PAGE_SIZE);
15047 @@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
15048 EXPORT_PER_CPU_SYMBOL(current_task);
15049
15050 DEFINE_PER_CPU(unsigned long, kernel_stack) =
15051 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
15052 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
15053 EXPORT_PER_CPU_SYMBOL(kernel_stack);
15054
15055 DEFINE_PER_CPU(char *, irq_stack_ptr) =
15056 @@ -1178,7 +1130,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
15057 {
15058 memset(regs, 0, sizeof(struct pt_regs));
15059 regs->fs = __KERNEL_PERCPU;
15060 - regs->gs = __KERNEL_STACK_CANARY;
15061 + savesegment(gs, regs->gs);
15062
15063 return regs;
15064 }
15065 @@ -1233,7 +1185,7 @@ void __cpuinit cpu_init(void)
15066 int i;
15067
15068 cpu = stack_smp_processor_id();
15069 - t = &per_cpu(init_tss, cpu);
15070 + t = init_tss + cpu;
15071 oist = &per_cpu(orig_ist, cpu);
15072
15073 #ifdef CONFIG_NUMA
15074 @@ -1259,7 +1211,7 @@ void __cpuinit cpu_init(void)
15075 switch_to_new_gdt(cpu);
15076 loadsegment(fs, 0);
15077
15078 - load_idt((const struct desc_ptr *)&idt_descr);
15079 + load_idt(&idt_descr);
15080
15081 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
15082 syscall_init();
15083 @@ -1268,7 +1220,6 @@ void __cpuinit cpu_init(void)
15084 wrmsrl(MSR_KERNEL_GS_BASE, 0);
15085 barrier();
15086
15087 - x86_configure_nx();
15088 if (cpu != 0)
15089 enable_x2apic();
15090
15091 @@ -1321,7 +1272,7 @@ void __cpuinit cpu_init(void)
15092 {
15093 int cpu = smp_processor_id();
15094 struct task_struct *curr = current;
15095 - struct tss_struct *t = &per_cpu(init_tss, cpu);
15096 + struct tss_struct *t = init_tss + cpu;
15097 struct thread_struct *thread = &curr->thread;
15098
15099 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
15100 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
15101 index 198e019..867575e 100644
15102 --- a/arch/x86/kernel/cpu/intel.c
15103 +++ b/arch/x86/kernel/cpu/intel.c
15104 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
15105 * Update the IDT descriptor and reload the IDT so that
15106 * it uses the read-only mapped virtual address.
15107 */
15108 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
15109 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
15110 load_idt(&idt_descr);
15111 }
15112 #endif
15113 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
15114 index 93c5451..3887433 100644
15115 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
15116 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
15117 @@ -983,6 +983,22 @@ static struct attribute *default_attrs[] = {
15118 };
15119
15120 #ifdef CONFIG_AMD_NB
15121 +static struct attribute *default_attrs_amd_nb[] = {
15122 + &type.attr,
15123 + &level.attr,
15124 + &coherency_line_size.attr,
15125 + &physical_line_partition.attr,
15126 + &ways_of_associativity.attr,
15127 + &number_of_sets.attr,
15128 + &size.attr,
15129 + &shared_cpu_map.attr,
15130 + &shared_cpu_list.attr,
15131 + NULL,
15132 + NULL,
15133 + NULL,
15134 + NULL
15135 +};
15136 +
15137 static struct attribute ** __cpuinit amd_l3_attrs(void)
15138 {
15139 static struct attribute **attrs;
15140 @@ -993,18 +1009,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
15141
15142 n = ARRAY_SIZE(default_attrs);
15143
15144 - if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
15145 - n += 2;
15146 -
15147 - if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
15148 - n += 1;
15149 -
15150 - attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
15151 - if (attrs == NULL)
15152 - return attrs = default_attrs;
15153 -
15154 - for (n = 0; default_attrs[n]; n++)
15155 - attrs[n] = default_attrs[n];
15156 + attrs = default_attrs_amd_nb;
15157
15158 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
15159 attrs[n++] = &cache_disable_0.attr;
15160 @@ -1055,6 +1060,13 @@ static struct kobj_type ktype_cache = {
15161 .default_attrs = default_attrs,
15162 };
15163
15164 +#ifdef CONFIG_AMD_NB
15165 +static struct kobj_type ktype_cache_amd_nb = {
15166 + .sysfs_ops = &sysfs_ops,
15167 + .default_attrs = default_attrs_amd_nb,
15168 +};
15169 +#endif
15170 +
15171 static struct kobj_type ktype_percpu_entry = {
15172 .sysfs_ops = &sysfs_ops,
15173 };
15174 @@ -1120,20 +1132,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
15175 return retval;
15176 }
15177
15178 +#ifdef CONFIG_AMD_NB
15179 + amd_l3_attrs();
15180 +#endif
15181 +
15182 for (i = 0; i < num_cache_leaves; i++) {
15183 + struct kobj_type *ktype;
15184 +
15185 this_object = INDEX_KOBJECT_PTR(cpu, i);
15186 this_object->cpu = cpu;
15187 this_object->index = i;
15188
15189 this_leaf = CPUID4_INFO_IDX(cpu, i);
15190
15191 - ktype_cache.default_attrs = default_attrs;
15192 + ktype = &ktype_cache;
15193 #ifdef CONFIG_AMD_NB
15194 if (this_leaf->base.nb)
15195 - ktype_cache.default_attrs = amd_l3_attrs();
15196 + ktype = &ktype_cache_amd_nb;
15197 #endif
15198 retval = kobject_init_and_add(&(this_object->kobj),
15199 - &ktype_cache,
15200 + ktype,
15201 per_cpu(ici_cache_kobject, cpu),
15202 "index%1lu", i);
15203 if (unlikely(retval)) {
15204 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
15205 index 46cbf86..55c7292 100644
15206 --- a/arch/x86/kernel/cpu/mcheck/mce.c
15207 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
15208 @@ -45,6 +45,7 @@
15209 #include <asm/processor.h>
15210 #include <asm/mce.h>
15211 #include <asm/msr.h>
15212 +#include <asm/local.h>
15213
15214 #include "mce-internal.h"
15215
15216 @@ -254,7 +255,7 @@ static void print_mce(struct mce *m)
15217 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
15218 m->cs, m->ip);
15219
15220 - if (m->cs == __KERNEL_CS)
15221 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
15222 print_symbol("{%s}", m->ip);
15223 pr_cont("\n");
15224 }
15225 @@ -287,10 +288,10 @@ static void print_mce(struct mce *m)
15226
15227 #define PANIC_TIMEOUT 5 /* 5 seconds */
15228
15229 -static atomic_t mce_paniced;
15230 +static atomic_unchecked_t mce_paniced;
15231
15232 static int fake_panic;
15233 -static atomic_t mce_fake_paniced;
15234 +static atomic_unchecked_t mce_fake_paniced;
15235
15236 /* Panic in progress. Enable interrupts and wait for final IPI */
15237 static void wait_for_panic(void)
15238 @@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15239 /*
15240 * Make sure only one CPU runs in machine check panic
15241 */
15242 - if (atomic_inc_return(&mce_paniced) > 1)
15243 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
15244 wait_for_panic();
15245 barrier();
15246
15247 @@ -322,7 +323,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15248 console_verbose();
15249 } else {
15250 /* Don't log too much for fake panic */
15251 - if (atomic_inc_return(&mce_fake_paniced) > 1)
15252 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
15253 return;
15254 }
15255 /* First print corrected ones that are still unlogged */
15256 @@ -694,7 +695,7 @@ static int mce_timed_out(u64 *t)
15257 * might have been modified by someone else.
15258 */
15259 rmb();
15260 - if (atomic_read(&mce_paniced))
15261 + if (atomic_read_unchecked(&mce_paniced))
15262 wait_for_panic();
15263 if (!monarch_timeout)
15264 goto out;
15265 @@ -1659,7 +1660,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
15266 }
15267
15268 /* Call the installed machine check handler for this CPU setup. */
15269 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
15270 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
15271 unexpected_machine_check;
15272
15273 /*
15274 @@ -1682,7 +1683,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
15275 return;
15276 }
15277
15278 + pax_open_kernel();
15279 machine_check_vector = do_machine_check;
15280 + pax_close_kernel();
15281
15282 __mcheck_cpu_init_generic();
15283 __mcheck_cpu_init_vendor(c);
15284 @@ -1696,7 +1699,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
15285 */
15286
15287 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
15288 -static int mce_chrdev_open_count; /* #times opened */
15289 +static local_t mce_chrdev_open_count; /* #times opened */
15290 static int mce_chrdev_open_exclu; /* already open exclusive? */
15291
15292 static int mce_chrdev_open(struct inode *inode, struct file *file)
15293 @@ -1704,7 +1707,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
15294 spin_lock(&mce_chrdev_state_lock);
15295
15296 if (mce_chrdev_open_exclu ||
15297 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
15298 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
15299 spin_unlock(&mce_chrdev_state_lock);
15300
15301 return -EBUSY;
15302 @@ -1712,7 +1715,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
15303
15304 if (file->f_flags & O_EXCL)
15305 mce_chrdev_open_exclu = 1;
15306 - mce_chrdev_open_count++;
15307 + local_inc(&mce_chrdev_open_count);
15308
15309 spin_unlock(&mce_chrdev_state_lock);
15310
15311 @@ -1723,7 +1726,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
15312 {
15313 spin_lock(&mce_chrdev_state_lock);
15314
15315 - mce_chrdev_open_count--;
15316 + local_dec(&mce_chrdev_open_count);
15317 mce_chrdev_open_exclu = 0;
15318
15319 spin_unlock(&mce_chrdev_state_lock);
15320 @@ -2367,7 +2370,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
15321 return NOTIFY_OK;
15322 }
15323
15324 -static struct notifier_block mce_cpu_notifier __cpuinitdata = {
15325 +static struct notifier_block mce_cpu_notifier __cpuinitconst = {
15326 .notifier_call = mce_cpu_callback,
15327 };
15328
15329 @@ -2445,7 +2448,7 @@ struct dentry *mce_get_debugfs_dir(void)
15330 static void mce_reset(void)
15331 {
15332 cpu_missing = 0;
15333 - atomic_set(&mce_fake_paniced, 0);
15334 + atomic_set_unchecked(&mce_fake_paniced, 0);
15335 atomic_set(&mce_executing, 0);
15336 atomic_set(&mce_callin, 0);
15337 atomic_set(&global_nwo, 0);
15338 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
15339 index 2d5454c..51987eb 100644
15340 --- a/arch/x86/kernel/cpu/mcheck/p5.c
15341 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
15342 @@ -11,6 +11,7 @@
15343 #include <asm/processor.h>
15344 #include <asm/mce.h>
15345 #include <asm/msr.h>
15346 +#include <asm/pgtable.h>
15347
15348 /* By default disabled */
15349 int mce_p5_enabled __read_mostly;
15350 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
15351 if (!cpu_has(c, X86_FEATURE_MCE))
15352 return;
15353
15354 + pax_open_kernel();
15355 machine_check_vector = pentium_machine_check;
15356 + pax_close_kernel();
15357 /* Make sure the vector pointer is visible before we enable MCEs: */
15358 wmb();
15359
15360 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
15361 index 2d7998f..17c9de1 100644
15362 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
15363 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
15364 @@ -10,6 +10,7 @@
15365 #include <asm/processor.h>
15366 #include <asm/mce.h>
15367 #include <asm/msr.h>
15368 +#include <asm/pgtable.h>
15369
15370 /* Machine check handler for WinChip C6: */
15371 static void winchip_machine_check(struct pt_regs *regs, long error_code)
15372 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
15373 {
15374 u32 lo, hi;
15375
15376 + pax_open_kernel();
15377 machine_check_vector = winchip_machine_check;
15378 + pax_close_kernel();
15379 /* Make sure the vector pointer is visible before we enable MCEs: */
15380 wmb();
15381
15382 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
15383 index 6b96110..0da73eb 100644
15384 --- a/arch/x86/kernel/cpu/mtrr/main.c
15385 +++ b/arch/x86/kernel/cpu/mtrr/main.c
15386 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
15387 u64 size_or_mask, size_and_mask;
15388 static bool mtrr_aps_delayed_init;
15389
15390 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
15391 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
15392
15393 const struct mtrr_ops *mtrr_if;
15394
15395 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
15396 index df5e41f..816c719 100644
15397 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
15398 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
15399 @@ -25,7 +25,7 @@ struct mtrr_ops {
15400 int (*validate_add_page)(unsigned long base, unsigned long size,
15401 unsigned int type);
15402 int (*have_wrcomb)(void);
15403 -};
15404 +} __do_const;
15405
15406 extern int generic_get_free_region(unsigned long base, unsigned long size,
15407 int replace_reg);
15408 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
15409 index 4a3374e..1ca3ecb 100644
15410 --- a/arch/x86/kernel/cpu/perf_event.c
15411 +++ b/arch/x86/kernel/cpu/perf_event.c
15412 @@ -1765,7 +1765,7 @@ static unsigned long get_segment_base(unsigned int segment)
15413 if (idx > GDT_ENTRIES)
15414 return 0;
15415
15416 - desc = __this_cpu_ptr(&gdt_page.gdt[0]);
15417 + desc = get_cpu_gdt_table(smp_processor_id());
15418 }
15419
15420 return get_desc_base(desc + idx);
15421 @@ -1855,7 +1855,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
15422 break;
15423
15424 perf_callchain_store(entry, frame.return_address);
15425 - fp = frame.next_frame;
15426 + fp = (const void __force_user *)frame.next_frame;
15427 }
15428 }
15429
15430 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
15431 index 324bb52..1a93d85 100644
15432 --- a/arch/x86/kernel/cpu/perf_event_intel.c
15433 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
15434 @@ -1949,10 +1949,10 @@ __init int intel_pmu_init(void)
15435 * v2 and above have a perf capabilities MSR
15436 */
15437 if (version > 1) {
15438 - u64 capabilities;
15439 + u64 capabilities = x86_pmu.intel_cap.capabilities;
15440
15441 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
15442 - x86_pmu.intel_cap.capabilities = capabilities;
15443 + if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
15444 + x86_pmu.intel_cap.capabilities = capabilities;
15445 }
15446
15447 intel_ds_init();
15448 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
15449 index 13ad899..f642b9a 100644
15450 --- a/arch/x86/kernel/crash.c
15451 +++ b/arch/x86/kernel/crash.c
15452 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
15453 {
15454 #ifdef CONFIG_X86_32
15455 struct pt_regs fixed_regs;
15456 -#endif
15457
15458 -#ifdef CONFIG_X86_32
15459 - if (!user_mode_vm(regs)) {
15460 + if (!user_mode(regs)) {
15461 crash_fixup_ss_esp(&fixed_regs, regs);
15462 regs = &fixed_regs;
15463 }
15464 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
15465 index 37250fe..bf2ec74 100644
15466 --- a/arch/x86/kernel/doublefault_32.c
15467 +++ b/arch/x86/kernel/doublefault_32.c
15468 @@ -11,7 +11,7 @@
15469
15470 #define DOUBLEFAULT_STACKSIZE (1024)
15471 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
15472 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
15473 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
15474
15475 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
15476
15477 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
15478 unsigned long gdt, tss;
15479
15480 store_gdt(&gdt_desc);
15481 - gdt = gdt_desc.address;
15482 + gdt = (unsigned long)gdt_desc.address;
15483
15484 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
15485
15486 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
15487 /* 0x2 bit is always set */
15488 .flags = X86_EFLAGS_SF | 0x2,
15489 .sp = STACK_START,
15490 - .es = __USER_DS,
15491 + .es = __KERNEL_DS,
15492 .cs = __KERNEL_CS,
15493 .ss = __KERNEL_DS,
15494 - .ds = __USER_DS,
15495 + .ds = __KERNEL_DS,
15496 .fs = __KERNEL_PERCPU,
15497
15498 .__cr3 = __pa_nodebug(swapper_pg_dir),
15499 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
15500 index ae42418b..787c16b 100644
15501 --- a/arch/x86/kernel/dumpstack.c
15502 +++ b/arch/x86/kernel/dumpstack.c
15503 @@ -2,6 +2,9 @@
15504 * Copyright (C) 1991, 1992 Linus Torvalds
15505 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
15506 */
15507 +#ifdef CONFIG_GRKERNSEC_HIDESYM
15508 +#define __INCLUDED_BY_HIDESYM 1
15509 +#endif
15510 #include <linux/kallsyms.h>
15511 #include <linux/kprobes.h>
15512 #include <linux/uaccess.h>
15513 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
15514 static void
15515 print_ftrace_graph_addr(unsigned long addr, void *data,
15516 const struct stacktrace_ops *ops,
15517 - struct thread_info *tinfo, int *graph)
15518 + struct task_struct *task, int *graph)
15519 {
15520 - struct task_struct *task;
15521 unsigned long ret_addr;
15522 int index;
15523
15524 if (addr != (unsigned long)return_to_handler)
15525 return;
15526
15527 - task = tinfo->task;
15528 index = task->curr_ret_stack;
15529
15530 if (!task->ret_stack || index < *graph)
15531 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15532 static inline void
15533 print_ftrace_graph_addr(unsigned long addr, void *data,
15534 const struct stacktrace_ops *ops,
15535 - struct thread_info *tinfo, int *graph)
15536 + struct task_struct *task, int *graph)
15537 { }
15538 #endif
15539
15540 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15541 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
15542 */
15543
15544 -static inline int valid_stack_ptr(struct thread_info *tinfo,
15545 - void *p, unsigned int size, void *end)
15546 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
15547 {
15548 - void *t = tinfo;
15549 if (end) {
15550 if (p < end && p >= (end-THREAD_SIZE))
15551 return 1;
15552 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
15553 }
15554
15555 unsigned long
15556 -print_context_stack(struct thread_info *tinfo,
15557 +print_context_stack(struct task_struct *task, void *stack_start,
15558 unsigned long *stack, unsigned long bp,
15559 const struct stacktrace_ops *ops, void *data,
15560 unsigned long *end, int *graph)
15561 {
15562 struct stack_frame *frame = (struct stack_frame *)bp;
15563
15564 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
15565 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
15566 unsigned long addr;
15567
15568 addr = *stack;
15569 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
15570 } else {
15571 ops->address(data, addr, 0);
15572 }
15573 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15574 + print_ftrace_graph_addr(addr, data, ops, task, graph);
15575 }
15576 stack++;
15577 }
15578 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
15579 EXPORT_SYMBOL_GPL(print_context_stack);
15580
15581 unsigned long
15582 -print_context_stack_bp(struct thread_info *tinfo,
15583 +print_context_stack_bp(struct task_struct *task, void *stack_start,
15584 unsigned long *stack, unsigned long bp,
15585 const struct stacktrace_ops *ops, void *data,
15586 unsigned long *end, int *graph)
15587 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
15588 struct stack_frame *frame = (struct stack_frame *)bp;
15589 unsigned long *ret_addr = &frame->return_address;
15590
15591 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
15592 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
15593 unsigned long addr = *ret_addr;
15594
15595 if (!__kernel_text_address(addr))
15596 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
15597 ops->address(data, addr, 1);
15598 frame = frame->next_frame;
15599 ret_addr = &frame->return_address;
15600 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15601 + print_ftrace_graph_addr(addr, data, ops, task, graph);
15602 }
15603
15604 return (unsigned long)frame;
15605 @@ -189,7 +188,7 @@ void dump_stack(void)
15606
15607 bp = stack_frame(current, NULL);
15608 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
15609 - current->pid, current->comm, print_tainted(),
15610 + task_pid_nr(current), current->comm, print_tainted(),
15611 init_utsname()->release,
15612 (int)strcspn(init_utsname()->version, " "),
15613 init_utsname()->version);
15614 @@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
15615 }
15616 EXPORT_SYMBOL_GPL(oops_begin);
15617
15618 +extern void gr_handle_kernel_exploit(void);
15619 +
15620 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15621 {
15622 if (regs && kexec_should_crash(current))
15623 @@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15624 panic("Fatal exception in interrupt");
15625 if (panic_on_oops)
15626 panic("Fatal exception");
15627 - do_exit(signr);
15628 +
15629 + gr_handle_kernel_exploit();
15630 +
15631 + do_group_exit(signr);
15632 }
15633
15634 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15635 @@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15636 print_modules();
15637 show_regs(regs);
15638 #ifdef CONFIG_X86_32
15639 - if (user_mode_vm(regs)) {
15640 + if (user_mode(regs)) {
15641 sp = regs->sp;
15642 ss = regs->ss & 0xffff;
15643 } else {
15644 @@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
15645 unsigned long flags = oops_begin();
15646 int sig = SIGSEGV;
15647
15648 - if (!user_mode_vm(regs))
15649 + if (!user_mode(regs))
15650 report_bug(regs->ip, regs);
15651
15652 if (__die(str, regs, err))
15653 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
15654 index 1038a41..db2c12b 100644
15655 --- a/arch/x86/kernel/dumpstack_32.c
15656 +++ b/arch/x86/kernel/dumpstack_32.c
15657 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15658 bp = stack_frame(task, regs);
15659
15660 for (;;) {
15661 - struct thread_info *context;
15662 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15663
15664 - context = (struct thread_info *)
15665 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
15666 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
15667 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15668
15669 - stack = (unsigned long *)context->previous_esp;
15670 - if (!stack)
15671 + if (stack_start == task_stack_page(task))
15672 break;
15673 + stack = *(unsigned long **)stack_start;
15674 if (ops->stack(data, "IRQ") < 0)
15675 break;
15676 touch_nmi_watchdog();
15677 @@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
15678 {
15679 int i;
15680
15681 - __show_regs(regs, !user_mode_vm(regs));
15682 + __show_regs(regs, !user_mode(regs));
15683
15684 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
15685 TASK_COMM_LEN, current->comm, task_pid_nr(current),
15686 @@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
15687 * When in-kernel, we also print out the stack and code at the
15688 * time of the fault..
15689 */
15690 - if (!user_mode_vm(regs)) {
15691 + if (!user_mode(regs)) {
15692 unsigned int code_prologue = code_bytes * 43 / 64;
15693 unsigned int code_len = code_bytes;
15694 unsigned char c;
15695 u8 *ip;
15696 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
15697
15698 pr_emerg("Stack:\n");
15699 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
15700
15701 pr_emerg("Code:");
15702
15703 - ip = (u8 *)regs->ip - code_prologue;
15704 + ip = (u8 *)regs->ip - code_prologue + cs_base;
15705 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
15706 /* try starting at IP */
15707 - ip = (u8 *)regs->ip;
15708 + ip = (u8 *)regs->ip + cs_base;
15709 code_len = code_len - code_prologue + 1;
15710 }
15711 for (i = 0; i < code_len; i++, ip++) {
15712 @@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
15713 pr_cont(" Bad EIP value.");
15714 break;
15715 }
15716 - if (ip == (u8 *)regs->ip)
15717 + if (ip == (u8 *)regs->ip + cs_base)
15718 pr_cont(" <%02x>", c);
15719 else
15720 pr_cont(" %02x", c);
15721 @@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
15722 {
15723 unsigned short ud2;
15724
15725 + ip = ktla_ktva(ip);
15726 if (ip < PAGE_OFFSET)
15727 return 0;
15728 if (probe_kernel_address((unsigned short *)ip, ud2))
15729 @@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
15730
15731 return ud2 == 0x0b0f;
15732 }
15733 +
15734 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15735 +void pax_check_alloca(unsigned long size)
15736 +{
15737 + unsigned long sp = (unsigned long)&sp, stack_left;
15738 +
15739 + /* all kernel stacks are of the same size */
15740 + stack_left = sp & (THREAD_SIZE - 1);
15741 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15742 +}
15743 +EXPORT_SYMBOL(pax_check_alloca);
15744 +#endif
15745 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
15746 index b653675..51cc8c0 100644
15747 --- a/arch/x86/kernel/dumpstack_64.c
15748 +++ b/arch/x86/kernel/dumpstack_64.c
15749 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15750 unsigned long *irq_stack_end =
15751 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
15752 unsigned used = 0;
15753 - struct thread_info *tinfo;
15754 int graph = 0;
15755 unsigned long dummy;
15756 + void *stack_start;
15757
15758 if (!task)
15759 task = current;
15760 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15761 * current stack address. If the stacks consist of nested
15762 * exceptions
15763 */
15764 - tinfo = task_thread_info(task);
15765 for (;;) {
15766 char *id;
15767 unsigned long *estack_end;
15768 +
15769 estack_end = in_exception_stack(cpu, (unsigned long)stack,
15770 &used, &id);
15771
15772 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15773 if (ops->stack(data, id) < 0)
15774 break;
15775
15776 - bp = ops->walk_stack(tinfo, stack, bp, ops,
15777 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
15778 data, estack_end, &graph);
15779 ops->stack(data, "<EOE>");
15780 /*
15781 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15782 * second-to-last pointer (index -2 to end) in the
15783 * exception stack:
15784 */
15785 + if ((u16)estack_end[-1] != __KERNEL_DS)
15786 + goto out;
15787 stack = (unsigned long *) estack_end[-2];
15788 continue;
15789 }
15790 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15791 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
15792 if (ops->stack(data, "IRQ") < 0)
15793 break;
15794 - bp = ops->walk_stack(tinfo, stack, bp,
15795 + bp = ops->walk_stack(task, irq_stack, stack, bp,
15796 ops, data, irq_stack_end, &graph);
15797 /*
15798 * We link to the next stack (which would be
15799 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15800 /*
15801 * This handles the process stack:
15802 */
15803 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
15804 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15805 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15806 +out:
15807 put_cpu();
15808 }
15809 EXPORT_SYMBOL(dump_trace);
15810 @@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
15811 {
15812 int i;
15813 unsigned long sp;
15814 - const int cpu = smp_processor_id();
15815 + const int cpu = raw_smp_processor_id();
15816 struct task_struct *cur = current;
15817
15818 sp = regs->sp;
15819 @@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
15820
15821 return ud2 == 0x0b0f;
15822 }
15823 +
15824 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15825 +void pax_check_alloca(unsigned long size)
15826 +{
15827 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
15828 + unsigned cpu, used;
15829 + char *id;
15830 +
15831 + /* check the process stack first */
15832 + stack_start = (unsigned long)task_stack_page(current);
15833 + stack_end = stack_start + THREAD_SIZE;
15834 + if (likely(stack_start <= sp && sp < stack_end)) {
15835 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
15836 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15837 + return;
15838 + }
15839 +
15840 + cpu = get_cpu();
15841 +
15842 + /* check the irq stacks */
15843 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
15844 + stack_start = stack_end - IRQ_STACK_SIZE;
15845 + if (stack_start <= sp && sp < stack_end) {
15846 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
15847 + put_cpu();
15848 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15849 + return;
15850 + }
15851 +
15852 + /* check the exception stacks */
15853 + used = 0;
15854 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
15855 + stack_start = stack_end - EXCEPTION_STKSZ;
15856 + if (stack_end && stack_start <= sp && sp < stack_end) {
15857 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
15858 + put_cpu();
15859 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15860 + return;
15861 + }
15862 +
15863 + put_cpu();
15864 +
15865 + /* unknown stack */
15866 + BUG();
15867 +}
15868 +EXPORT_SYMBOL(pax_check_alloca);
15869 +#endif
15870 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
15871 index 9b9f18b..9fcaa04 100644
15872 --- a/arch/x86/kernel/early_printk.c
15873 +++ b/arch/x86/kernel/early_printk.c
15874 @@ -7,6 +7,7 @@
15875 #include <linux/pci_regs.h>
15876 #include <linux/pci_ids.h>
15877 #include <linux/errno.h>
15878 +#include <linux/sched.h>
15879 #include <asm/io.h>
15880 #include <asm/processor.h>
15881 #include <asm/fcntl.h>
15882 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15883 index 88b725a..3efabf6 100644
15884 --- a/arch/x86/kernel/entry_32.S
15885 +++ b/arch/x86/kernel/entry_32.S
15886 @@ -177,13 +177,153 @@
15887 /*CFI_REL_OFFSET gs, PT_GS*/
15888 .endm
15889 .macro SET_KERNEL_GS reg
15890 +
15891 +#ifdef CONFIG_CC_STACKPROTECTOR
15892 movl $(__KERNEL_STACK_CANARY), \reg
15893 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15894 + movl $(__USER_DS), \reg
15895 +#else
15896 + xorl \reg, \reg
15897 +#endif
15898 +
15899 movl \reg, %gs
15900 .endm
15901
15902 #endif /* CONFIG_X86_32_LAZY_GS */
15903
15904 -.macro SAVE_ALL
15905 +.macro pax_enter_kernel
15906 +#ifdef CONFIG_PAX_KERNEXEC
15907 + call pax_enter_kernel
15908 +#endif
15909 +.endm
15910 +
15911 +.macro pax_exit_kernel
15912 +#ifdef CONFIG_PAX_KERNEXEC
15913 + call pax_exit_kernel
15914 +#endif
15915 +.endm
15916 +
15917 +#ifdef CONFIG_PAX_KERNEXEC
15918 +ENTRY(pax_enter_kernel)
15919 +#ifdef CONFIG_PARAVIRT
15920 + pushl %eax
15921 + pushl %ecx
15922 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15923 + mov %eax, %esi
15924 +#else
15925 + mov %cr0, %esi
15926 +#endif
15927 + bts $16, %esi
15928 + jnc 1f
15929 + mov %cs, %esi
15930 + cmp $__KERNEL_CS, %esi
15931 + jz 3f
15932 + ljmp $__KERNEL_CS, $3f
15933 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15934 +2:
15935 +#ifdef CONFIG_PARAVIRT
15936 + mov %esi, %eax
15937 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15938 +#else
15939 + mov %esi, %cr0
15940 +#endif
15941 +3:
15942 +#ifdef CONFIG_PARAVIRT
15943 + popl %ecx
15944 + popl %eax
15945 +#endif
15946 + ret
15947 +ENDPROC(pax_enter_kernel)
15948 +
15949 +ENTRY(pax_exit_kernel)
15950 +#ifdef CONFIG_PARAVIRT
15951 + pushl %eax
15952 + pushl %ecx
15953 +#endif
15954 + mov %cs, %esi
15955 + cmp $__KERNEXEC_KERNEL_CS, %esi
15956 + jnz 2f
15957 +#ifdef CONFIG_PARAVIRT
15958 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15959 + mov %eax, %esi
15960 +#else
15961 + mov %cr0, %esi
15962 +#endif
15963 + btr $16, %esi
15964 + ljmp $__KERNEL_CS, $1f
15965 +1:
15966 +#ifdef CONFIG_PARAVIRT
15967 + mov %esi, %eax
15968 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15969 +#else
15970 + mov %esi, %cr0
15971 +#endif
15972 +2:
15973 +#ifdef CONFIG_PARAVIRT
15974 + popl %ecx
15975 + popl %eax
15976 +#endif
15977 + ret
15978 +ENDPROC(pax_exit_kernel)
15979 +#endif
15980 +
15981 +.macro pax_erase_kstack
15982 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15983 + call pax_erase_kstack
15984 +#endif
15985 +.endm
15986 +
15987 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15988 +/*
15989 + * ebp: thread_info
15990 + */
15991 +ENTRY(pax_erase_kstack)
15992 + pushl %edi
15993 + pushl %ecx
15994 + pushl %eax
15995 +
15996 + mov TI_lowest_stack(%ebp), %edi
15997 + mov $-0xBEEF, %eax
15998 + std
15999 +
16000 +1: mov %edi, %ecx
16001 + and $THREAD_SIZE_asm - 1, %ecx
16002 + shr $2, %ecx
16003 + repne scasl
16004 + jecxz 2f
16005 +
16006 + cmp $2*16, %ecx
16007 + jc 2f
16008 +
16009 + mov $2*16, %ecx
16010 + repe scasl
16011 + jecxz 2f
16012 + jne 1b
16013 +
16014 +2: cld
16015 + mov %esp, %ecx
16016 + sub %edi, %ecx
16017 +
16018 + cmp $THREAD_SIZE_asm, %ecx
16019 + jb 3f
16020 + ud2
16021 +3:
16022 +
16023 + shr $2, %ecx
16024 + rep stosl
16025 +
16026 + mov TI_task_thread_sp0(%ebp), %edi
16027 + sub $128, %edi
16028 + mov %edi, TI_lowest_stack(%ebp)
16029 +
16030 + popl %eax
16031 + popl %ecx
16032 + popl %edi
16033 + ret
16034 +ENDPROC(pax_erase_kstack)
16035 +#endif
16036 +
16037 +.macro __SAVE_ALL _DS
16038 cld
16039 PUSH_GS
16040 pushl_cfi %fs
16041 @@ -206,7 +346,7 @@
16042 CFI_REL_OFFSET ecx, 0
16043 pushl_cfi %ebx
16044 CFI_REL_OFFSET ebx, 0
16045 - movl $(__USER_DS), %edx
16046 + movl $\_DS, %edx
16047 movl %edx, %ds
16048 movl %edx, %es
16049 movl $(__KERNEL_PERCPU), %edx
16050 @@ -214,6 +354,15 @@
16051 SET_KERNEL_GS %edx
16052 .endm
16053
16054 +.macro SAVE_ALL
16055 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
16056 + __SAVE_ALL __KERNEL_DS
16057 + pax_enter_kernel
16058 +#else
16059 + __SAVE_ALL __USER_DS
16060 +#endif
16061 +.endm
16062 +
16063 .macro RESTORE_INT_REGS
16064 popl_cfi %ebx
16065 CFI_RESTORE ebx
16066 @@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
16067 popfl_cfi
16068 jmp syscall_exit
16069 CFI_ENDPROC
16070 -END(ret_from_fork)
16071 +ENDPROC(ret_from_fork)
16072
16073 ENTRY(ret_from_kernel_thread)
16074 CFI_STARTPROC
16075 @@ -344,7 +493,15 @@ ret_from_intr:
16076 andl $SEGMENT_RPL_MASK, %eax
16077 #endif
16078 cmpl $USER_RPL, %eax
16079 +
16080 +#ifdef CONFIG_PAX_KERNEXEC
16081 + jae resume_userspace
16082 +
16083 + pax_exit_kernel
16084 + jmp resume_kernel
16085 +#else
16086 jb resume_kernel # not returning to v8086 or userspace
16087 +#endif
16088
16089 ENTRY(resume_userspace)
16090 LOCKDEP_SYS_EXIT
16091 @@ -356,8 +513,8 @@ ENTRY(resume_userspace)
16092 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
16093 # int/exception return?
16094 jne work_pending
16095 - jmp restore_all
16096 -END(ret_from_exception)
16097 + jmp restore_all_pax
16098 +ENDPROC(ret_from_exception)
16099
16100 #ifdef CONFIG_PREEMPT
16101 ENTRY(resume_kernel)
16102 @@ -372,7 +529,7 @@ need_resched:
16103 jz restore_all
16104 call preempt_schedule_irq
16105 jmp need_resched
16106 -END(resume_kernel)
16107 +ENDPROC(resume_kernel)
16108 #endif
16109 CFI_ENDPROC
16110 /*
16111 @@ -406,30 +563,45 @@ sysenter_past_esp:
16112 /*CFI_REL_OFFSET cs, 0*/
16113 /*
16114 * Push current_thread_info()->sysenter_return to the stack.
16115 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
16116 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
16117 */
16118 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
16119 + pushl_cfi $0
16120 CFI_REL_OFFSET eip, 0
16121
16122 pushl_cfi %eax
16123 SAVE_ALL
16124 + GET_THREAD_INFO(%ebp)
16125 + movl TI_sysenter_return(%ebp),%ebp
16126 + movl %ebp,PT_EIP(%esp)
16127 ENABLE_INTERRUPTS(CLBR_NONE)
16128
16129 /*
16130 * Load the potential sixth argument from user stack.
16131 * Careful about security.
16132 */
16133 + movl PT_OLDESP(%esp),%ebp
16134 +
16135 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16136 + mov PT_OLDSS(%esp),%ds
16137 +1: movl %ds:(%ebp),%ebp
16138 + push %ss
16139 + pop %ds
16140 +#else
16141 cmpl $__PAGE_OFFSET-3,%ebp
16142 jae syscall_fault
16143 ASM_STAC
16144 1: movl (%ebp),%ebp
16145 ASM_CLAC
16146 +#endif
16147 +
16148 movl %ebp,PT_EBP(%esp)
16149 _ASM_EXTABLE(1b,syscall_fault)
16150
16151 GET_THREAD_INFO(%ebp)
16152
16153 +#ifdef CONFIG_PAX_RANDKSTACK
16154 + pax_erase_kstack
16155 +#endif
16156 +
16157 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
16158 jnz sysenter_audit
16159 sysenter_do_call:
16160 @@ -444,12 +616,24 @@ sysenter_do_call:
16161 testl $_TIF_ALLWORK_MASK, %ecx
16162 jne sysexit_audit
16163 sysenter_exit:
16164 +
16165 +#ifdef CONFIG_PAX_RANDKSTACK
16166 + pushl_cfi %eax
16167 + movl %esp, %eax
16168 + call pax_randomize_kstack
16169 + popl_cfi %eax
16170 +#endif
16171 +
16172 + pax_erase_kstack
16173 +
16174 /* if something modifies registers it must also disable sysexit */
16175 movl PT_EIP(%esp), %edx
16176 movl PT_OLDESP(%esp), %ecx
16177 xorl %ebp,%ebp
16178 TRACE_IRQS_ON
16179 1: mov PT_FS(%esp), %fs
16180 +2: mov PT_DS(%esp), %ds
16181 +3: mov PT_ES(%esp), %es
16182 PTGS_TO_GS
16183 ENABLE_INTERRUPTS_SYSEXIT
16184
16185 @@ -466,6 +650,9 @@ sysenter_audit:
16186 movl %eax,%edx /* 2nd arg: syscall number */
16187 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
16188 call __audit_syscall_entry
16189 +
16190 + pax_erase_kstack
16191 +
16192 pushl_cfi %ebx
16193 movl PT_EAX(%esp),%eax /* reload syscall number */
16194 jmp sysenter_do_call
16195 @@ -491,10 +678,16 @@ sysexit_audit:
16196
16197 CFI_ENDPROC
16198 .pushsection .fixup,"ax"
16199 -2: movl $0,PT_FS(%esp)
16200 +4: movl $0,PT_FS(%esp)
16201 + jmp 1b
16202 +5: movl $0,PT_DS(%esp)
16203 + jmp 1b
16204 +6: movl $0,PT_ES(%esp)
16205 jmp 1b
16206 .popsection
16207 - _ASM_EXTABLE(1b,2b)
16208 + _ASM_EXTABLE(1b,4b)
16209 + _ASM_EXTABLE(2b,5b)
16210 + _ASM_EXTABLE(3b,6b)
16211 PTGS_TO_GS_EX
16212 ENDPROC(ia32_sysenter_target)
16213
16214 @@ -509,6 +702,11 @@ ENTRY(system_call)
16215 pushl_cfi %eax # save orig_eax
16216 SAVE_ALL
16217 GET_THREAD_INFO(%ebp)
16218 +
16219 +#ifdef CONFIG_PAX_RANDKSTACK
16220 + pax_erase_kstack
16221 +#endif
16222 +
16223 # system call tracing in operation / emulation
16224 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
16225 jnz syscall_trace_entry
16226 @@ -527,6 +725,15 @@ syscall_exit:
16227 testl $_TIF_ALLWORK_MASK, %ecx # current->work
16228 jne syscall_exit_work
16229
16230 +restore_all_pax:
16231 +
16232 +#ifdef CONFIG_PAX_RANDKSTACK
16233 + movl %esp, %eax
16234 + call pax_randomize_kstack
16235 +#endif
16236 +
16237 + pax_erase_kstack
16238 +
16239 restore_all:
16240 TRACE_IRQS_IRET
16241 restore_all_notrace:
16242 @@ -583,14 +790,34 @@ ldt_ss:
16243 * compensating for the offset by changing to the ESPFIX segment with
16244 * a base address that matches for the difference.
16245 */
16246 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
16247 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
16248 mov %esp, %edx /* load kernel esp */
16249 mov PT_OLDESP(%esp), %eax /* load userspace esp */
16250 mov %dx, %ax /* eax: new kernel esp */
16251 sub %eax, %edx /* offset (low word is 0) */
16252 +#ifdef CONFIG_SMP
16253 + movl PER_CPU_VAR(cpu_number), %ebx
16254 + shll $PAGE_SHIFT_asm, %ebx
16255 + addl $cpu_gdt_table, %ebx
16256 +#else
16257 + movl $cpu_gdt_table, %ebx
16258 +#endif
16259 shr $16, %edx
16260 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
16261 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
16262 +
16263 +#ifdef CONFIG_PAX_KERNEXEC
16264 + mov %cr0, %esi
16265 + btr $16, %esi
16266 + mov %esi, %cr0
16267 +#endif
16268 +
16269 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
16270 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
16271 +
16272 +#ifdef CONFIG_PAX_KERNEXEC
16273 + bts $16, %esi
16274 + mov %esi, %cr0
16275 +#endif
16276 +
16277 pushl_cfi $__ESPFIX_SS
16278 pushl_cfi %eax /* new kernel esp */
16279 /* Disable interrupts, but do not irqtrace this section: we
16280 @@ -619,20 +846,18 @@ work_resched:
16281 movl TI_flags(%ebp), %ecx
16282 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
16283 # than syscall tracing?
16284 - jz restore_all
16285 + jz restore_all_pax
16286 testb $_TIF_NEED_RESCHED, %cl
16287 jnz work_resched
16288
16289 work_notifysig: # deal with pending signals and
16290 # notify-resume requests
16291 + movl %esp, %eax
16292 #ifdef CONFIG_VM86
16293 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
16294 - movl %esp, %eax
16295 jne work_notifysig_v86 # returning to kernel-space or
16296 # vm86-space
16297 1:
16298 -#else
16299 - movl %esp, %eax
16300 #endif
16301 TRACE_IRQS_ON
16302 ENABLE_INTERRUPTS(CLBR_NONE)
16303 @@ -653,7 +878,7 @@ work_notifysig_v86:
16304 movl %eax, %esp
16305 jmp 1b
16306 #endif
16307 -END(work_pending)
16308 +ENDPROC(work_pending)
16309
16310 # perform syscall exit tracing
16311 ALIGN
16312 @@ -661,11 +886,14 @@ syscall_trace_entry:
16313 movl $-ENOSYS,PT_EAX(%esp)
16314 movl %esp, %eax
16315 call syscall_trace_enter
16316 +
16317 + pax_erase_kstack
16318 +
16319 /* What it returned is what we'll actually use. */
16320 cmpl $(NR_syscalls), %eax
16321 jnae syscall_call
16322 jmp syscall_exit
16323 -END(syscall_trace_entry)
16324 +ENDPROC(syscall_trace_entry)
16325
16326 # perform syscall exit tracing
16327 ALIGN
16328 @@ -678,21 +906,25 @@ syscall_exit_work:
16329 movl %esp, %eax
16330 call syscall_trace_leave
16331 jmp resume_userspace
16332 -END(syscall_exit_work)
16333 +ENDPROC(syscall_exit_work)
16334 CFI_ENDPROC
16335
16336 RING0_INT_FRAME # can't unwind into user space anyway
16337 syscall_fault:
16338 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16339 + push %ss
16340 + pop %ds
16341 +#endif
16342 ASM_CLAC
16343 GET_THREAD_INFO(%ebp)
16344 movl $-EFAULT,PT_EAX(%esp)
16345 jmp resume_userspace
16346 -END(syscall_fault)
16347 +ENDPROC(syscall_fault)
16348
16349 syscall_badsys:
16350 movl $-ENOSYS,PT_EAX(%esp)
16351 jmp resume_userspace
16352 -END(syscall_badsys)
16353 +ENDPROC(syscall_badsys)
16354 CFI_ENDPROC
16355 /*
16356 * End of kprobes section
16357 @@ -763,6 +995,36 @@ ENTRY(ptregs_clone)
16358 CFI_ENDPROC
16359 ENDPROC(ptregs_clone)
16360
16361 + ALIGN;
16362 +ENTRY(kernel_execve)
16363 + CFI_STARTPROC
16364 + pushl_cfi %ebp
16365 + sub $PT_OLDSS+4,%esp
16366 + pushl_cfi %edi
16367 + pushl_cfi %ecx
16368 + pushl_cfi %eax
16369 + lea 3*4(%esp),%edi
16370 + mov $PT_OLDSS/4+1,%ecx
16371 + xorl %eax,%eax
16372 + rep stosl
16373 + popl_cfi %eax
16374 + popl_cfi %ecx
16375 + popl_cfi %edi
16376 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
16377 + pushl_cfi %esp
16378 + call sys_execve
16379 + add $4,%esp
16380 + CFI_ADJUST_CFA_OFFSET -4
16381 + GET_THREAD_INFO(%ebp)
16382 + test %eax,%eax
16383 + jz syscall_exit
16384 + add $PT_OLDSS+4,%esp
16385 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
16386 + popl_cfi %ebp
16387 + ret
16388 + CFI_ENDPROC
16389 +ENDPROC(kernel_execve)
16390 +
16391 .macro FIXUP_ESPFIX_STACK
16392 /*
16393 * Switch back for ESPFIX stack to the normal zerobased stack
16394 @@ -772,8 +1034,15 @@ ENDPROC(ptregs_clone)
16395 * normal stack and adjusts ESP with the matching offset.
16396 */
16397 /* fixup the stack */
16398 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
16399 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
16400 +#ifdef CONFIG_SMP
16401 + movl PER_CPU_VAR(cpu_number), %ebx
16402 + shll $PAGE_SHIFT_asm, %ebx
16403 + addl $cpu_gdt_table, %ebx
16404 +#else
16405 + movl $cpu_gdt_table, %ebx
16406 +#endif
16407 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
16408 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
16409 shl $16, %eax
16410 addl %esp, %eax /* the adjusted stack pointer */
16411 pushl_cfi $__KERNEL_DS
16412 @@ -826,7 +1095,7 @@ vector=vector+1
16413 .endr
16414 2: jmp common_interrupt
16415 .endr
16416 -END(irq_entries_start)
16417 +ENDPROC(irq_entries_start)
16418
16419 .previous
16420 END(interrupt)
16421 @@ -877,7 +1146,7 @@ ENTRY(coprocessor_error)
16422 pushl_cfi $do_coprocessor_error
16423 jmp error_code
16424 CFI_ENDPROC
16425 -END(coprocessor_error)
16426 +ENDPROC(coprocessor_error)
16427
16428 ENTRY(simd_coprocessor_error)
16429 RING0_INT_FRAME
16430 @@ -899,7 +1168,7 @@ ENTRY(simd_coprocessor_error)
16431 #endif
16432 jmp error_code
16433 CFI_ENDPROC
16434 -END(simd_coprocessor_error)
16435 +ENDPROC(simd_coprocessor_error)
16436
16437 ENTRY(device_not_available)
16438 RING0_INT_FRAME
16439 @@ -908,18 +1177,18 @@ ENTRY(device_not_available)
16440 pushl_cfi $do_device_not_available
16441 jmp error_code
16442 CFI_ENDPROC
16443 -END(device_not_available)
16444 +ENDPROC(device_not_available)
16445
16446 #ifdef CONFIG_PARAVIRT
16447 ENTRY(native_iret)
16448 iret
16449 _ASM_EXTABLE(native_iret, iret_exc)
16450 -END(native_iret)
16451 +ENDPROC(native_iret)
16452
16453 ENTRY(native_irq_enable_sysexit)
16454 sti
16455 sysexit
16456 -END(native_irq_enable_sysexit)
16457 +ENDPROC(native_irq_enable_sysexit)
16458 #endif
16459
16460 ENTRY(overflow)
16461 @@ -929,7 +1198,7 @@ ENTRY(overflow)
16462 pushl_cfi $do_overflow
16463 jmp error_code
16464 CFI_ENDPROC
16465 -END(overflow)
16466 +ENDPROC(overflow)
16467
16468 ENTRY(bounds)
16469 RING0_INT_FRAME
16470 @@ -938,7 +1207,7 @@ ENTRY(bounds)
16471 pushl_cfi $do_bounds
16472 jmp error_code
16473 CFI_ENDPROC
16474 -END(bounds)
16475 +ENDPROC(bounds)
16476
16477 ENTRY(invalid_op)
16478 RING0_INT_FRAME
16479 @@ -947,7 +1216,7 @@ ENTRY(invalid_op)
16480 pushl_cfi $do_invalid_op
16481 jmp error_code
16482 CFI_ENDPROC
16483 -END(invalid_op)
16484 +ENDPROC(invalid_op)
16485
16486 ENTRY(coprocessor_segment_overrun)
16487 RING0_INT_FRAME
16488 @@ -956,7 +1225,7 @@ ENTRY(coprocessor_segment_overrun)
16489 pushl_cfi $do_coprocessor_segment_overrun
16490 jmp error_code
16491 CFI_ENDPROC
16492 -END(coprocessor_segment_overrun)
16493 +ENDPROC(coprocessor_segment_overrun)
16494
16495 ENTRY(invalid_TSS)
16496 RING0_EC_FRAME
16497 @@ -964,7 +1233,7 @@ ENTRY(invalid_TSS)
16498 pushl_cfi $do_invalid_TSS
16499 jmp error_code
16500 CFI_ENDPROC
16501 -END(invalid_TSS)
16502 +ENDPROC(invalid_TSS)
16503
16504 ENTRY(segment_not_present)
16505 RING0_EC_FRAME
16506 @@ -972,7 +1241,7 @@ ENTRY(segment_not_present)
16507 pushl_cfi $do_segment_not_present
16508 jmp error_code
16509 CFI_ENDPROC
16510 -END(segment_not_present)
16511 +ENDPROC(segment_not_present)
16512
16513 ENTRY(stack_segment)
16514 RING0_EC_FRAME
16515 @@ -980,7 +1249,7 @@ ENTRY(stack_segment)
16516 pushl_cfi $do_stack_segment
16517 jmp error_code
16518 CFI_ENDPROC
16519 -END(stack_segment)
16520 +ENDPROC(stack_segment)
16521
16522 ENTRY(alignment_check)
16523 RING0_EC_FRAME
16524 @@ -988,7 +1257,7 @@ ENTRY(alignment_check)
16525 pushl_cfi $do_alignment_check
16526 jmp error_code
16527 CFI_ENDPROC
16528 -END(alignment_check)
16529 +ENDPROC(alignment_check)
16530
16531 ENTRY(divide_error)
16532 RING0_INT_FRAME
16533 @@ -997,7 +1266,7 @@ ENTRY(divide_error)
16534 pushl_cfi $do_divide_error
16535 jmp error_code
16536 CFI_ENDPROC
16537 -END(divide_error)
16538 +ENDPROC(divide_error)
16539
16540 #ifdef CONFIG_X86_MCE
16541 ENTRY(machine_check)
16542 @@ -1007,7 +1276,7 @@ ENTRY(machine_check)
16543 pushl_cfi machine_check_vector
16544 jmp error_code
16545 CFI_ENDPROC
16546 -END(machine_check)
16547 +ENDPROC(machine_check)
16548 #endif
16549
16550 ENTRY(spurious_interrupt_bug)
16551 @@ -1017,7 +1286,7 @@ ENTRY(spurious_interrupt_bug)
16552 pushl_cfi $do_spurious_interrupt_bug
16553 jmp error_code
16554 CFI_ENDPROC
16555 -END(spurious_interrupt_bug)
16556 +ENDPROC(spurious_interrupt_bug)
16557 /*
16558 * End of kprobes section
16559 */
16560 @@ -1121,7 +1390,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
16561
16562 ENTRY(mcount)
16563 ret
16564 -END(mcount)
16565 +ENDPROC(mcount)
16566
16567 ENTRY(ftrace_caller)
16568 cmpl $0, function_trace_stop
16569 @@ -1154,7 +1423,7 @@ ftrace_graph_call:
16570 .globl ftrace_stub
16571 ftrace_stub:
16572 ret
16573 -END(ftrace_caller)
16574 +ENDPROC(ftrace_caller)
16575
16576 ENTRY(ftrace_regs_caller)
16577 pushf /* push flags before compare (in cs location) */
16578 @@ -1255,7 +1524,7 @@ trace:
16579 popl %ecx
16580 popl %eax
16581 jmp ftrace_stub
16582 -END(mcount)
16583 +ENDPROC(mcount)
16584 #endif /* CONFIG_DYNAMIC_FTRACE */
16585 #endif /* CONFIG_FUNCTION_TRACER */
16586
16587 @@ -1273,7 +1542,7 @@ ENTRY(ftrace_graph_caller)
16588 popl %ecx
16589 popl %eax
16590 ret
16591 -END(ftrace_graph_caller)
16592 +ENDPROC(ftrace_graph_caller)
16593
16594 .globl return_to_handler
16595 return_to_handler:
16596 @@ -1329,15 +1598,18 @@ error_code:
16597 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
16598 REG_TO_PTGS %ecx
16599 SET_KERNEL_GS %ecx
16600 - movl $(__USER_DS), %ecx
16601 + movl $(__KERNEL_DS), %ecx
16602 movl %ecx, %ds
16603 movl %ecx, %es
16604 +
16605 + pax_enter_kernel
16606 +
16607 TRACE_IRQS_OFF
16608 movl %esp,%eax # pt_regs pointer
16609 call *%edi
16610 jmp ret_from_exception
16611 CFI_ENDPROC
16612 -END(page_fault)
16613 +ENDPROC(page_fault)
16614
16615 /*
16616 * Debug traps and NMI can happen at the one SYSENTER instruction
16617 @@ -1380,7 +1652,7 @@ debug_stack_correct:
16618 call do_debug
16619 jmp ret_from_exception
16620 CFI_ENDPROC
16621 -END(debug)
16622 +ENDPROC(debug)
16623
16624 /*
16625 * NMI is doubly nasty. It can happen _while_ we're handling
16626 @@ -1418,6 +1690,9 @@ nmi_stack_correct:
16627 xorl %edx,%edx # zero error code
16628 movl %esp,%eax # pt_regs pointer
16629 call do_nmi
16630 +
16631 + pax_exit_kernel
16632 +
16633 jmp restore_all_notrace
16634 CFI_ENDPROC
16635
16636 @@ -1454,12 +1729,15 @@ nmi_espfix_stack:
16637 FIXUP_ESPFIX_STACK # %eax == %esp
16638 xorl %edx,%edx # zero error code
16639 call do_nmi
16640 +
16641 + pax_exit_kernel
16642 +
16643 RESTORE_REGS
16644 lss 12+4(%esp), %esp # back to espfix stack
16645 CFI_ADJUST_CFA_OFFSET -24
16646 jmp irq_return
16647 CFI_ENDPROC
16648 -END(nmi)
16649 +ENDPROC(nmi)
16650
16651 ENTRY(int3)
16652 RING0_INT_FRAME
16653 @@ -1472,14 +1750,14 @@ ENTRY(int3)
16654 call do_int3
16655 jmp ret_from_exception
16656 CFI_ENDPROC
16657 -END(int3)
16658 +ENDPROC(int3)
16659
16660 ENTRY(general_protection)
16661 RING0_EC_FRAME
16662 pushl_cfi $do_general_protection
16663 jmp error_code
16664 CFI_ENDPROC
16665 -END(general_protection)
16666 +ENDPROC(general_protection)
16667
16668 #ifdef CONFIG_KVM_GUEST
16669 ENTRY(async_page_fault)
16670 @@ -1488,7 +1766,7 @@ ENTRY(async_page_fault)
16671 pushl_cfi $do_async_page_fault
16672 jmp error_code
16673 CFI_ENDPROC
16674 -END(async_page_fault)
16675 +ENDPROC(async_page_fault)
16676 #endif
16677
16678 /*
16679 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
16680 index 1328fe4..cb03298 100644
16681 --- a/arch/x86/kernel/entry_64.S
16682 +++ b/arch/x86/kernel/entry_64.S
16683 @@ -59,6 +59,8 @@
16684 #include <asm/rcu.h>
16685 #include <asm/smap.h>
16686 #include <linux/err.h>
16687 +#include <asm/pgtable.h>
16688 +#include <asm/alternative-asm.h>
16689
16690 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
16691 #include <linux/elf-em.h>
16692 @@ -80,8 +82,9 @@
16693 #ifdef CONFIG_DYNAMIC_FTRACE
16694
16695 ENTRY(function_hook)
16696 + pax_force_retaddr
16697 retq
16698 -END(function_hook)
16699 +ENDPROC(function_hook)
16700
16701 /* skip is set if stack has been adjusted */
16702 .macro ftrace_caller_setup skip=0
16703 @@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
16704 #endif
16705
16706 GLOBAL(ftrace_stub)
16707 + pax_force_retaddr
16708 retq
16709 -END(ftrace_caller)
16710 +ENDPROC(ftrace_caller)
16711
16712 ENTRY(ftrace_regs_caller)
16713 /* Save the current flags before compare (in SS location)*/
16714 @@ -191,7 +195,7 @@ ftrace_restore_flags:
16715 popfq
16716 jmp ftrace_stub
16717
16718 -END(ftrace_regs_caller)
16719 +ENDPROC(ftrace_regs_caller)
16720
16721
16722 #else /* ! CONFIG_DYNAMIC_FTRACE */
16723 @@ -212,6 +216,7 @@ ENTRY(function_hook)
16724 #endif
16725
16726 GLOBAL(ftrace_stub)
16727 + pax_force_retaddr
16728 retq
16729
16730 trace:
16731 @@ -225,12 +230,13 @@ trace:
16732 #endif
16733 subq $MCOUNT_INSN_SIZE, %rdi
16734
16735 + pax_force_fptr ftrace_trace_function
16736 call *ftrace_trace_function
16737
16738 MCOUNT_RESTORE_FRAME
16739
16740 jmp ftrace_stub
16741 -END(function_hook)
16742 +ENDPROC(function_hook)
16743 #endif /* CONFIG_DYNAMIC_FTRACE */
16744 #endif /* CONFIG_FUNCTION_TRACER */
16745
16746 @@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
16747
16748 MCOUNT_RESTORE_FRAME
16749
16750 + pax_force_retaddr
16751 retq
16752 -END(ftrace_graph_caller)
16753 +ENDPROC(ftrace_graph_caller)
16754
16755 GLOBAL(return_to_handler)
16756 subq $24, %rsp
16757 @@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
16758 movq 8(%rsp), %rdx
16759 movq (%rsp), %rax
16760 addq $24, %rsp
16761 + pax_force_fptr %rdi
16762 jmp *%rdi
16763 +ENDPROC(return_to_handler)
16764 #endif
16765
16766
16767 @@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
16768 ENDPROC(native_usergs_sysret64)
16769 #endif /* CONFIG_PARAVIRT */
16770
16771 + .macro ljmpq sel, off
16772 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
16773 + .byte 0x48; ljmp *1234f(%rip)
16774 + .pushsection .rodata
16775 + .align 16
16776 + 1234: .quad \off; .word \sel
16777 + .popsection
16778 +#else
16779 + pushq $\sel
16780 + pushq $\off
16781 + lretq
16782 +#endif
16783 + .endm
16784 +
16785 + .macro pax_enter_kernel
16786 + pax_set_fptr_mask
16787 +#ifdef CONFIG_PAX_KERNEXEC
16788 + call pax_enter_kernel
16789 +#endif
16790 + .endm
16791 +
16792 + .macro pax_exit_kernel
16793 +#ifdef CONFIG_PAX_KERNEXEC
16794 + call pax_exit_kernel
16795 +#endif
16796 + .endm
16797 +
16798 +#ifdef CONFIG_PAX_KERNEXEC
16799 +ENTRY(pax_enter_kernel)
16800 + pushq %rdi
16801 +
16802 +#ifdef CONFIG_PARAVIRT
16803 + PV_SAVE_REGS(CLBR_RDI)
16804 +#endif
16805 +
16806 + GET_CR0_INTO_RDI
16807 + bts $16,%rdi
16808 + jnc 3f
16809 + mov %cs,%edi
16810 + cmp $__KERNEL_CS,%edi
16811 + jnz 2f
16812 +1:
16813 +
16814 +#ifdef CONFIG_PARAVIRT
16815 + PV_RESTORE_REGS(CLBR_RDI)
16816 +#endif
16817 +
16818 + popq %rdi
16819 + pax_force_retaddr
16820 + retq
16821 +
16822 +2: ljmpq __KERNEL_CS,1f
16823 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
16824 +4: SET_RDI_INTO_CR0
16825 + jmp 1b
16826 +ENDPROC(pax_enter_kernel)
16827 +
16828 +ENTRY(pax_exit_kernel)
16829 + pushq %rdi
16830 +
16831 +#ifdef CONFIG_PARAVIRT
16832 + PV_SAVE_REGS(CLBR_RDI)
16833 +#endif
16834 +
16835 + mov %cs,%rdi
16836 + cmp $__KERNEXEC_KERNEL_CS,%edi
16837 + jz 2f
16838 +1:
16839 +
16840 +#ifdef CONFIG_PARAVIRT
16841 + PV_RESTORE_REGS(CLBR_RDI);
16842 +#endif
16843 +
16844 + popq %rdi
16845 + pax_force_retaddr
16846 + retq
16847 +
16848 +2: GET_CR0_INTO_RDI
16849 + btr $16,%rdi
16850 + ljmpq __KERNEL_CS,3f
16851 +3: SET_RDI_INTO_CR0
16852 + jmp 1b
16853 +ENDPROC(pax_exit_kernel)
16854 +#endif
16855 +
16856 + .macro pax_enter_kernel_user
16857 + pax_set_fptr_mask
16858 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16859 + call pax_enter_kernel_user
16860 +#endif
16861 + .endm
16862 +
16863 + .macro pax_exit_kernel_user
16864 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16865 + call pax_exit_kernel_user
16866 +#endif
16867 +#ifdef CONFIG_PAX_RANDKSTACK
16868 + pushq %rax
16869 + call pax_randomize_kstack
16870 + popq %rax
16871 +#endif
16872 + .endm
16873 +
16874 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16875 +ENTRY(pax_enter_kernel_user)
16876 + pushq %rdi
16877 + pushq %rbx
16878 +
16879 +#ifdef CONFIG_PARAVIRT
16880 + PV_SAVE_REGS(CLBR_RDI)
16881 +#endif
16882 +
16883 + GET_CR3_INTO_RDI
16884 + mov %rdi,%rbx
16885 + add $__START_KERNEL_map,%rbx
16886 + sub phys_base(%rip),%rbx
16887 +
16888 +#ifdef CONFIG_PARAVIRT
16889 + pushq %rdi
16890 + cmpl $0, pv_info+PARAVIRT_enabled
16891 + jz 1f
16892 + i = 0
16893 + .rept USER_PGD_PTRS
16894 + mov i*8(%rbx),%rsi
16895 + mov $0,%sil
16896 + lea i*8(%rbx),%rdi
16897 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16898 + i = i + 1
16899 + .endr
16900 + jmp 2f
16901 +1:
16902 +#endif
16903 +
16904 + i = 0
16905 + .rept USER_PGD_PTRS
16906 + movb $0,i*8(%rbx)
16907 + i = i + 1
16908 + .endr
16909 +
16910 +#ifdef CONFIG_PARAVIRT
16911 +2: popq %rdi
16912 +#endif
16913 + SET_RDI_INTO_CR3
16914 +
16915 +#ifdef CONFIG_PAX_KERNEXEC
16916 + GET_CR0_INTO_RDI
16917 + bts $16,%rdi
16918 + SET_RDI_INTO_CR0
16919 +#endif
16920 +
16921 +#ifdef CONFIG_PARAVIRT
16922 + PV_RESTORE_REGS(CLBR_RDI)
16923 +#endif
16924 +
16925 + popq %rbx
16926 + popq %rdi
16927 + pax_force_retaddr
16928 + retq
16929 +ENDPROC(pax_enter_kernel_user)
16930 +
16931 +ENTRY(pax_exit_kernel_user)
16932 + push %rdi
16933 +
16934 +#ifdef CONFIG_PARAVIRT
16935 + pushq %rbx
16936 + PV_SAVE_REGS(CLBR_RDI)
16937 +#endif
16938 +
16939 +#ifdef CONFIG_PAX_KERNEXEC
16940 + GET_CR0_INTO_RDI
16941 + btr $16,%rdi
16942 + SET_RDI_INTO_CR0
16943 +#endif
16944 +
16945 + GET_CR3_INTO_RDI
16946 + add $__START_KERNEL_map,%rdi
16947 + sub phys_base(%rip),%rdi
16948 +
16949 +#ifdef CONFIG_PARAVIRT
16950 + cmpl $0, pv_info+PARAVIRT_enabled
16951 + jz 1f
16952 + mov %rdi,%rbx
16953 + i = 0
16954 + .rept USER_PGD_PTRS
16955 + mov i*8(%rbx),%rsi
16956 + mov $0x67,%sil
16957 + lea i*8(%rbx),%rdi
16958 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16959 + i = i + 1
16960 + .endr
16961 + jmp 2f
16962 +1:
16963 +#endif
16964 +
16965 + i = 0
16966 + .rept USER_PGD_PTRS
16967 + movb $0x67,i*8(%rdi)
16968 + i = i + 1
16969 + .endr
16970 +
16971 +#ifdef CONFIG_PARAVIRT
16972 +2: PV_RESTORE_REGS(CLBR_RDI)
16973 + popq %rbx
16974 +#endif
16975 +
16976 + popq %rdi
16977 + pax_force_retaddr
16978 + retq
16979 +ENDPROC(pax_exit_kernel_user)
16980 +#endif
16981 +
16982 +.macro pax_erase_kstack
16983 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16984 + call pax_erase_kstack
16985 +#endif
16986 +.endm
16987 +
16988 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16989 +ENTRY(pax_erase_kstack)
16990 + pushq %rdi
16991 + pushq %rcx
16992 + pushq %rax
16993 + pushq %r11
16994 +
16995 + GET_THREAD_INFO(%r11)
16996 + mov TI_lowest_stack(%r11), %rdi
16997 + mov $-0xBEEF, %rax
16998 + std
16999 +
17000 +1: mov %edi, %ecx
17001 + and $THREAD_SIZE_asm - 1, %ecx
17002 + shr $3, %ecx
17003 + repne scasq
17004 + jecxz 2f
17005 +
17006 + cmp $2*8, %ecx
17007 + jc 2f
17008 +
17009 + mov $2*8, %ecx
17010 + repe scasq
17011 + jecxz 2f
17012 + jne 1b
17013 +
17014 +2: cld
17015 + mov %esp, %ecx
17016 + sub %edi, %ecx
17017 +
17018 + cmp $THREAD_SIZE_asm, %rcx
17019 + jb 3f
17020 + ud2
17021 +3:
17022 +
17023 + shr $3, %ecx
17024 + rep stosq
17025 +
17026 + mov TI_task_thread_sp0(%r11), %rdi
17027 + sub $256, %rdi
17028 + mov %rdi, TI_lowest_stack(%r11)
17029 +
17030 + popq %r11
17031 + popq %rax
17032 + popq %rcx
17033 + popq %rdi
17034 + pax_force_retaddr
17035 + ret
17036 +ENDPROC(pax_erase_kstack)
17037 +#endif
17038
17039 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
17040 #ifdef CONFIG_TRACE_IRQFLAGS
17041 @@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
17042 .endm
17043
17044 .macro UNFAKE_STACK_FRAME
17045 - addq $8*6, %rsp
17046 - CFI_ADJUST_CFA_OFFSET -(6*8)
17047 + addq $8*6 + ARG_SKIP, %rsp
17048 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
17049 .endm
17050
17051 /*
17052 @@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
17053 movq %rsp, %rsi
17054
17055 leaq -RBP(%rsp),%rdi /* arg1 for handler */
17056 - testl $3, CS-RBP(%rsi)
17057 + testb $3, CS-RBP(%rsi)
17058 je 1f
17059 SWAPGS
17060 /*
17061 @@ -498,9 +774,10 @@ ENTRY(save_rest)
17062 movq_cfi r15, R15+16
17063 movq %r11, 8(%rsp) /* return address */
17064 FIXUP_TOP_OF_STACK %r11, 16
17065 + pax_force_retaddr
17066 ret
17067 CFI_ENDPROC
17068 -END(save_rest)
17069 +ENDPROC(save_rest)
17070
17071 /* save complete stack frame */
17072 .pushsection .kprobes.text, "ax"
17073 @@ -529,9 +806,10 @@ ENTRY(save_paranoid)
17074 js 1f /* negative -> in kernel */
17075 SWAPGS
17076 xorl %ebx,%ebx
17077 -1: ret
17078 +1: pax_force_retaddr_bts
17079 + ret
17080 CFI_ENDPROC
17081 -END(save_paranoid)
17082 +ENDPROC(save_paranoid)
17083 .popsection
17084
17085 /*
17086 @@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
17087
17088 RESTORE_REST
17089
17090 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17091 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17092 jz 1f
17093
17094 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
17095 @@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
17096 RESTORE_REST
17097 jmp int_ret_from_sys_call
17098 CFI_ENDPROC
17099 -END(ret_from_fork)
17100 +ENDPROC(ret_from_fork)
17101
17102 /*
17103 * System call entry. Up to 6 arguments in registers are supported.
17104 @@ -608,7 +886,7 @@ END(ret_from_fork)
17105 ENTRY(system_call)
17106 CFI_STARTPROC simple
17107 CFI_SIGNAL_FRAME
17108 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
17109 + CFI_DEF_CFA rsp,0
17110 CFI_REGISTER rip,rcx
17111 /*CFI_REGISTER rflags,r11*/
17112 SWAPGS_UNSAFE_STACK
17113 @@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
17114
17115 movq %rsp,PER_CPU_VAR(old_rsp)
17116 movq PER_CPU_VAR(kernel_stack),%rsp
17117 + SAVE_ARGS 8*6,0
17118 + pax_enter_kernel_user
17119 +
17120 +#ifdef CONFIG_PAX_RANDKSTACK
17121 + pax_erase_kstack
17122 +#endif
17123 +
17124 /*
17125 * No need to follow this irqs off/on section - it's straight
17126 * and short:
17127 */
17128 ENABLE_INTERRUPTS(CLBR_NONE)
17129 - SAVE_ARGS 8,0
17130 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
17131 movq %rcx,RIP-ARGOFFSET(%rsp)
17132 CFI_REL_OFFSET rip,RIP-ARGOFFSET
17133 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
17134 + GET_THREAD_INFO(%rcx)
17135 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
17136 jnz tracesys
17137 system_call_fastpath:
17138 #if __SYSCALL_MASK == ~0
17139 @@ -640,7 +925,7 @@ system_call_fastpath:
17140 cmpl $__NR_syscall_max,%eax
17141 #endif
17142 ja badsys
17143 - movq %r10,%rcx
17144 + movq R10-ARGOFFSET(%rsp),%rcx
17145 call *sys_call_table(,%rax,8) # XXX: rip relative
17146 movq %rax,RAX-ARGOFFSET(%rsp)
17147 /*
17148 @@ -654,10 +939,13 @@ sysret_check:
17149 LOCKDEP_SYS_EXIT
17150 DISABLE_INTERRUPTS(CLBR_NONE)
17151 TRACE_IRQS_OFF
17152 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
17153 + GET_THREAD_INFO(%rcx)
17154 + movl TI_flags(%rcx),%edx
17155 andl %edi,%edx
17156 jnz sysret_careful
17157 CFI_REMEMBER_STATE
17158 + pax_exit_kernel_user
17159 + pax_erase_kstack
17160 /*
17161 * sysretq will re-enable interrupts:
17162 */
17163 @@ -709,14 +997,18 @@ badsys:
17164 * jump back to the normal fast path.
17165 */
17166 auditsys:
17167 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
17168 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
17169 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
17170 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
17171 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
17172 movq %rax,%rsi /* 2nd arg: syscall number */
17173 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
17174 call __audit_syscall_entry
17175 +
17176 + pax_erase_kstack
17177 +
17178 LOAD_ARGS 0 /* reload call-clobbered registers */
17179 + pax_set_fptr_mask
17180 jmp system_call_fastpath
17181
17182 /*
17183 @@ -737,7 +1029,7 @@ sysret_audit:
17184 /* Do syscall tracing */
17185 tracesys:
17186 #ifdef CONFIG_AUDITSYSCALL
17187 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
17188 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
17189 jz auditsys
17190 #endif
17191 SAVE_REST
17192 @@ -745,12 +1037,16 @@ tracesys:
17193 FIXUP_TOP_OF_STACK %rdi
17194 movq %rsp,%rdi
17195 call syscall_trace_enter
17196 +
17197 + pax_erase_kstack
17198 +
17199 /*
17200 * Reload arg registers from stack in case ptrace changed them.
17201 * We don't reload %rax because syscall_trace_enter() returned
17202 * the value it wants us to use in the table lookup.
17203 */
17204 LOAD_ARGS ARGOFFSET, 1
17205 + pax_set_fptr_mask
17206 RESTORE_REST
17207 #if __SYSCALL_MASK == ~0
17208 cmpq $__NR_syscall_max,%rax
17209 @@ -759,7 +1055,7 @@ tracesys:
17210 cmpl $__NR_syscall_max,%eax
17211 #endif
17212 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
17213 - movq %r10,%rcx /* fixup for C */
17214 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
17215 call *sys_call_table(,%rax,8)
17216 movq %rax,RAX-ARGOFFSET(%rsp)
17217 /* Use IRET because user could have changed frame */
17218 @@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
17219 andl %edi,%edx
17220 jnz int_careful
17221 andl $~TS_COMPAT,TI_status(%rcx)
17222 - jmp retint_swapgs
17223 + pax_exit_kernel_user
17224 + pax_erase_kstack
17225 + jmp retint_swapgs_pax
17226
17227 /* Either reschedule or signal or syscall exit tracking needed. */
17228 /* First do a reschedule test. */
17229 @@ -826,7 +1124,7 @@ int_restore_rest:
17230 TRACE_IRQS_OFF
17231 jmp int_with_check
17232 CFI_ENDPROC
17233 -END(system_call)
17234 +ENDPROC(system_call)
17235
17236 /*
17237 * Certain special system calls that need to save a complete full stack frame.
17238 @@ -842,7 +1140,7 @@ ENTRY(\label)
17239 call \func
17240 jmp ptregscall_common
17241 CFI_ENDPROC
17242 -END(\label)
17243 +ENDPROC(\label)
17244 .endm
17245
17246 PTREGSCALL stub_clone, sys_clone, %r8
17247 @@ -860,9 +1158,10 @@ ENTRY(ptregscall_common)
17248 movq_cfi_restore R12+8, r12
17249 movq_cfi_restore RBP+8, rbp
17250 movq_cfi_restore RBX+8, rbx
17251 + pax_force_retaddr
17252 ret $REST_SKIP /* pop extended registers */
17253 CFI_ENDPROC
17254 -END(ptregscall_common)
17255 +ENDPROC(ptregscall_common)
17256
17257 ENTRY(stub_execve)
17258 CFI_STARTPROC
17259 @@ -876,7 +1175,7 @@ ENTRY(stub_execve)
17260 RESTORE_REST
17261 jmp int_ret_from_sys_call
17262 CFI_ENDPROC
17263 -END(stub_execve)
17264 +ENDPROC(stub_execve)
17265
17266 /*
17267 * sigreturn is special because it needs to restore all registers on return.
17268 @@ -894,7 +1193,7 @@ ENTRY(stub_rt_sigreturn)
17269 RESTORE_REST
17270 jmp int_ret_from_sys_call
17271 CFI_ENDPROC
17272 -END(stub_rt_sigreturn)
17273 +ENDPROC(stub_rt_sigreturn)
17274
17275 #ifdef CONFIG_X86_X32_ABI
17276 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
17277 @@ -962,7 +1261,7 @@ vector=vector+1
17278 2: jmp common_interrupt
17279 .endr
17280 CFI_ENDPROC
17281 -END(irq_entries_start)
17282 +ENDPROC(irq_entries_start)
17283
17284 .previous
17285 END(interrupt)
17286 @@ -982,6 +1281,16 @@ END(interrupt)
17287 subq $ORIG_RAX-RBP, %rsp
17288 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
17289 SAVE_ARGS_IRQ
17290 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17291 + testb $3, CS(%rdi)
17292 + jnz 1f
17293 + pax_enter_kernel
17294 + jmp 2f
17295 +1: pax_enter_kernel_user
17296 +2:
17297 +#else
17298 + pax_enter_kernel
17299 +#endif
17300 call \func
17301 .endm
17302
17303 @@ -1014,7 +1323,7 @@ ret_from_intr:
17304
17305 exit_intr:
17306 GET_THREAD_INFO(%rcx)
17307 - testl $3,CS-ARGOFFSET(%rsp)
17308 + testb $3,CS-ARGOFFSET(%rsp)
17309 je retint_kernel
17310
17311 /* Interrupt came from user space */
17312 @@ -1036,12 +1345,16 @@ retint_swapgs: /* return to user-space */
17313 * The iretq could re-enable interrupts:
17314 */
17315 DISABLE_INTERRUPTS(CLBR_ANY)
17316 + pax_exit_kernel_user
17317 +retint_swapgs_pax:
17318 TRACE_IRQS_IRETQ
17319 SWAPGS
17320 jmp restore_args
17321
17322 retint_restore_args: /* return to kernel space */
17323 DISABLE_INTERRUPTS(CLBR_ANY)
17324 + pax_exit_kernel
17325 + pax_force_retaddr (RIP-ARGOFFSET)
17326 /*
17327 * The iretq could re-enable interrupts:
17328 */
17329 @@ -1124,7 +1437,7 @@ ENTRY(retint_kernel)
17330 #endif
17331
17332 CFI_ENDPROC
17333 -END(common_interrupt)
17334 +ENDPROC(common_interrupt)
17335 /*
17336 * End of kprobes section
17337 */
17338 @@ -1142,7 +1455,7 @@ ENTRY(\sym)
17339 interrupt \do_sym
17340 jmp ret_from_intr
17341 CFI_ENDPROC
17342 -END(\sym)
17343 +ENDPROC(\sym)
17344 .endm
17345
17346 #ifdef CONFIG_SMP
17347 @@ -1198,12 +1511,22 @@ ENTRY(\sym)
17348 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17349 call error_entry
17350 DEFAULT_FRAME 0
17351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17352 + testb $3, CS(%rsp)
17353 + jnz 1f
17354 + pax_enter_kernel
17355 + jmp 2f
17356 +1: pax_enter_kernel_user
17357 +2:
17358 +#else
17359 + pax_enter_kernel
17360 +#endif
17361 movq %rsp,%rdi /* pt_regs pointer */
17362 xorl %esi,%esi /* no error code */
17363 call \do_sym
17364 jmp error_exit /* %ebx: no swapgs flag */
17365 CFI_ENDPROC
17366 -END(\sym)
17367 +ENDPROC(\sym)
17368 .endm
17369
17370 .macro paranoidzeroentry sym do_sym
17371 @@ -1216,15 +1539,25 @@ ENTRY(\sym)
17372 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17373 call save_paranoid
17374 TRACE_IRQS_OFF
17375 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17376 + testb $3, CS(%rsp)
17377 + jnz 1f
17378 + pax_enter_kernel
17379 + jmp 2f
17380 +1: pax_enter_kernel_user
17381 +2:
17382 +#else
17383 + pax_enter_kernel
17384 +#endif
17385 movq %rsp,%rdi /* pt_regs pointer */
17386 xorl %esi,%esi /* no error code */
17387 call \do_sym
17388 jmp paranoid_exit /* %ebx: no swapgs flag */
17389 CFI_ENDPROC
17390 -END(\sym)
17391 +ENDPROC(\sym)
17392 .endm
17393
17394 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
17395 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
17396 .macro paranoidzeroentry_ist sym do_sym ist
17397 ENTRY(\sym)
17398 INTR_FRAME
17399 @@ -1235,14 +1568,30 @@ ENTRY(\sym)
17400 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17401 call save_paranoid
17402 TRACE_IRQS_OFF_DEBUG
17403 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17404 + testb $3, CS(%rsp)
17405 + jnz 1f
17406 + pax_enter_kernel
17407 + jmp 2f
17408 +1: pax_enter_kernel_user
17409 +2:
17410 +#else
17411 + pax_enter_kernel
17412 +#endif
17413 movq %rsp,%rdi /* pt_regs pointer */
17414 xorl %esi,%esi /* no error code */
17415 +#ifdef CONFIG_SMP
17416 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
17417 + lea init_tss(%r12), %r12
17418 +#else
17419 + lea init_tss(%rip), %r12
17420 +#endif
17421 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
17422 call \do_sym
17423 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
17424 jmp paranoid_exit /* %ebx: no swapgs flag */
17425 CFI_ENDPROC
17426 -END(\sym)
17427 +ENDPROC(\sym)
17428 .endm
17429
17430 .macro errorentry sym do_sym
17431 @@ -1254,13 +1603,23 @@ ENTRY(\sym)
17432 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17433 call error_entry
17434 DEFAULT_FRAME 0
17435 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17436 + testb $3, CS(%rsp)
17437 + jnz 1f
17438 + pax_enter_kernel
17439 + jmp 2f
17440 +1: pax_enter_kernel_user
17441 +2:
17442 +#else
17443 + pax_enter_kernel
17444 +#endif
17445 movq %rsp,%rdi /* pt_regs pointer */
17446 movq ORIG_RAX(%rsp),%rsi /* get error code */
17447 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17448 call \do_sym
17449 jmp error_exit /* %ebx: no swapgs flag */
17450 CFI_ENDPROC
17451 -END(\sym)
17452 +ENDPROC(\sym)
17453 .endm
17454
17455 /* error code is on the stack already */
17456 @@ -1274,13 +1633,23 @@ ENTRY(\sym)
17457 call save_paranoid
17458 DEFAULT_FRAME 0
17459 TRACE_IRQS_OFF
17460 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17461 + testb $3, CS(%rsp)
17462 + jnz 1f
17463 + pax_enter_kernel
17464 + jmp 2f
17465 +1: pax_enter_kernel_user
17466 +2:
17467 +#else
17468 + pax_enter_kernel
17469 +#endif
17470 movq %rsp,%rdi /* pt_regs pointer */
17471 movq ORIG_RAX(%rsp),%rsi /* get error code */
17472 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17473 call \do_sym
17474 jmp paranoid_exit /* %ebx: no swapgs flag */
17475 CFI_ENDPROC
17476 -END(\sym)
17477 +ENDPROC(\sym)
17478 .endm
17479
17480 zeroentry divide_error do_divide_error
17481 @@ -1310,9 +1679,10 @@ gs_change:
17482 2: mfence /* workaround */
17483 SWAPGS
17484 popfq_cfi
17485 + pax_force_retaddr
17486 ret
17487 CFI_ENDPROC
17488 -END(native_load_gs_index)
17489 +ENDPROC(native_load_gs_index)
17490
17491 _ASM_EXTABLE(gs_change,bad_gs)
17492 .section .fixup,"ax"
17493 @@ -1340,9 +1710,10 @@ ENTRY(call_softirq)
17494 CFI_DEF_CFA_REGISTER rsp
17495 CFI_ADJUST_CFA_OFFSET -8
17496 decl PER_CPU_VAR(irq_count)
17497 + pax_force_retaddr
17498 ret
17499 CFI_ENDPROC
17500 -END(call_softirq)
17501 +ENDPROC(call_softirq)
17502
17503 #ifdef CONFIG_XEN
17504 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
17505 @@ -1380,7 +1751,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
17506 decl PER_CPU_VAR(irq_count)
17507 jmp error_exit
17508 CFI_ENDPROC
17509 -END(xen_do_hypervisor_callback)
17510 +ENDPROC(xen_do_hypervisor_callback)
17511
17512 /*
17513 * Hypervisor uses this for application faults while it executes.
17514 @@ -1439,7 +1810,7 @@ ENTRY(xen_failsafe_callback)
17515 SAVE_ALL
17516 jmp error_exit
17517 CFI_ENDPROC
17518 -END(xen_failsafe_callback)
17519 +ENDPROC(xen_failsafe_callback)
17520
17521 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
17522 xen_hvm_callback_vector xen_evtchn_do_upcall
17523 @@ -1488,16 +1859,31 @@ ENTRY(paranoid_exit)
17524 TRACE_IRQS_OFF_DEBUG
17525 testl %ebx,%ebx /* swapgs needed? */
17526 jnz paranoid_restore
17527 - testl $3,CS(%rsp)
17528 + testb $3,CS(%rsp)
17529 jnz paranoid_userspace
17530 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17531 + pax_exit_kernel
17532 + TRACE_IRQS_IRETQ 0
17533 + SWAPGS_UNSAFE_STACK
17534 + RESTORE_ALL 8
17535 + pax_force_retaddr_bts
17536 + jmp irq_return
17537 +#endif
17538 paranoid_swapgs:
17539 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17540 + pax_exit_kernel_user
17541 +#else
17542 + pax_exit_kernel
17543 +#endif
17544 TRACE_IRQS_IRETQ 0
17545 SWAPGS_UNSAFE_STACK
17546 RESTORE_ALL 8
17547 jmp irq_return
17548 paranoid_restore:
17549 + pax_exit_kernel
17550 TRACE_IRQS_IRETQ_DEBUG 0
17551 RESTORE_ALL 8
17552 + pax_force_retaddr_bts
17553 jmp irq_return
17554 paranoid_userspace:
17555 GET_THREAD_INFO(%rcx)
17556 @@ -1526,7 +1912,7 @@ paranoid_schedule:
17557 TRACE_IRQS_OFF
17558 jmp paranoid_userspace
17559 CFI_ENDPROC
17560 -END(paranoid_exit)
17561 +ENDPROC(paranoid_exit)
17562
17563 /*
17564 * Exception entry point. This expects an error code/orig_rax on the stack.
17565 @@ -1553,12 +1939,13 @@ ENTRY(error_entry)
17566 movq_cfi r14, R14+8
17567 movq_cfi r15, R15+8
17568 xorl %ebx,%ebx
17569 - testl $3,CS+8(%rsp)
17570 + testb $3,CS+8(%rsp)
17571 je error_kernelspace
17572 error_swapgs:
17573 SWAPGS
17574 error_sti:
17575 TRACE_IRQS_OFF
17576 + pax_force_retaddr_bts
17577 ret
17578
17579 /*
17580 @@ -1585,7 +1972,7 @@ bstep_iret:
17581 movq %rcx,RIP+8(%rsp)
17582 jmp error_swapgs
17583 CFI_ENDPROC
17584 -END(error_entry)
17585 +ENDPROC(error_entry)
17586
17587
17588 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
17589 @@ -1605,7 +1992,7 @@ ENTRY(error_exit)
17590 jnz retint_careful
17591 jmp retint_swapgs
17592 CFI_ENDPROC
17593 -END(error_exit)
17594 +ENDPROC(error_exit)
17595
17596 /*
17597 * Test if a given stack is an NMI stack or not.
17598 @@ -1663,9 +2050,11 @@ ENTRY(nmi)
17599 * If %cs was not the kernel segment, then the NMI triggered in user
17600 * space, which means it is definitely not nested.
17601 */
17602 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
17603 + je 1f
17604 cmpl $__KERNEL_CS, 16(%rsp)
17605 jne first_nmi
17606 -
17607 +1:
17608 /*
17609 * Check the special variable on the stack to see if NMIs are
17610 * executing.
17611 @@ -1824,6 +2213,17 @@ end_repeat_nmi:
17612 */
17613 movq %cr2, %r12
17614
17615 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17616 + testb $3, CS(%rsp)
17617 + jnz 1f
17618 + pax_enter_kernel
17619 + jmp 2f
17620 +1: pax_enter_kernel_user
17621 +2:
17622 +#else
17623 + pax_enter_kernel
17624 +#endif
17625 +
17626 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
17627 movq %rsp,%rdi
17628 movq $-1,%rsi
17629 @@ -1839,21 +2239,32 @@ end_repeat_nmi:
17630 testl %ebx,%ebx /* swapgs needed? */
17631 jnz nmi_restore
17632 nmi_swapgs:
17633 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17634 + pax_exit_kernel_user
17635 +#else
17636 + pax_exit_kernel
17637 +#endif
17638 SWAPGS_UNSAFE_STACK
17639 + RESTORE_ALL 8
17640 + /* Clear the NMI executing stack variable */
17641 + movq $0, 10*8(%rsp)
17642 + jmp irq_return
17643 nmi_restore:
17644 + pax_exit_kernel
17645 RESTORE_ALL 8
17646 + pax_force_retaddr_bts
17647 /* Clear the NMI executing stack variable */
17648 movq $0, 10*8(%rsp)
17649 jmp irq_return
17650 CFI_ENDPROC
17651 -END(nmi)
17652 +ENDPROC(nmi)
17653
17654 ENTRY(ignore_sysret)
17655 CFI_STARTPROC
17656 mov $-ENOSYS,%eax
17657 sysret
17658 CFI_ENDPROC
17659 -END(ignore_sysret)
17660 +ENDPROC(ignore_sysret)
17661
17662 /*
17663 * End of kprobes section
17664 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
17665 index 1d41402..af9a46a 100644
17666 --- a/arch/x86/kernel/ftrace.c
17667 +++ b/arch/x86/kernel/ftrace.c
17668 @@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
17669 {
17670 unsigned char replaced[MCOUNT_INSN_SIZE];
17671
17672 + ip = ktla_ktva(ip);
17673 +
17674 /*
17675 * Note: Due to modules and __init, code can
17676 * disappear and change, we need to protect against faulting
17677 @@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17678 unsigned char old[MCOUNT_INSN_SIZE], *new;
17679 int ret;
17680
17681 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
17682 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
17683 new = ftrace_call_replace(ip, (unsigned long)func);
17684
17685 /* See comment above by declaration of modifying_ftrace_code */
17686 @@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17687 /* Also update the regs callback function */
17688 if (!ret) {
17689 ip = (unsigned long)(&ftrace_regs_call);
17690 - memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
17691 + memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
17692 new = ftrace_call_replace(ip, (unsigned long)func);
17693 ret = ftrace_modify_code(ip, old, new);
17694 }
17695 @@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
17696 * kernel identity mapping to modify code.
17697 */
17698 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
17699 - ip = (unsigned long)__va(__pa(ip));
17700 + ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
17701
17702 return probe_kernel_write((void *)ip, val, size);
17703 }
17704 @@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
17705 unsigned char replaced[MCOUNT_INSN_SIZE];
17706 unsigned char brk = BREAKPOINT_INSTRUCTION;
17707
17708 - if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
17709 + if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
17710 return -EFAULT;
17711
17712 /* Make sure it is what we expect it to be */
17713 @@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
17714 return ret;
17715
17716 fail_update:
17717 - probe_kernel_write((void *)ip, &old_code[0], 1);
17718 + probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
17719 goto out;
17720 }
17721
17722 @@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
17723 {
17724 unsigned char code[MCOUNT_INSN_SIZE];
17725
17726 + ip = ktla_ktva(ip);
17727 +
17728 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
17729 return -EFAULT;
17730
17731 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
17732 index c18f59d..9c0c9f6 100644
17733 --- a/arch/x86/kernel/head32.c
17734 +++ b/arch/x86/kernel/head32.c
17735 @@ -18,6 +18,7 @@
17736 #include <asm/io_apic.h>
17737 #include <asm/bios_ebda.h>
17738 #include <asm/tlbflush.h>
17739 +#include <asm/boot.h>
17740
17741 static void __init i386_default_early_setup(void)
17742 {
17743 @@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
17744
17745 void __init i386_start_kernel(void)
17746 {
17747 - memblock_reserve(__pa_symbol(&_text),
17748 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
17749 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
17750
17751 #ifdef CONFIG_BLK_DEV_INITRD
17752 /* Reserve INITRD */
17753 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
17754 index 4dac2f6..bc6a335 100644
17755 --- a/arch/x86/kernel/head_32.S
17756 +++ b/arch/x86/kernel/head_32.S
17757 @@ -26,6 +26,12 @@
17758 /* Physical address */
17759 #define pa(X) ((X) - __PAGE_OFFSET)
17760
17761 +#ifdef CONFIG_PAX_KERNEXEC
17762 +#define ta(X) (X)
17763 +#else
17764 +#define ta(X) ((X) - __PAGE_OFFSET)
17765 +#endif
17766 +
17767 /*
17768 * References to members of the new_cpu_data structure.
17769 */
17770 @@ -55,11 +61,7 @@
17771 * and small than max_low_pfn, otherwise will waste some page table entries
17772 */
17773
17774 -#if PTRS_PER_PMD > 1
17775 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
17776 -#else
17777 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
17778 -#endif
17779 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17780
17781 /* Number of possible pages in the lowmem region */
17782 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
17783 @@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
17784 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17785
17786 /*
17787 + * Real beginning of normal "text" segment
17788 + */
17789 +ENTRY(stext)
17790 +ENTRY(_stext)
17791 +
17792 +/*
17793 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
17794 * %esi points to the real-mode code as a 32-bit pointer.
17795 * CS and DS must be 4 GB flat segments, but we don't depend on
17796 @@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17797 * can.
17798 */
17799 __HEAD
17800 +
17801 +#ifdef CONFIG_PAX_KERNEXEC
17802 + jmp startup_32
17803 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
17804 +.fill PAGE_SIZE-5,1,0xcc
17805 +#endif
17806 +
17807 ENTRY(startup_32)
17808 movl pa(stack_start),%ecx
17809
17810 @@ -106,6 +121,59 @@ ENTRY(startup_32)
17811 2:
17812 leal -__PAGE_OFFSET(%ecx),%esp
17813
17814 +#ifdef CONFIG_SMP
17815 + movl $pa(cpu_gdt_table),%edi
17816 + movl $__per_cpu_load,%eax
17817 + movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
17818 + rorl $16,%eax
17819 + movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
17820 + movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
17821 + movl $__per_cpu_end - 1,%eax
17822 + subl $__per_cpu_start,%eax
17823 + movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
17824 +#endif
17825 +
17826 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17827 + movl $NR_CPUS,%ecx
17828 + movl $pa(cpu_gdt_table),%edi
17829 +1:
17830 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
17831 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
17832 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
17833 + addl $PAGE_SIZE_asm,%edi
17834 + loop 1b
17835 +#endif
17836 +
17837 +#ifdef CONFIG_PAX_KERNEXEC
17838 + movl $pa(boot_gdt),%edi
17839 + movl $__LOAD_PHYSICAL_ADDR,%eax
17840 + movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
17841 + rorl $16,%eax
17842 + movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
17843 + movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
17844 + rorl $16,%eax
17845 +
17846 + ljmp $(__BOOT_CS),$1f
17847 +1:
17848 +
17849 + movl $NR_CPUS,%ecx
17850 + movl $pa(cpu_gdt_table),%edi
17851 + addl $__PAGE_OFFSET,%eax
17852 +1:
17853 + movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
17854 + movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
17855 + movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
17856 + movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
17857 + rorl $16,%eax
17858 + movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
17859 + movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
17860 + movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
17861 + movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
17862 + rorl $16,%eax
17863 + addl $PAGE_SIZE_asm,%edi
17864 + loop 1b
17865 +#endif
17866 +
17867 /*
17868 * Clear BSS first so that there are no surprises...
17869 */
17870 @@ -196,8 +264,11 @@ ENTRY(startup_32)
17871 movl %eax, pa(max_pfn_mapped)
17872
17873 /* Do early initialization of the fixmap area */
17874 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
17875 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
17876 +#ifdef CONFIG_COMPAT_VDSO
17877 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
17878 +#else
17879 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
17880 +#endif
17881 #else /* Not PAE */
17882
17883 page_pde_offset = (__PAGE_OFFSET >> 20);
17884 @@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17885 movl %eax, pa(max_pfn_mapped)
17886
17887 /* Do early initialization of the fixmap area */
17888 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
17889 - movl %eax,pa(initial_page_table+0xffc)
17890 +#ifdef CONFIG_COMPAT_VDSO
17891 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
17892 +#else
17893 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
17894 +#endif
17895 #endif
17896
17897 #ifdef CONFIG_PARAVIRT
17898 @@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17899 cmpl $num_subarch_entries, %eax
17900 jae bad_subarch
17901
17902 - movl pa(subarch_entries)(,%eax,4), %eax
17903 - subl $__PAGE_OFFSET, %eax
17904 - jmp *%eax
17905 + jmp *pa(subarch_entries)(,%eax,4)
17906
17907 bad_subarch:
17908 WEAK(lguest_entry)
17909 @@ -256,10 +328,10 @@ WEAK(xen_entry)
17910 __INITDATA
17911
17912 subarch_entries:
17913 - .long default_entry /* normal x86/PC */
17914 - .long lguest_entry /* lguest hypervisor */
17915 - .long xen_entry /* Xen hypervisor */
17916 - .long default_entry /* Moorestown MID */
17917 + .long ta(default_entry) /* normal x86/PC */
17918 + .long ta(lguest_entry) /* lguest hypervisor */
17919 + .long ta(xen_entry) /* Xen hypervisor */
17920 + .long ta(default_entry) /* Moorestown MID */
17921 num_subarch_entries = (. - subarch_entries) / 4
17922 .previous
17923 #else
17924 @@ -316,6 +388,7 @@ default_entry:
17925 movl pa(mmu_cr4_features),%eax
17926 movl %eax,%cr4
17927
17928 +#ifdef CONFIG_X86_PAE
17929 testb $X86_CR4_PAE, %al # check if PAE is enabled
17930 jz 6f
17931
17932 @@ -344,6 +417,9 @@ default_entry:
17933 /* Make changes effective */
17934 wrmsr
17935
17936 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
17937 +#endif
17938 +
17939 6:
17940
17941 /*
17942 @@ -442,14 +518,20 @@ is386: movl $2,%ecx # set MP
17943 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17944 movl %eax,%ss # after changing gdt.
17945
17946 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
17947 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17948 movl %eax,%ds
17949 movl %eax,%es
17950
17951 movl $(__KERNEL_PERCPU), %eax
17952 movl %eax,%fs # set this cpu's percpu
17953
17954 +#ifdef CONFIG_CC_STACKPROTECTOR
17955 movl $(__KERNEL_STACK_CANARY),%eax
17956 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17957 + movl $(__USER_DS),%eax
17958 +#else
17959 + xorl %eax,%eax
17960 +#endif
17961 movl %eax,%gs
17962
17963 xorl %eax,%eax # Clear LDT
17964 @@ -526,8 +608,11 @@ setup_once:
17965 * relocation. Manually set base address in stack canary
17966 * segment descriptor.
17967 */
17968 - movl $gdt_page,%eax
17969 + movl $cpu_gdt_table,%eax
17970 movl $stack_canary,%ecx
17971 +#ifdef CONFIG_SMP
17972 + addl $__per_cpu_load,%ecx
17973 +#endif
17974 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17975 shrl $16, %ecx
17976 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17977 @@ -558,7 +643,7 @@ ENDPROC(early_idt_handlers)
17978 /* This is global to keep gas from relaxing the jumps */
17979 ENTRY(early_idt_handler)
17980 cld
17981 - cmpl $2,%ss:early_recursion_flag
17982 + cmpl $1,%ss:early_recursion_flag
17983 je hlt_loop
17984 incl %ss:early_recursion_flag
17985
17986 @@ -596,8 +681,8 @@ ENTRY(early_idt_handler)
17987 pushl (20+6*4)(%esp) /* trapno */
17988 pushl $fault_msg
17989 call printk
17990 -#endif
17991 call dump_stack
17992 +#endif
17993 hlt_loop:
17994 hlt
17995 jmp hlt_loop
17996 @@ -616,8 +701,11 @@ ENDPROC(early_idt_handler)
17997 /* This is the default interrupt "handler" :-) */
17998 ALIGN
17999 ignore_int:
18000 - cld
18001 #ifdef CONFIG_PRINTK
18002 + cmpl $2,%ss:early_recursion_flag
18003 + je hlt_loop
18004 + incl %ss:early_recursion_flag
18005 + cld
18006 pushl %eax
18007 pushl %ecx
18008 pushl %edx
18009 @@ -626,9 +714,6 @@ ignore_int:
18010 movl $(__KERNEL_DS),%eax
18011 movl %eax,%ds
18012 movl %eax,%es
18013 - cmpl $2,early_recursion_flag
18014 - je hlt_loop
18015 - incl early_recursion_flag
18016 pushl 16(%esp)
18017 pushl 24(%esp)
18018 pushl 32(%esp)
18019 @@ -662,29 +747,43 @@ ENTRY(setup_once_ref)
18020 /*
18021 * BSS section
18022 */
18023 -__PAGE_ALIGNED_BSS
18024 - .align PAGE_SIZE
18025 #ifdef CONFIG_X86_PAE
18026 +.section .initial_pg_pmd,"a",@progbits
18027 initial_pg_pmd:
18028 .fill 1024*KPMDS,4,0
18029 #else
18030 +.section .initial_page_table,"a",@progbits
18031 ENTRY(initial_page_table)
18032 .fill 1024,4,0
18033 #endif
18034 +.section .initial_pg_fixmap,"a",@progbits
18035 initial_pg_fixmap:
18036 .fill 1024,4,0
18037 +.section .empty_zero_page,"a",@progbits
18038 ENTRY(empty_zero_page)
18039 .fill 4096,1,0
18040 +.section .swapper_pg_dir,"a",@progbits
18041 ENTRY(swapper_pg_dir)
18042 +#ifdef CONFIG_X86_PAE
18043 + .fill 4,8,0
18044 +#else
18045 .fill 1024,4,0
18046 +#endif
18047 +
18048 +/*
18049 + * The IDT has to be page-aligned to simplify the Pentium
18050 + * F0 0F bug workaround.. We have a special link segment
18051 + * for this.
18052 + */
18053 +.section .idt,"a",@progbits
18054 +ENTRY(idt_table)
18055 + .fill 256,8,0
18056
18057 /*
18058 * This starts the data section.
18059 */
18060 #ifdef CONFIG_X86_PAE
18061 -__PAGE_ALIGNED_DATA
18062 - /* Page-aligned for the benefit of paravirt? */
18063 - .align PAGE_SIZE
18064 +.section .initial_page_table,"a",@progbits
18065 ENTRY(initial_page_table)
18066 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
18067 # if KPMDS == 3
18068 @@ -703,12 +802,20 @@ ENTRY(initial_page_table)
18069 # error "Kernel PMDs should be 1, 2 or 3"
18070 # endif
18071 .align PAGE_SIZE /* needs to be page-sized too */
18072 +
18073 +#ifdef CONFIG_PAX_PER_CPU_PGD
18074 +ENTRY(cpu_pgd)
18075 + .rept NR_CPUS
18076 + .fill 4,8,0
18077 + .endr
18078 +#endif
18079 +
18080 #endif
18081
18082 .data
18083 .balign 4
18084 ENTRY(stack_start)
18085 - .long init_thread_union+THREAD_SIZE
18086 + .long init_thread_union+THREAD_SIZE-8
18087
18088 __INITRODATA
18089 int_msg:
18090 @@ -736,7 +843,7 @@ fault_msg:
18091 * segment size, and 32-bit linear address value:
18092 */
18093
18094 - .data
18095 +.section .rodata,"a",@progbits
18096 .globl boot_gdt_descr
18097 .globl idt_descr
18098
18099 @@ -745,7 +852,7 @@ fault_msg:
18100 .word 0 # 32 bit align gdt_desc.address
18101 boot_gdt_descr:
18102 .word __BOOT_DS+7
18103 - .long boot_gdt - __PAGE_OFFSET
18104 + .long pa(boot_gdt)
18105
18106 .word 0 # 32-bit align idt_desc.address
18107 idt_descr:
18108 @@ -756,7 +863,7 @@ idt_descr:
18109 .word 0 # 32 bit align gdt_desc.address
18110 ENTRY(early_gdt_descr)
18111 .word GDT_ENTRIES*8-1
18112 - .long gdt_page /* Overwritten for secondary CPUs */
18113 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
18114
18115 /*
18116 * The boot_gdt must mirror the equivalent in setup.S and is
18117 @@ -765,5 +872,65 @@ ENTRY(early_gdt_descr)
18118 .align L1_CACHE_BYTES
18119 ENTRY(boot_gdt)
18120 .fill GDT_ENTRY_BOOT_CS,8,0
18121 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
18122 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
18123 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
18124 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
18125 +
18126 + .align PAGE_SIZE_asm
18127 +ENTRY(cpu_gdt_table)
18128 + .rept NR_CPUS
18129 + .quad 0x0000000000000000 /* NULL descriptor */
18130 + .quad 0x0000000000000000 /* 0x0b reserved */
18131 + .quad 0x0000000000000000 /* 0x13 reserved */
18132 + .quad 0x0000000000000000 /* 0x1b reserved */
18133 +
18134 +#ifdef CONFIG_PAX_KERNEXEC
18135 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
18136 +#else
18137 + .quad 0x0000000000000000 /* 0x20 unused */
18138 +#endif
18139 +
18140 + .quad 0x0000000000000000 /* 0x28 unused */
18141 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
18142 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
18143 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
18144 + .quad 0x0000000000000000 /* 0x4b reserved */
18145 + .quad 0x0000000000000000 /* 0x53 reserved */
18146 + .quad 0x0000000000000000 /* 0x5b reserved */
18147 +
18148 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
18149 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
18150 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
18151 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
18152 +
18153 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
18154 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
18155 +
18156 + /*
18157 + * Segments used for calling PnP BIOS have byte granularity.
18158 + * The code segments and data segments have fixed 64k limits,
18159 + * the transfer segment sizes are set at run time.
18160 + */
18161 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
18162 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
18163 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
18164 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
18165 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
18166 +
18167 + /*
18168 + * The APM segments have byte granularity and their bases
18169 + * are set at run time. All have 64k limits.
18170 + */
18171 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
18172 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
18173 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
18174 +
18175 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
18176 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
18177 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
18178 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
18179 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
18180 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
18181 +
18182 + /* Be sure this is zeroed to avoid false validations in Xen */
18183 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
18184 + .endr
18185 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
18186 index 94bf9cc..400455a 100644
18187 --- a/arch/x86/kernel/head_64.S
18188 +++ b/arch/x86/kernel/head_64.S
18189 @@ -20,6 +20,8 @@
18190 #include <asm/processor-flags.h>
18191 #include <asm/percpu.h>
18192 #include <asm/nops.h>
18193 +#include <asm/cpufeature.h>
18194 +#include <asm/alternative-asm.h>
18195
18196 #ifdef CONFIG_PARAVIRT
18197 #include <asm/asm-offsets.h>
18198 @@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
18199 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
18200 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
18201 L3_START_KERNEL = pud_index(__START_KERNEL_map)
18202 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
18203 +L3_VMALLOC_START = pud_index(VMALLOC_START)
18204 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
18205 +L3_VMALLOC_END = pud_index(VMALLOC_END)
18206 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
18207 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
18208
18209 .text
18210 __HEAD
18211 @@ -88,35 +96,23 @@ startup_64:
18212 */
18213 addq %rbp, init_level4_pgt + 0(%rip)
18214 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
18215 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
18216 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
18217 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
18218 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
18219
18220 addq %rbp, level3_ident_pgt + 0(%rip)
18221 +#ifndef CONFIG_XEN
18222 + addq %rbp, level3_ident_pgt + 8(%rip)
18223 +#endif
18224
18225 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
18226 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
18227 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
18228 +
18229 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
18230 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
18231
18232 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
18233 -
18234 - /* Add an Identity mapping if I am above 1G */
18235 - leaq _text(%rip), %rdi
18236 - andq $PMD_PAGE_MASK, %rdi
18237 -
18238 - movq %rdi, %rax
18239 - shrq $PUD_SHIFT, %rax
18240 - andq $(PTRS_PER_PUD - 1), %rax
18241 - jz ident_complete
18242 -
18243 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
18244 - leaq level3_ident_pgt(%rip), %rbx
18245 - movq %rdx, 0(%rbx, %rax, 8)
18246 -
18247 - movq %rdi, %rax
18248 - shrq $PMD_SHIFT, %rax
18249 - andq $(PTRS_PER_PMD - 1), %rax
18250 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
18251 - leaq level2_spare_pgt(%rip), %rbx
18252 - movq %rdx, 0(%rbx, %rax, 8)
18253 -ident_complete:
18254 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
18255
18256 /*
18257 * Fixup the kernel text+data virtual addresses. Note that
18258 @@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
18259 * after the boot processor executes this code.
18260 */
18261
18262 - /* Enable PAE mode and PGE */
18263 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
18264 + /* Enable PAE mode and PSE/PGE */
18265 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18266 movq %rax, %cr4
18267
18268 /* Setup early boot stage 4 level pagetables. */
18269 @@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
18270 movl $MSR_EFER, %ecx
18271 rdmsr
18272 btsl $_EFER_SCE, %eax /* Enable System Call */
18273 - btl $20,%edi /* No Execute supported? */
18274 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
18275 jnc 1f
18276 btsl $_EFER_NX, %eax
18277 + leaq init_level4_pgt(%rip), %rdi
18278 +#ifndef CONFIG_EFI
18279 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
18280 +#endif
18281 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
18282 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
18283 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
18284 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
18285 1: wrmsr /* Make changes effective */
18286
18287 /* Setup cr0 */
18288 @@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
18289 * jump. In addition we need to ensure %cs is set so we make this
18290 * a far return.
18291 */
18292 + pax_set_fptr_mask
18293 movq initial_code(%rip),%rax
18294 pushq $0 # fake return address to stop unwinder
18295 pushq $__KERNEL_CS # set correct cs
18296 @@ -268,7 +273,7 @@ ENTRY(secondary_startup_64)
18297 bad_address:
18298 jmp bad_address
18299
18300 - .section ".init.text","ax"
18301 + __INIT
18302 .globl early_idt_handlers
18303 early_idt_handlers:
18304 # 104(%rsp) %rflags
18305 @@ -347,11 +352,15 @@ ENTRY(early_idt_handler)
18306 addq $16,%rsp # drop vector number and error code
18307 decl early_recursion_flag(%rip)
18308 INTERRUPT_RETURN
18309 + .previous
18310
18311 + __INITDATA
18312 .balign 4
18313 early_recursion_flag:
18314 .long 0
18315 + .previous
18316
18317 + .section .rodata,"a",@progbits
18318 #ifdef CONFIG_EARLY_PRINTK
18319 early_idt_msg:
18320 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
18321 @@ -360,6 +369,7 @@ early_idt_ripmsg:
18322 #endif /* CONFIG_EARLY_PRINTK */
18323 .previous
18324
18325 + .section .rodata,"a",@progbits
18326 #define NEXT_PAGE(name) \
18327 .balign PAGE_SIZE; \
18328 ENTRY(name)
18329 @@ -372,7 +382,6 @@ ENTRY(name)
18330 i = i + 1 ; \
18331 .endr
18332
18333 - .data
18334 /*
18335 * This default setting generates an ident mapping at address 0x100000
18336 * and a mapping for the kernel that precisely maps virtual address
18337 @@ -383,13 +392,41 @@ NEXT_PAGE(init_level4_pgt)
18338 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18339 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
18340 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18341 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
18342 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
18343 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
18344 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
18345 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
18346 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18347 .org init_level4_pgt + L4_START_KERNEL*8, 0
18348 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
18349 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
18350
18351 +#ifdef CONFIG_PAX_PER_CPU_PGD
18352 +NEXT_PAGE(cpu_pgd)
18353 + .rept NR_CPUS
18354 + .fill 512,8,0
18355 + .endr
18356 +#endif
18357 +
18358 NEXT_PAGE(level3_ident_pgt)
18359 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18360 +#ifdef CONFIG_XEN
18361 .fill 511,8,0
18362 +#else
18363 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
18364 + .fill 510,8,0
18365 +#endif
18366 +
18367 +NEXT_PAGE(level3_vmalloc_start_pgt)
18368 + .fill 512,8,0
18369 +
18370 +NEXT_PAGE(level3_vmalloc_end_pgt)
18371 + .fill 512,8,0
18372 +
18373 +NEXT_PAGE(level3_vmemmap_pgt)
18374 + .fill L3_VMEMMAP_START,8,0
18375 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18376
18377 NEXT_PAGE(level3_kernel_pgt)
18378 .fill L3_START_KERNEL,8,0
18379 @@ -397,20 +434,23 @@ NEXT_PAGE(level3_kernel_pgt)
18380 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
18381 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18382
18383 +NEXT_PAGE(level2_vmemmap_pgt)
18384 + .fill 512,8,0
18385 +
18386 NEXT_PAGE(level2_fixmap_pgt)
18387 - .fill 506,8,0
18388 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18389 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
18390 - .fill 5,8,0
18391 + .fill 507,8,0
18392 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
18393 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
18394 + .fill 4,8,0
18395
18396 -NEXT_PAGE(level1_fixmap_pgt)
18397 +NEXT_PAGE(level1_vsyscall_pgt)
18398 .fill 512,8,0
18399
18400 -NEXT_PAGE(level2_ident_pgt)
18401 - /* Since I easily can, map the first 1G.
18402 + /* Since I easily can, map the first 2G.
18403 * Don't set NX because code runs from these pages.
18404 */
18405 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
18406 +NEXT_PAGE(level2_ident_pgt)
18407 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
18408
18409 NEXT_PAGE(level2_kernel_pgt)
18410 /*
18411 @@ -423,37 +463,59 @@ NEXT_PAGE(level2_kernel_pgt)
18412 * If you want to increase this then increase MODULES_VADDR
18413 * too.)
18414 */
18415 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
18416 - KERNEL_IMAGE_SIZE/PMD_SIZE)
18417 -
18418 -NEXT_PAGE(level2_spare_pgt)
18419 - .fill 512, 8, 0
18420 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
18421
18422 #undef PMDS
18423 #undef NEXT_PAGE
18424
18425 - .data
18426 + .align PAGE_SIZE
18427 +ENTRY(cpu_gdt_table)
18428 + .rept NR_CPUS
18429 + .quad 0x0000000000000000 /* NULL descriptor */
18430 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
18431 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
18432 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
18433 + .quad 0x00cffb000000ffff /* __USER32_CS */
18434 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
18435 + .quad 0x00affb000000ffff /* __USER_CS */
18436 +
18437 +#ifdef CONFIG_PAX_KERNEXEC
18438 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
18439 +#else
18440 + .quad 0x0 /* unused */
18441 +#endif
18442 +
18443 + .quad 0,0 /* TSS */
18444 + .quad 0,0 /* LDT */
18445 + .quad 0,0,0 /* three TLS descriptors */
18446 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
18447 + /* asm/segment.h:GDT_ENTRIES must match this */
18448 +
18449 + /* zero the remaining page */
18450 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
18451 + .endr
18452 +
18453 .align 16
18454 .globl early_gdt_descr
18455 early_gdt_descr:
18456 .word GDT_ENTRIES*8-1
18457 early_gdt_descr_base:
18458 - .quad INIT_PER_CPU_VAR(gdt_page)
18459 + .quad cpu_gdt_table
18460
18461 ENTRY(phys_base)
18462 /* This must match the first entry in level2_kernel_pgt */
18463 .quad 0x0000000000000000
18464
18465 #include "../../x86/xen/xen-head.S"
18466 -
18467 - .section .bss, "aw", @nobits
18468 +
18469 + .section .rodata,"a",@progbits
18470 .align L1_CACHE_BYTES
18471 ENTRY(idt_table)
18472 - .skip IDT_ENTRIES * 16
18473 + .fill 512,8,0
18474
18475 .align L1_CACHE_BYTES
18476 ENTRY(nmi_idt_table)
18477 - .skip IDT_ENTRIES * 16
18478 + .fill 512,8,0
18479
18480 __PAGE_ALIGNED_BSS
18481 .align PAGE_SIZE
18482 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
18483 index 9c3bd4a..e1d9b35 100644
18484 --- a/arch/x86/kernel/i386_ksyms_32.c
18485 +++ b/arch/x86/kernel/i386_ksyms_32.c
18486 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
18487 EXPORT_SYMBOL(cmpxchg8b_emu);
18488 #endif
18489
18490 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
18491 +
18492 /* Networking helper routines. */
18493 EXPORT_SYMBOL(csum_partial_copy_generic);
18494 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
18495 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
18496
18497 EXPORT_SYMBOL(__get_user_1);
18498 EXPORT_SYMBOL(__get_user_2);
18499 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
18500
18501 EXPORT_SYMBOL(csum_partial);
18502 EXPORT_SYMBOL(empty_zero_page);
18503 +
18504 +#ifdef CONFIG_PAX_KERNEXEC
18505 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
18506 +#endif
18507 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
18508 index 675a050..95febfd 100644
18509 --- a/arch/x86/kernel/i387.c
18510 +++ b/arch/x86/kernel/i387.c
18511 @@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
18512 static inline bool interrupted_user_mode(void)
18513 {
18514 struct pt_regs *regs = get_irq_regs();
18515 - return regs && user_mode_vm(regs);
18516 + return regs && user_mode(regs);
18517 }
18518
18519 /*
18520 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
18521 index 9a5c460..dc4374d 100644
18522 --- a/arch/x86/kernel/i8259.c
18523 +++ b/arch/x86/kernel/i8259.c
18524 @@ -209,7 +209,7 @@ spurious_8259A_irq:
18525 "spurious 8259A interrupt: IRQ%d.\n", irq);
18526 spurious_irq_mask |= irqmask;
18527 }
18528 - atomic_inc(&irq_err_count);
18529 + atomic_inc_unchecked(&irq_err_count);
18530 /*
18531 * Theoretically we do not have to handle this IRQ,
18532 * but in Linux this does not cause problems and is
18533 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
18534 index 8c96897..be66bfa 100644
18535 --- a/arch/x86/kernel/ioport.c
18536 +++ b/arch/x86/kernel/ioport.c
18537 @@ -6,6 +6,7 @@
18538 #include <linux/sched.h>
18539 #include <linux/kernel.h>
18540 #include <linux/capability.h>
18541 +#include <linux/security.h>
18542 #include <linux/errno.h>
18543 #include <linux/types.h>
18544 #include <linux/ioport.h>
18545 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18546
18547 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
18548 return -EINVAL;
18549 +#ifdef CONFIG_GRKERNSEC_IO
18550 + if (turn_on && grsec_disable_privio) {
18551 + gr_handle_ioperm();
18552 + return -EPERM;
18553 + }
18554 +#endif
18555 if (turn_on && !capable(CAP_SYS_RAWIO))
18556 return -EPERM;
18557
18558 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18559 * because the ->io_bitmap_max value must match the bitmap
18560 * contents:
18561 */
18562 - tss = &per_cpu(init_tss, get_cpu());
18563 + tss = init_tss + get_cpu();
18564
18565 if (turn_on)
18566 bitmap_clear(t->io_bitmap_ptr, from, num);
18567 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
18568 return -EINVAL;
18569 /* Trying to gain more privileges? */
18570 if (level > old) {
18571 +#ifdef CONFIG_GRKERNSEC_IO
18572 + if (grsec_disable_privio) {
18573 + gr_handle_iopl();
18574 + return -EPERM;
18575 + }
18576 +#endif
18577 if (!capable(CAP_SYS_RAWIO))
18578 return -EPERM;
18579 }
18580 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
18581 index e4595f1..ee3bfb8 100644
18582 --- a/arch/x86/kernel/irq.c
18583 +++ b/arch/x86/kernel/irq.c
18584 @@ -18,7 +18,7 @@
18585 #include <asm/mce.h>
18586 #include <asm/hw_irq.h>
18587
18588 -atomic_t irq_err_count;
18589 +atomic_unchecked_t irq_err_count;
18590
18591 /* Function pointer for generic interrupt vector handling */
18592 void (*x86_platform_ipi_callback)(void) = NULL;
18593 @@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
18594 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
18595 seq_printf(p, " Machine check polls\n");
18596 #endif
18597 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
18598 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
18599 #if defined(CONFIG_X86_IO_APIC)
18600 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
18601 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
18602 #endif
18603 return 0;
18604 }
18605 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
18606
18607 u64 arch_irq_stat(void)
18608 {
18609 - u64 sum = atomic_read(&irq_err_count);
18610 + u64 sum = atomic_read_unchecked(&irq_err_count);
18611
18612 #ifdef CONFIG_X86_IO_APIC
18613 - sum += atomic_read(&irq_mis_count);
18614 + sum += atomic_read_unchecked(&irq_mis_count);
18615 #endif
18616 return sum;
18617 }
18618 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
18619 index 344faf8..355f60d 100644
18620 --- a/arch/x86/kernel/irq_32.c
18621 +++ b/arch/x86/kernel/irq_32.c
18622 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
18623 __asm__ __volatile__("andl %%esp,%0" :
18624 "=r" (sp) : "0" (THREAD_SIZE - 1));
18625
18626 - return sp < (sizeof(struct thread_info) + STACK_WARN);
18627 + return sp < STACK_WARN;
18628 }
18629
18630 static void print_stack_overflow(void)
18631 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
18632 * per-CPU IRQ handling contexts (thread information and stack)
18633 */
18634 union irq_ctx {
18635 - struct thread_info tinfo;
18636 - u32 stack[THREAD_SIZE/sizeof(u32)];
18637 + unsigned long previous_esp;
18638 + u32 stack[THREAD_SIZE/sizeof(u32)];
18639 } __attribute__((aligned(THREAD_SIZE)));
18640
18641 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
18642 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
18643 static inline int
18644 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18645 {
18646 - union irq_ctx *curctx, *irqctx;
18647 + union irq_ctx *irqctx;
18648 u32 *isp, arg1, arg2;
18649
18650 - curctx = (union irq_ctx *) current_thread_info();
18651 irqctx = __this_cpu_read(hardirq_ctx);
18652
18653 /*
18654 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18655 * handler) we can't do that and just have to keep using the
18656 * current stack (which is the irq stack already after all)
18657 */
18658 - if (unlikely(curctx == irqctx))
18659 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
18660 return 0;
18661
18662 /* build the stack frame on the IRQ stack */
18663 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18664 - irqctx->tinfo.task = curctx->tinfo.task;
18665 - irqctx->tinfo.previous_esp = current_stack_pointer;
18666 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18667 + irqctx->previous_esp = current_stack_pointer;
18668
18669 - /* Copy the preempt_count so that the [soft]irq checks work. */
18670 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
18671 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18672 + __set_fs(MAKE_MM_SEG(0));
18673 +#endif
18674
18675 if (unlikely(overflow))
18676 call_on_stack(print_stack_overflow, isp);
18677 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18678 : "0" (irq), "1" (desc), "2" (isp),
18679 "D" (desc->handle_irq)
18680 : "memory", "cc", "ecx");
18681 +
18682 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18683 + __set_fs(current_thread_info()->addr_limit);
18684 +#endif
18685 +
18686 return 1;
18687 }
18688
18689 @@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18690 */
18691 void __cpuinit irq_ctx_init(int cpu)
18692 {
18693 - union irq_ctx *irqctx;
18694 -
18695 if (per_cpu(hardirq_ctx, cpu))
18696 return;
18697
18698 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
18699 - THREADINFO_GFP,
18700 - THREAD_SIZE_ORDER));
18701 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
18702 - irqctx->tinfo.cpu = cpu;
18703 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18704 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18705 -
18706 - per_cpu(hardirq_ctx, cpu) = irqctx;
18707 -
18708 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
18709 - THREADINFO_GFP,
18710 - THREAD_SIZE_ORDER));
18711 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
18712 - irqctx->tinfo.cpu = cpu;
18713 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18714 -
18715 - per_cpu(softirq_ctx, cpu) = irqctx;
18716 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
18717 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
18718 +
18719 + printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18720 + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18721
18722 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18723 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18724 @@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
18725 asmlinkage void do_softirq(void)
18726 {
18727 unsigned long flags;
18728 - struct thread_info *curctx;
18729 union irq_ctx *irqctx;
18730 u32 *isp;
18731
18732 @@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
18733 local_irq_save(flags);
18734
18735 if (local_softirq_pending()) {
18736 - curctx = current_thread_info();
18737 irqctx = __this_cpu_read(softirq_ctx);
18738 - irqctx->tinfo.task = curctx->task;
18739 - irqctx->tinfo.previous_esp = current_stack_pointer;
18740 + irqctx->previous_esp = current_stack_pointer;
18741
18742 /* build the stack frame on the softirq stack */
18743 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18744 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18745 +
18746 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18747 + __set_fs(MAKE_MM_SEG(0));
18748 +#endif
18749
18750 call_on_stack(__do_softirq, isp);
18751 +
18752 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18753 + __set_fs(current_thread_info()->addr_limit);
18754 +#endif
18755 +
18756 /*
18757 * Shouldn't happen, we returned above if in_interrupt():
18758 */
18759 @@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
18760 if (unlikely(!desc))
18761 return false;
18762
18763 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
18764 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
18765 if (unlikely(overflow))
18766 print_stack_overflow();
18767 desc->handle_irq(irq, desc);
18768 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
18769 index d04d3ec..ea4b374 100644
18770 --- a/arch/x86/kernel/irq_64.c
18771 +++ b/arch/x86/kernel/irq_64.c
18772 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
18773 u64 estack_top, estack_bottom;
18774 u64 curbase = (u64)task_stack_page(current);
18775
18776 - if (user_mode_vm(regs))
18777 + if (user_mode(regs))
18778 return;
18779
18780 if (regs->sp >= curbase + sizeof(struct thread_info) +
18781 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
18782 index dc1404b..bbc43e7 100644
18783 --- a/arch/x86/kernel/kdebugfs.c
18784 +++ b/arch/x86/kernel/kdebugfs.c
18785 @@ -27,7 +27,7 @@ struct setup_data_node {
18786 u32 len;
18787 };
18788
18789 -static ssize_t setup_data_read(struct file *file, char __user *user_buf,
18790 +static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
18791 size_t count, loff_t *ppos)
18792 {
18793 struct setup_data_node *node = file->private_data;
18794 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
18795 index 836f832..a8bda67 100644
18796 --- a/arch/x86/kernel/kgdb.c
18797 +++ b/arch/x86/kernel/kgdb.c
18798 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
18799 #ifdef CONFIG_X86_32
18800 switch (regno) {
18801 case GDB_SS:
18802 - if (!user_mode_vm(regs))
18803 + if (!user_mode(regs))
18804 *(unsigned long *)mem = __KERNEL_DS;
18805 break;
18806 case GDB_SP:
18807 - if (!user_mode_vm(regs))
18808 + if (!user_mode(regs))
18809 *(unsigned long *)mem = kernel_stack_pointer(regs);
18810 break;
18811 case GDB_GS:
18812 @@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
18813 bp->attr.bp_addr = breakinfo[breakno].addr;
18814 bp->attr.bp_len = breakinfo[breakno].len;
18815 bp->attr.bp_type = breakinfo[breakno].type;
18816 - info->address = breakinfo[breakno].addr;
18817 + if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
18818 + info->address = ktla_ktva(breakinfo[breakno].addr);
18819 + else
18820 + info->address = breakinfo[breakno].addr;
18821 info->len = breakinfo[breakno].len;
18822 info->type = breakinfo[breakno].type;
18823 val = arch_install_hw_breakpoint(bp);
18824 @@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18825 case 'k':
18826 /* clear the trace bit */
18827 linux_regs->flags &= ~X86_EFLAGS_TF;
18828 - atomic_set(&kgdb_cpu_doing_single_step, -1);
18829 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18830
18831 /* set the trace bit if we're stepping */
18832 if (remcomInBuffer[0] == 's') {
18833 linux_regs->flags |= X86_EFLAGS_TF;
18834 - atomic_set(&kgdb_cpu_doing_single_step,
18835 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18836 raw_smp_processor_id());
18837 }
18838
18839 @@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
18840
18841 switch (cmd) {
18842 case DIE_DEBUG:
18843 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
18844 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
18845 if (user_mode(regs))
18846 return single_step_cont(regs, args);
18847 break;
18848 @@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
18849 #endif /* CONFIG_DEBUG_RODATA */
18850
18851 bpt->type = BP_BREAKPOINT;
18852 - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
18853 + err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
18854 BREAK_INSTR_SIZE);
18855 if (err)
18856 return err;
18857 - err = probe_kernel_write((char *)bpt->bpt_addr,
18858 + err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
18859 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
18860 #ifdef CONFIG_DEBUG_RODATA
18861 if (!err)
18862 @@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
18863 return -EBUSY;
18864 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
18865 BREAK_INSTR_SIZE);
18866 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
18867 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
18868 if (err)
18869 return err;
18870 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
18871 @@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
18872 if (mutex_is_locked(&text_mutex))
18873 goto knl_write;
18874 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
18875 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
18876 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
18877 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
18878 goto knl_write;
18879 return err;
18880 knl_write:
18881 #endif /* CONFIG_DEBUG_RODATA */
18882 - return probe_kernel_write((char *)bpt->bpt_addr,
18883 + return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
18884 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
18885 }
18886
18887 diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
18888 index c5e410e..ed5a7f0 100644
18889 --- a/arch/x86/kernel/kprobes-opt.c
18890 +++ b/arch/x86/kernel/kprobes-opt.c
18891 @@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
18892 * Verify if the address gap is in 2GB range, because this uses
18893 * a relative jump.
18894 */
18895 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
18896 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
18897 if (abs(rel) > 0x7fffffff)
18898 return -ERANGE;
18899
18900 @@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
18901 op->optinsn.size = ret;
18902
18903 /* Copy arch-dep-instance from template */
18904 - memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
18905 + memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
18906
18907 /* Set probe information */
18908 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
18909
18910 /* Set probe function call */
18911 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
18912 + synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
18913
18914 /* Set returning jmp instruction at the tail of out-of-line buffer */
18915 - synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
18916 + synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
18917 (u8 *)op->kp.addr + op->optinsn.size);
18918
18919 flush_icache_range((unsigned long) buf,
18920 @@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
18921 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
18922
18923 /* Backup instructions which will be replaced by jump address */
18924 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
18925 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
18926 RELATIVE_ADDR_SIZE);
18927
18928 insn_buf[0] = RELATIVEJUMP_OPCODE;
18929 @@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
18930 /* This kprobe is really able to run optimized path. */
18931 op = container_of(p, struct optimized_kprobe, kp);
18932 /* Detour through copied instructions */
18933 - regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
18934 + regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
18935 if (!reenter)
18936 reset_current_kprobe();
18937 preempt_enable_no_resched();
18938 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
18939 index 57916c0..9e0b9d0 100644
18940 --- a/arch/x86/kernel/kprobes.c
18941 +++ b/arch/x86/kernel/kprobes.c
18942 @@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
18943 s32 raddr;
18944 } __attribute__((packed)) *insn;
18945
18946 - insn = (struct __arch_relative_insn *)from;
18947 + insn = (struct __arch_relative_insn *)ktla_ktva(from);
18948 +
18949 + pax_open_kernel();
18950 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
18951 insn->op = op;
18952 + pax_close_kernel();
18953 }
18954
18955 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
18956 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
18957 kprobe_opcode_t opcode;
18958 kprobe_opcode_t *orig_opcodes = opcodes;
18959
18960 - if (search_exception_tables((unsigned long)opcodes))
18961 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
18962 return 0; /* Page fault may occur on this address. */
18963
18964 retry:
18965 @@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
18966 * for the first byte, we can recover the original instruction
18967 * from it and kp->opcode.
18968 */
18969 - memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18970 + memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18971 buf[0] = kp->opcode;
18972 - return (unsigned long)buf;
18973 + return ktva_ktla((unsigned long)buf);
18974 }
18975
18976 /*
18977 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
18978 /* Another subsystem puts a breakpoint, failed to recover */
18979 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
18980 return 0;
18981 + pax_open_kernel();
18982 memcpy(dest, insn.kaddr, insn.length);
18983 + pax_close_kernel();
18984
18985 #ifdef CONFIG_X86_64
18986 if (insn_rip_relative(&insn)) {
18987 @@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
18988 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
18989 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
18990 disp = (u8 *) dest + insn_offset_displacement(&insn);
18991 + pax_open_kernel();
18992 *(s32 *) disp = (s32) newdisp;
18993 + pax_close_kernel();
18994 }
18995 #endif
18996 return insn.length;
18997 @@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
18998 * nor set current_kprobe, because it doesn't use single
18999 * stepping.
19000 */
19001 - regs->ip = (unsigned long)p->ainsn.insn;
19002 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19003 preempt_enable_no_resched();
19004 return;
19005 }
19006 @@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
19007 regs->flags &= ~X86_EFLAGS_IF;
19008 /* single step inline if the instruction is an int3 */
19009 if (p->opcode == BREAKPOINT_INSTRUCTION)
19010 - regs->ip = (unsigned long)p->addr;
19011 + regs->ip = ktla_ktva((unsigned long)p->addr);
19012 else
19013 - regs->ip = (unsigned long)p->ainsn.insn;
19014 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19015 }
19016
19017 /*
19018 @@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
19019 setup_singlestep(p, regs, kcb, 0);
19020 return 1;
19021 }
19022 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
19023 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
19024 /*
19025 * The breakpoint instruction was removed right
19026 * after we hit it. Another cpu has removed
19027 @@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
19028 " movq %rax, 152(%rsp)\n"
19029 RESTORE_REGS_STRING
19030 " popfq\n"
19031 +#ifdef KERNEXEC_PLUGIN
19032 + " btsq $63,(%rsp)\n"
19033 +#endif
19034 #else
19035 " pushf\n"
19036 SAVE_REGS_STRING
19037 @@ -788,7 +798,7 @@ static void __kprobes
19038 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
19039 {
19040 unsigned long *tos = stack_addr(regs);
19041 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
19042 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
19043 unsigned long orig_ip = (unsigned long)p->addr;
19044 kprobe_opcode_t *insn = p->ainsn.insn;
19045
19046 @@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
19047 struct die_args *args = data;
19048 int ret = NOTIFY_DONE;
19049
19050 - if (args->regs && user_mode_vm(args->regs))
19051 + if (args->regs && user_mode(args->regs))
19052 return ret;
19053
19054 switch (val) {
19055 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
19056 index ebc9873..1b9724b 100644
19057 --- a/arch/x86/kernel/ldt.c
19058 +++ b/arch/x86/kernel/ldt.c
19059 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
19060 if (reload) {
19061 #ifdef CONFIG_SMP
19062 preempt_disable();
19063 - load_LDT(pc);
19064 + load_LDT_nolock(pc);
19065 if (!cpumask_equal(mm_cpumask(current->mm),
19066 cpumask_of(smp_processor_id())))
19067 smp_call_function(flush_ldt, current->mm, 1);
19068 preempt_enable();
19069 #else
19070 - load_LDT(pc);
19071 + load_LDT_nolock(pc);
19072 #endif
19073 }
19074 if (oldsize) {
19075 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
19076 return err;
19077
19078 for (i = 0; i < old->size; i++)
19079 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
19080 + write_ldt_entry(new->ldt, i, old->ldt + i);
19081 return 0;
19082 }
19083
19084 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
19085 retval = copy_ldt(&mm->context, &old_mm->context);
19086 mutex_unlock(&old_mm->context.lock);
19087 }
19088 +
19089 + if (tsk == current) {
19090 + mm->context.vdso = 0;
19091 +
19092 +#ifdef CONFIG_X86_32
19093 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19094 + mm->context.user_cs_base = 0UL;
19095 + mm->context.user_cs_limit = ~0UL;
19096 +
19097 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
19098 + cpus_clear(mm->context.cpu_user_cs_mask);
19099 +#endif
19100 +
19101 +#endif
19102 +#endif
19103 +
19104 + }
19105 +
19106 return retval;
19107 }
19108
19109 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
19110 }
19111 }
19112
19113 +#ifdef CONFIG_PAX_SEGMEXEC
19114 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
19115 + error = -EINVAL;
19116 + goto out_unlock;
19117 + }
19118 +#endif
19119 +
19120 fill_ldt(&ldt, &ldt_info);
19121 if (oldmode)
19122 ldt.avl = 0;
19123 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
19124 index 5b19e4d..6476a76 100644
19125 --- a/arch/x86/kernel/machine_kexec_32.c
19126 +++ b/arch/x86/kernel/machine_kexec_32.c
19127 @@ -26,7 +26,7 @@
19128 #include <asm/cacheflush.h>
19129 #include <asm/debugreg.h>
19130
19131 -static void set_idt(void *newidt, __u16 limit)
19132 +static void set_idt(struct desc_struct *newidt, __u16 limit)
19133 {
19134 struct desc_ptr curidt;
19135
19136 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
19137 }
19138
19139
19140 -static void set_gdt(void *newgdt, __u16 limit)
19141 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
19142 {
19143 struct desc_ptr curgdt;
19144
19145 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
19146 }
19147
19148 control_page = page_address(image->control_code_page);
19149 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
19150 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
19151
19152 relocate_kernel_ptr = control_page;
19153 page_list[PA_CONTROL_PAGE] = __pa(control_page);
19154 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
19155 index 3544aed..01ddc1c 100644
19156 --- a/arch/x86/kernel/microcode_intel.c
19157 +++ b/arch/x86/kernel/microcode_intel.c
19158 @@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
19159
19160 static int get_ucode_user(void *to, const void *from, size_t n)
19161 {
19162 - return copy_from_user(to, from, n);
19163 + return copy_from_user(to, (const void __force_user *)from, n);
19164 }
19165
19166 static enum ucode_state
19167 request_microcode_user(int cpu, const void __user *buf, size_t size)
19168 {
19169 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
19170 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
19171 }
19172
19173 static void microcode_fini_cpu(int cpu)
19174 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
19175 index 216a4d7..b328f09 100644
19176 --- a/arch/x86/kernel/module.c
19177 +++ b/arch/x86/kernel/module.c
19178 @@ -43,15 +43,60 @@ do { \
19179 } while (0)
19180 #endif
19181
19182 -void *module_alloc(unsigned long size)
19183 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
19184 {
19185 - if (PAGE_ALIGN(size) > MODULES_LEN)
19186 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
19187 return NULL;
19188 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
19189 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
19190 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
19191 -1, __builtin_return_address(0));
19192 }
19193
19194 +void *module_alloc(unsigned long size)
19195 +{
19196 +
19197 +#ifdef CONFIG_PAX_KERNEXEC
19198 + return __module_alloc(size, PAGE_KERNEL);
19199 +#else
19200 + return __module_alloc(size, PAGE_KERNEL_EXEC);
19201 +#endif
19202 +
19203 +}
19204 +
19205 +#ifdef CONFIG_PAX_KERNEXEC
19206 +#ifdef CONFIG_X86_32
19207 +void *module_alloc_exec(unsigned long size)
19208 +{
19209 + struct vm_struct *area;
19210 +
19211 + if (size == 0)
19212 + return NULL;
19213 +
19214 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
19215 + return area ? area->addr : NULL;
19216 +}
19217 +EXPORT_SYMBOL(module_alloc_exec);
19218 +
19219 +void module_free_exec(struct module *mod, void *module_region)
19220 +{
19221 + vunmap(module_region);
19222 +}
19223 +EXPORT_SYMBOL(module_free_exec);
19224 +#else
19225 +void module_free_exec(struct module *mod, void *module_region)
19226 +{
19227 + module_free(mod, module_region);
19228 +}
19229 +EXPORT_SYMBOL(module_free_exec);
19230 +
19231 +void *module_alloc_exec(unsigned long size)
19232 +{
19233 + return __module_alloc(size, PAGE_KERNEL_RX);
19234 +}
19235 +EXPORT_SYMBOL(module_alloc_exec);
19236 +#endif
19237 +#endif
19238 +
19239 #ifdef CONFIG_X86_32
19240 int apply_relocate(Elf32_Shdr *sechdrs,
19241 const char *strtab,
19242 @@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19243 unsigned int i;
19244 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
19245 Elf32_Sym *sym;
19246 - uint32_t *location;
19247 + uint32_t *plocation, location;
19248
19249 DEBUGP("Applying relocate section %u to %u\n",
19250 relsec, sechdrs[relsec].sh_info);
19251 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
19252 /* This is where to make the change */
19253 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
19254 - + rel[i].r_offset;
19255 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
19256 + location = (uint32_t)plocation;
19257 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
19258 + plocation = ktla_ktva((void *)plocation);
19259 /* This is the symbol it is referring to. Note that all
19260 undefined symbols have been resolved. */
19261 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
19262 @@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19263 switch (ELF32_R_TYPE(rel[i].r_info)) {
19264 case R_386_32:
19265 /* We add the value into the location given */
19266 - *location += sym->st_value;
19267 + pax_open_kernel();
19268 + *plocation += sym->st_value;
19269 + pax_close_kernel();
19270 break;
19271 case R_386_PC32:
19272 /* Add the value, subtract its position */
19273 - *location += sym->st_value - (uint32_t)location;
19274 + pax_open_kernel();
19275 + *plocation += sym->st_value - location;
19276 + pax_close_kernel();
19277 break;
19278 default:
19279 pr_err("%s: Unknown relocation: %u\n",
19280 @@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
19281 case R_X86_64_NONE:
19282 break;
19283 case R_X86_64_64:
19284 + pax_open_kernel();
19285 *(u64 *)loc = val;
19286 + pax_close_kernel();
19287 break;
19288 case R_X86_64_32:
19289 + pax_open_kernel();
19290 *(u32 *)loc = val;
19291 + pax_close_kernel();
19292 if (val != *(u32 *)loc)
19293 goto overflow;
19294 break;
19295 case R_X86_64_32S:
19296 + pax_open_kernel();
19297 *(s32 *)loc = val;
19298 + pax_close_kernel();
19299 if ((s64)val != *(s32 *)loc)
19300 goto overflow;
19301 break;
19302 case R_X86_64_PC32:
19303 val -= (u64)loc;
19304 + pax_open_kernel();
19305 *(u32 *)loc = val;
19306 + pax_close_kernel();
19307 +
19308 #if 0
19309 if ((s64)val != *(s32 *)loc)
19310 goto overflow;
19311 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
19312 index f84f5c5..e27e54b 100644
19313 --- a/arch/x86/kernel/nmi.c
19314 +++ b/arch/x86/kernel/nmi.c
19315 @@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
19316 dotraplinkage notrace __kprobes void
19317 do_nmi(struct pt_regs *regs, long error_code)
19318 {
19319 +
19320 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19321 + if (!user_mode(regs)) {
19322 + unsigned long cs = regs->cs & 0xFFFF;
19323 + unsigned long ip = ktva_ktla(regs->ip);
19324 +
19325 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
19326 + regs->ip = ip;
19327 + }
19328 +#endif
19329 +
19330 nmi_nesting_preprocess(regs);
19331
19332 nmi_enter();
19333 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
19334 index 676b8c7..870ba04 100644
19335 --- a/arch/x86/kernel/paravirt-spinlocks.c
19336 +++ b/arch/x86/kernel/paravirt-spinlocks.c
19337 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
19338 arch_spin_lock(lock);
19339 }
19340
19341 -struct pv_lock_ops pv_lock_ops = {
19342 +struct pv_lock_ops pv_lock_ops __read_only = {
19343 #ifdef CONFIG_SMP
19344 .spin_is_locked = __ticket_spin_is_locked,
19345 .spin_is_contended = __ticket_spin_is_contended,
19346 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
19347 index 17fff18..7bb2088 100644
19348 --- a/arch/x86/kernel/paravirt.c
19349 +++ b/arch/x86/kernel/paravirt.c
19350 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
19351 {
19352 return x;
19353 }
19354 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19355 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
19356 +#endif
19357
19358 void __init default_banner(void)
19359 {
19360 @@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
19361 if (opfunc == NULL)
19362 /* If there's no function, patch it with a ud2a (BUG) */
19363 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
19364 - else if (opfunc == _paravirt_nop)
19365 + else if (opfunc == (void *)_paravirt_nop)
19366 /* If the operation is a nop, then nop the callsite */
19367 ret = paravirt_patch_nop();
19368
19369 /* identity functions just return their single argument */
19370 - else if (opfunc == _paravirt_ident_32)
19371 + else if (opfunc == (void *)_paravirt_ident_32)
19372 ret = paravirt_patch_ident_32(insnbuf, len);
19373 - else if (opfunc == _paravirt_ident_64)
19374 + else if (opfunc == (void *)_paravirt_ident_64)
19375 ret = paravirt_patch_ident_64(insnbuf, len);
19376 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19377 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
19378 + ret = paravirt_patch_ident_64(insnbuf, len);
19379 +#endif
19380
19381 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
19382 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
19383 @@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
19384 if (insn_len > len || start == NULL)
19385 insn_len = len;
19386 else
19387 - memcpy(insnbuf, start, insn_len);
19388 + memcpy(insnbuf, ktla_ktva(start), insn_len);
19389
19390 return insn_len;
19391 }
19392 @@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
19393 preempt_enable();
19394 }
19395
19396 -struct pv_info pv_info = {
19397 +struct pv_info pv_info __read_only = {
19398 .name = "bare hardware",
19399 .paravirt_enabled = 0,
19400 .kernel_rpl = 0,
19401 @@ -315,16 +322,16 @@ struct pv_info pv_info = {
19402 #endif
19403 };
19404
19405 -struct pv_init_ops pv_init_ops = {
19406 +struct pv_init_ops pv_init_ops __read_only = {
19407 .patch = native_patch,
19408 };
19409
19410 -struct pv_time_ops pv_time_ops = {
19411 +struct pv_time_ops pv_time_ops __read_only = {
19412 .sched_clock = native_sched_clock,
19413 .steal_clock = native_steal_clock,
19414 };
19415
19416 -struct pv_irq_ops pv_irq_ops = {
19417 +struct pv_irq_ops pv_irq_ops __read_only = {
19418 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
19419 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
19420 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
19421 @@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
19422 #endif
19423 };
19424
19425 -struct pv_cpu_ops pv_cpu_ops = {
19426 +struct pv_cpu_ops pv_cpu_ops __read_only = {
19427 .cpuid = native_cpuid,
19428 .get_debugreg = native_get_debugreg,
19429 .set_debugreg = native_set_debugreg,
19430 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
19431 .end_context_switch = paravirt_nop,
19432 };
19433
19434 -struct pv_apic_ops pv_apic_ops = {
19435 +struct pv_apic_ops pv_apic_ops __read_only = {
19436 #ifdef CONFIG_X86_LOCAL_APIC
19437 .startup_ipi_hook = paravirt_nop,
19438 #endif
19439 };
19440
19441 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
19442 +#ifdef CONFIG_X86_32
19443 +#ifdef CONFIG_X86_PAE
19444 +/* 64-bit pagetable entries */
19445 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
19446 +#else
19447 /* 32-bit pagetable entries */
19448 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
19449 +#endif
19450 #else
19451 /* 64-bit pagetable entries */
19452 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
19453 #endif
19454
19455 -struct pv_mmu_ops pv_mmu_ops = {
19456 +struct pv_mmu_ops pv_mmu_ops __read_only = {
19457
19458 .read_cr2 = native_read_cr2,
19459 .write_cr2 = native_write_cr2,
19460 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
19461 .make_pud = PTE_IDENT,
19462
19463 .set_pgd = native_set_pgd,
19464 + .set_pgd_batched = native_set_pgd_batched,
19465 #endif
19466 #endif /* PAGETABLE_LEVELS >= 3 */
19467
19468 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
19469 },
19470
19471 .set_fixmap = native_set_fixmap,
19472 +
19473 +#ifdef CONFIG_PAX_KERNEXEC
19474 + .pax_open_kernel = native_pax_open_kernel,
19475 + .pax_close_kernel = native_pax_close_kernel,
19476 +#endif
19477 +
19478 };
19479
19480 EXPORT_SYMBOL_GPL(pv_time_ops);
19481 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
19482 index 35ccf75..7a15747 100644
19483 --- a/arch/x86/kernel/pci-iommu_table.c
19484 +++ b/arch/x86/kernel/pci-iommu_table.c
19485 @@ -2,7 +2,7 @@
19486 #include <asm/iommu_table.h>
19487 #include <linux/string.h>
19488 #include <linux/kallsyms.h>
19489 -
19490 +#include <linux/sched.h>
19491
19492 #define DEBUG 1
19493
19494 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
19495 index b644e1c..4a6d379 100644
19496 --- a/arch/x86/kernel/process.c
19497 +++ b/arch/x86/kernel/process.c
19498 @@ -36,7 +36,8 @@
19499 * section. Since TSS's are completely CPU-local, we want them
19500 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
19501 */
19502 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
19503 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
19504 +EXPORT_SYMBOL(init_tss);
19505
19506 #ifdef CONFIG_X86_64
19507 static DEFINE_PER_CPU(unsigned char, is_idle);
19508 @@ -92,7 +93,7 @@ void arch_task_cache_init(void)
19509 task_xstate_cachep =
19510 kmem_cache_create("task_xstate", xstate_size,
19511 __alignof__(union thread_xstate),
19512 - SLAB_PANIC | SLAB_NOTRACK, NULL);
19513 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
19514 }
19515
19516 /*
19517 @@ -105,7 +106,7 @@ void exit_thread(void)
19518 unsigned long *bp = t->io_bitmap_ptr;
19519
19520 if (bp) {
19521 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
19522 + struct tss_struct *tss = init_tss + get_cpu();
19523
19524 t->io_bitmap_ptr = NULL;
19525 clear_thread_flag(TIF_IO_BITMAP);
19526 @@ -136,7 +137,7 @@ void show_regs_common(void)
19527 board = dmi_get_system_info(DMI_BOARD_NAME);
19528
19529 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
19530 - current->pid, current->comm, print_tainted(),
19531 + task_pid_nr(current), current->comm, print_tainted(),
19532 init_utsname()->release,
19533 (int)strcspn(init_utsname()->version, " "),
19534 init_utsname()->version,
19535 @@ -149,6 +150,9 @@ void flush_thread(void)
19536 {
19537 struct task_struct *tsk = current;
19538
19539 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19540 + loadsegment(gs, 0);
19541 +#endif
19542 flush_ptrace_hw_breakpoint(tsk);
19543 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
19544 drop_init_fpu(tsk);
19545 @@ -336,7 +340,7 @@ static void __exit_idle(void)
19546 void exit_idle(void)
19547 {
19548 /* idle loop has pid 0 */
19549 - if (current->pid)
19550 + if (task_pid_nr(current))
19551 return;
19552 __exit_idle();
19553 }
19554 @@ -445,7 +449,7 @@ bool set_pm_idle_to_default(void)
19555
19556 return ret;
19557 }
19558 -void stop_this_cpu(void *dummy)
19559 +__noreturn void stop_this_cpu(void *dummy)
19560 {
19561 local_irq_disable();
19562 /*
19563 @@ -673,16 +677,37 @@ static int __init idle_setup(char *str)
19564 }
19565 early_param("idle", idle_setup);
19566
19567 -unsigned long arch_align_stack(unsigned long sp)
19568 +#ifdef CONFIG_PAX_RANDKSTACK
19569 +void pax_randomize_kstack(struct pt_regs *regs)
19570 {
19571 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
19572 - sp -= get_random_int() % 8192;
19573 - return sp & ~0xf;
19574 -}
19575 + struct thread_struct *thread = &current->thread;
19576 + unsigned long time;
19577
19578 -unsigned long arch_randomize_brk(struct mm_struct *mm)
19579 -{
19580 - unsigned long range_end = mm->brk + 0x02000000;
19581 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
19582 -}
19583 + if (!randomize_va_space)
19584 + return;
19585 +
19586 + if (v8086_mode(regs))
19587 + return;
19588
19589 + rdtscl(time);
19590 +
19591 + /* P4 seems to return a 0 LSB, ignore it */
19592 +#ifdef CONFIG_MPENTIUM4
19593 + time &= 0x3EUL;
19594 + time <<= 2;
19595 +#elif defined(CONFIG_X86_64)
19596 + time &= 0xFUL;
19597 + time <<= 4;
19598 +#else
19599 + time &= 0x1FUL;
19600 + time <<= 3;
19601 +#endif
19602 +
19603 + thread->sp0 ^= time;
19604 + load_sp0(init_tss + smp_processor_id(), thread);
19605 +
19606 +#ifdef CONFIG_X86_64
19607 + this_cpu_write(kernel_stack, thread->sp0);
19608 +#endif
19609 +}
19610 +#endif
19611 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
19612 index 44e0bff..5ceb99c 100644
19613 --- a/arch/x86/kernel/process_32.c
19614 +++ b/arch/x86/kernel/process_32.c
19615 @@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
19616 unsigned long thread_saved_pc(struct task_struct *tsk)
19617 {
19618 return ((unsigned long *)tsk->thread.sp)[3];
19619 +//XXX return tsk->thread.eip;
19620 }
19621
19622 void __show_regs(struct pt_regs *regs, int all)
19623 @@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
19624 unsigned long sp;
19625 unsigned short ss, gs;
19626
19627 - if (user_mode_vm(regs)) {
19628 + if (user_mode(regs)) {
19629 sp = regs->sp;
19630 ss = regs->ss & 0xffff;
19631 - gs = get_user_gs(regs);
19632 } else {
19633 sp = kernel_stack_pointer(regs);
19634 savesegment(ss, ss);
19635 - savesegment(gs, gs);
19636 }
19637 + gs = get_user_gs(regs);
19638
19639 show_regs_common();
19640
19641 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
19642 (u16)regs->cs, regs->ip, regs->flags,
19643 - smp_processor_id());
19644 + raw_smp_processor_id());
19645 print_symbol("EIP is at %s\n", regs->ip);
19646
19647 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
19648 @@ -131,20 +131,21 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19649 unsigned long arg,
19650 struct task_struct *p, struct pt_regs *regs)
19651 {
19652 - struct pt_regs *childregs = task_pt_regs(p);
19653 + struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
19654 struct task_struct *tsk;
19655 int err;
19656
19657 p->thread.sp = (unsigned long) childregs;
19658 p->thread.sp0 = (unsigned long) (childregs+1);
19659 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19660
19661 if (unlikely(!regs)) {
19662 /* kernel thread */
19663 memset(childregs, 0, sizeof(struct pt_regs));
19664 p->thread.ip = (unsigned long) ret_from_kernel_thread;
19665 - task_user_gs(p) = __KERNEL_STACK_CANARY;
19666 - childregs->ds = __USER_DS;
19667 - childregs->es = __USER_DS;
19668 + savesegment(gs, childregs->gs);
19669 + childregs->ds = __KERNEL_DS;
19670 + childregs->es = __KERNEL_DS;
19671 childregs->fs = __KERNEL_PERCPU;
19672 childregs->bx = sp; /* function */
19673 childregs->bp = arg;
19674 @@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19675 struct thread_struct *prev = &prev_p->thread,
19676 *next = &next_p->thread;
19677 int cpu = smp_processor_id();
19678 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
19679 + struct tss_struct *tss = init_tss + cpu;
19680 fpu_switch_t fpu;
19681
19682 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
19683 @@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19684 */
19685 lazy_save_gs(prev->gs);
19686
19687 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19688 + __set_fs(task_thread_info(next_p)->addr_limit);
19689 +#endif
19690 +
19691 /*
19692 * Load the per-thread Thread-Local Storage descriptor.
19693 */
19694 @@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19695 */
19696 arch_end_context_switch(next_p);
19697
19698 + this_cpu_write(current_task, next_p);
19699 + this_cpu_write(current_tinfo, &next_p->tinfo);
19700 +
19701 /*
19702 * Restore %gs if needed (which is common)
19703 */
19704 @@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19705
19706 switch_fpu_finish(next_p, fpu);
19707
19708 - this_cpu_write(current_task, next_p);
19709 -
19710 return prev_p;
19711 }
19712
19713 @@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
19714 } while (count++ < 16);
19715 return 0;
19716 }
19717 -
19718 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19719 index 16c6365..5d32218 100644
19720 --- a/arch/x86/kernel/process_64.c
19721 +++ b/arch/x86/kernel/process_64.c
19722 @@ -153,10 +153,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19723 struct pt_regs *childregs;
19724 struct task_struct *me = current;
19725
19726 - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
19727 + p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
19728 childregs = task_pt_regs(p);
19729 p->thread.sp = (unsigned long) childregs;
19730 p->thread.usersp = me->thread.usersp;
19731 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19732 set_tsk_thread_flag(p, TIF_FORK);
19733 p->fpu_counter = 0;
19734 p->thread.io_bitmap_ptr = NULL;
19735 @@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19736 struct thread_struct *prev = &prev_p->thread;
19737 struct thread_struct *next = &next_p->thread;
19738 int cpu = smp_processor_id();
19739 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
19740 + struct tss_struct *tss = init_tss + cpu;
19741 unsigned fsindex, gsindex;
19742 fpu_switch_t fpu;
19743
19744 @@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19745 prev->usersp = this_cpu_read(old_rsp);
19746 this_cpu_write(old_rsp, next->usersp);
19747 this_cpu_write(current_task, next_p);
19748 + this_cpu_write(current_tinfo, &next_p->tinfo);
19749
19750 - this_cpu_write(kernel_stack,
19751 - (unsigned long)task_stack_page(next_p) +
19752 - THREAD_SIZE - KERNEL_STACK_OFFSET);
19753 + this_cpu_write(kernel_stack, next->sp0);
19754
19755 /*
19756 * Now maybe reload the debug registers and handle I/O bitmaps
19757 @@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
19758 if (!p || p == current || p->state == TASK_RUNNING)
19759 return 0;
19760 stack = (unsigned long)task_stack_page(p);
19761 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
19762 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
19763 return 0;
19764 fp = *(u64 *)(p->thread.sp);
19765 do {
19766 - if (fp < (unsigned long)stack ||
19767 - fp >= (unsigned long)stack+THREAD_SIZE)
19768 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
19769 return 0;
19770 ip = *(u64 *)(fp+8);
19771 if (!in_sched_functions(ip))
19772 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
19773 index 974b67e..044111b 100644
19774 --- a/arch/x86/kernel/ptrace.c
19775 +++ b/arch/x86/kernel/ptrace.c
19776 @@ -183,14 +183,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
19777 {
19778 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
19779 unsigned long sp = (unsigned long)&regs->sp;
19780 - struct thread_info *tinfo;
19781
19782 - if (context == (sp & ~(THREAD_SIZE - 1)))
19783 + if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
19784 return sp;
19785
19786 - tinfo = (struct thread_info *)context;
19787 - if (tinfo->previous_esp)
19788 - return tinfo->previous_esp;
19789 + sp = *(unsigned long *)context;
19790 + if (sp)
19791 + return sp;
19792
19793 return (unsigned long)regs;
19794 }
19795 @@ -855,7 +854,7 @@ long arch_ptrace(struct task_struct *child, long request,
19796 unsigned long addr, unsigned long data)
19797 {
19798 int ret;
19799 - unsigned long __user *datap = (unsigned long __user *)data;
19800 + unsigned long __user *datap = (__force unsigned long __user *)data;
19801
19802 switch (request) {
19803 /* read the word at location addr in the USER area. */
19804 @@ -940,14 +939,14 @@ long arch_ptrace(struct task_struct *child, long request,
19805 if ((int) addr < 0)
19806 return -EIO;
19807 ret = do_get_thread_area(child, addr,
19808 - (struct user_desc __user *)data);
19809 + (__force struct user_desc __user *) data);
19810 break;
19811
19812 case PTRACE_SET_THREAD_AREA:
19813 if ((int) addr < 0)
19814 return -EIO;
19815 ret = do_set_thread_area(child, addr,
19816 - (struct user_desc __user *)data, 0);
19817 + (__force struct user_desc __user *) data, 0);
19818 break;
19819 #endif
19820
19821 @@ -1325,7 +1324,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
19822
19823 #ifdef CONFIG_X86_64
19824
19825 -static struct user_regset x86_64_regsets[] __read_mostly = {
19826 +static struct user_regset x86_64_regsets[] = {
19827 [REGSET_GENERAL] = {
19828 .core_note_type = NT_PRSTATUS,
19829 .n = sizeof(struct user_regs_struct) / sizeof(long),
19830 @@ -1366,7 +1365,7 @@ static const struct user_regset_view user_x86_64_view = {
19831 #endif /* CONFIG_X86_64 */
19832
19833 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
19834 -static struct user_regset x86_32_regsets[] __read_mostly = {
19835 +static struct user_regset x86_32_regsets[] = {
19836 [REGSET_GENERAL] = {
19837 .core_note_type = NT_PRSTATUS,
19838 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
19839 @@ -1419,13 +1418,13 @@ static const struct user_regset_view user_x86_32_view = {
19840 */
19841 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
19842
19843 -void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
19844 +void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
19845 {
19846 #ifdef CONFIG_X86_64
19847 - x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
19848 + *(unsigned int *)&x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
19849 #endif
19850 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
19851 - x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
19852 + *(unsigned int *)&x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
19853 #endif
19854 xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
19855 }
19856 @@ -1454,7 +1453,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
19857 memset(info, 0, sizeof(*info));
19858 info->si_signo = SIGTRAP;
19859 info->si_code = si_code;
19860 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
19861 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
19862 }
19863
19864 void user_single_step_siginfo(struct task_struct *tsk,
19865 @@ -1483,6 +1482,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19866 # define IS_IA32 0
19867 #endif
19868
19869 +#ifdef CONFIG_GRKERNSEC_SETXID
19870 +extern void gr_delayed_cred_worker(void);
19871 +#endif
19872 +
19873 /*
19874 * We must return the syscall number to actually look up in the table.
19875 * This can be -1L to skip running any syscall at all.
19876 @@ -1493,6 +1496,11 @@ long syscall_trace_enter(struct pt_regs *regs)
19877
19878 rcu_user_exit();
19879
19880 +#ifdef CONFIG_GRKERNSEC_SETXID
19881 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
19882 + gr_delayed_cred_worker();
19883 +#endif
19884 +
19885 /*
19886 * If we stepped into a sysenter/syscall insn, it trapped in
19887 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
19888 @@ -1548,6 +1556,11 @@ void syscall_trace_leave(struct pt_regs *regs)
19889 */
19890 rcu_user_exit();
19891
19892 +#ifdef CONFIG_GRKERNSEC_SETXID
19893 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
19894 + gr_delayed_cred_worker();
19895 +#endif
19896 +
19897 audit_syscall_exit(regs);
19898
19899 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
19900 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
19901 index 42eb330..139955c 100644
19902 --- a/arch/x86/kernel/pvclock.c
19903 +++ b/arch/x86/kernel/pvclock.c
19904 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
19905 return pv_tsc_khz;
19906 }
19907
19908 -static atomic64_t last_value = ATOMIC64_INIT(0);
19909 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
19910
19911 void pvclock_resume(void)
19912 {
19913 - atomic64_set(&last_value, 0);
19914 + atomic64_set_unchecked(&last_value, 0);
19915 }
19916
19917 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
19918 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
19919 * updating at the same time, and one of them could be slightly behind,
19920 * making the assumption that last_value always go forward fail to hold.
19921 */
19922 - last = atomic64_read(&last_value);
19923 + last = atomic64_read_unchecked(&last_value);
19924 do {
19925 if (ret < last)
19926 return last;
19927 - last = atomic64_cmpxchg(&last_value, last, ret);
19928 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
19929 } while (unlikely(last != ret));
19930
19931 return ret;
19932 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
19933 index 4e8ba39..e0186b3 100644
19934 --- a/arch/x86/kernel/reboot.c
19935 +++ b/arch/x86/kernel/reboot.c
19936 @@ -36,7 +36,7 @@ void (*pm_power_off)(void);
19937 EXPORT_SYMBOL(pm_power_off);
19938
19939 static const struct desc_ptr no_idt = {};
19940 -static int reboot_mode;
19941 +static unsigned short reboot_mode;
19942 enum reboot_type reboot_type = BOOT_ACPI;
19943 int reboot_force;
19944
19945 @@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
19946
19947 void __noreturn machine_real_restart(unsigned int type)
19948 {
19949 +
19950 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
19951 + struct desc_struct *gdt;
19952 +#endif
19953 +
19954 local_irq_disable();
19955
19956 /*
19957 @@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
19958
19959 /* Jump to the identity-mapped low memory code */
19960 #ifdef CONFIG_X86_32
19961 - asm volatile("jmpl *%0" : :
19962 +
19963 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
19964 + gdt = get_cpu_gdt_table(smp_processor_id());
19965 + pax_open_kernel();
19966 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19967 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
19968 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
19969 + loadsegment(ds, __KERNEL_DS);
19970 + loadsegment(es, __KERNEL_DS);
19971 + loadsegment(ss, __KERNEL_DS);
19972 +#endif
19973 +#ifdef CONFIG_PAX_KERNEXEC
19974 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
19975 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
19976 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
19977 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
19978 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
19979 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
19980 +#endif
19981 + pax_close_kernel();
19982 +#endif
19983 +
19984 + asm volatile("ljmpl *%0" : :
19985 "rm" (real_mode_header->machine_real_restart_asm),
19986 "a" (type));
19987 #else
19988 @@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19989 * try to force a triple fault and then cycle between hitting the keyboard
19990 * controller and doing that
19991 */
19992 -static void native_machine_emergency_restart(void)
19993 +static void __noreturn native_machine_emergency_restart(void)
19994 {
19995 int i;
19996 int attempt = 0;
19997 @@ -654,13 +681,13 @@ void native_machine_shutdown(void)
19998 #endif
19999 }
20000
20001 -static void __machine_emergency_restart(int emergency)
20002 +static void __noreturn __machine_emergency_restart(int emergency)
20003 {
20004 reboot_emergency = emergency;
20005 machine_ops.emergency_restart();
20006 }
20007
20008 -static void native_machine_restart(char *__unused)
20009 +static void __noreturn native_machine_restart(char *__unused)
20010 {
20011 pr_notice("machine restart\n");
20012
20013 @@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
20014 __machine_emergency_restart(0);
20015 }
20016
20017 -static void native_machine_halt(void)
20018 +static void __noreturn native_machine_halt(void)
20019 {
20020 /* Stop other cpus and apics */
20021 machine_shutdown();
20022 @@ -679,7 +706,7 @@ static void native_machine_halt(void)
20023 stop_this_cpu(NULL);
20024 }
20025
20026 -static void native_machine_power_off(void)
20027 +static void __noreturn native_machine_power_off(void)
20028 {
20029 if (pm_power_off) {
20030 if (!reboot_force)
20031 @@ -688,6 +715,7 @@ static void native_machine_power_off(void)
20032 }
20033 /* A fallback in case there is no PM info available */
20034 tboot_shutdown(TB_SHUTDOWN_HALT);
20035 + unreachable();
20036 }
20037
20038 struct machine_ops machine_ops = {
20039 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
20040 index 7a6f3b3..bed145d7 100644
20041 --- a/arch/x86/kernel/relocate_kernel_64.S
20042 +++ b/arch/x86/kernel/relocate_kernel_64.S
20043 @@ -11,6 +11,7 @@
20044 #include <asm/kexec.h>
20045 #include <asm/processor-flags.h>
20046 #include <asm/pgtable_types.h>
20047 +#include <asm/alternative-asm.h>
20048
20049 /*
20050 * Must be relocatable PIC code callable as a C function
20051 @@ -160,13 +161,14 @@ identity_mapped:
20052 xorq %rbp, %rbp
20053 xorq %r8, %r8
20054 xorq %r9, %r9
20055 - xorq %r10, %r9
20056 + xorq %r10, %r10
20057 xorq %r11, %r11
20058 xorq %r12, %r12
20059 xorq %r13, %r13
20060 xorq %r14, %r14
20061 xorq %r15, %r15
20062
20063 + pax_force_retaddr 0, 1
20064 ret
20065
20066 1:
20067 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
20068 index ca45696..6384a09 100644
20069 --- a/arch/x86/kernel/setup.c
20070 +++ b/arch/x86/kernel/setup.c
20071 @@ -441,7 +441,7 @@ static void __init parse_setup_data(void)
20072
20073 switch (data->type) {
20074 case SETUP_E820_EXT:
20075 - parse_e820_ext(data);
20076 + parse_e820_ext((struct setup_data __force_kernel *)data);
20077 break;
20078 case SETUP_DTB:
20079 add_dtb(pa_data);
20080 @@ -633,7 +633,7 @@ static void __init trim_bios_range(void)
20081 * area (640->1Mb) as ram even though it is not.
20082 * take them out.
20083 */
20084 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
20085 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
20086 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
20087 }
20088
20089 @@ -756,14 +756,14 @@ void __init setup_arch(char **cmdline_p)
20090
20091 if (!boot_params.hdr.root_flags)
20092 root_mountflags &= ~MS_RDONLY;
20093 - init_mm.start_code = (unsigned long) _text;
20094 - init_mm.end_code = (unsigned long) _etext;
20095 + init_mm.start_code = ktla_ktva((unsigned long) _text);
20096 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
20097 init_mm.end_data = (unsigned long) _edata;
20098 init_mm.brk = _brk_end;
20099
20100 - code_resource.start = virt_to_phys(_text);
20101 - code_resource.end = virt_to_phys(_etext)-1;
20102 - data_resource.start = virt_to_phys(_etext);
20103 + code_resource.start = virt_to_phys(ktla_ktva(_text));
20104 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
20105 + data_resource.start = virt_to_phys(_sdata);
20106 data_resource.end = virt_to_phys(_edata)-1;
20107 bss_resource.start = virt_to_phys(&__bss_start);
20108 bss_resource.end = virt_to_phys(&__bss_stop)-1;
20109 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
20110 index 5cdff03..5810740 100644
20111 --- a/arch/x86/kernel/setup_percpu.c
20112 +++ b/arch/x86/kernel/setup_percpu.c
20113 @@ -21,19 +21,17 @@
20114 #include <asm/cpu.h>
20115 #include <asm/stackprotector.h>
20116
20117 -DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
20118 +#ifdef CONFIG_SMP
20119 +DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
20120 EXPORT_PER_CPU_SYMBOL(cpu_number);
20121 +#endif
20122
20123 -#ifdef CONFIG_X86_64
20124 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
20125 -#else
20126 -#define BOOT_PERCPU_OFFSET 0
20127 -#endif
20128
20129 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
20130 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
20131
20132 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
20133 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
20134 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
20135 };
20136 EXPORT_SYMBOL(__per_cpu_offset);
20137 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
20138 {
20139 #ifdef CONFIG_X86_32
20140 struct desc_struct gdt;
20141 + unsigned long base = per_cpu_offset(cpu);
20142
20143 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
20144 - 0x2 | DESCTYPE_S, 0x8);
20145 - gdt.s = 1;
20146 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
20147 + 0x83 | DESCTYPE_S, 0xC);
20148 write_gdt_entry(get_cpu_gdt_table(cpu),
20149 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
20150 #endif
20151 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
20152 /* alrighty, percpu areas up and running */
20153 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
20154 for_each_possible_cpu(cpu) {
20155 +#ifdef CONFIG_CC_STACKPROTECTOR
20156 +#ifdef CONFIG_X86_32
20157 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
20158 +#endif
20159 +#endif
20160 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
20161 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
20162 per_cpu(cpu_number, cpu) = cpu;
20163 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
20164 */
20165 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
20166 #endif
20167 +#ifdef CONFIG_CC_STACKPROTECTOR
20168 +#ifdef CONFIG_X86_32
20169 + if (!cpu)
20170 + per_cpu(stack_canary.canary, cpu) = canary;
20171 +#endif
20172 +#endif
20173 /*
20174 * Up to this point, the boot CPU has been using .init.data
20175 * area. Reload any changed state for the boot CPU.
20176 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
20177 index 70b27ee..fcf827f 100644
20178 --- a/arch/x86/kernel/signal.c
20179 +++ b/arch/x86/kernel/signal.c
20180 @@ -195,7 +195,7 @@ static unsigned long align_sigframe(unsigned long sp)
20181 * Align the stack pointer according to the i386 ABI,
20182 * i.e. so that on function entry ((sp + 4) & 15) == 0.
20183 */
20184 - sp = ((sp + 4) & -16ul) - 4;
20185 + sp = ((sp - 12) & -16ul) - 4;
20186 #else /* !CONFIG_X86_32 */
20187 sp = round_down(sp, 16) - 8;
20188 #endif
20189 @@ -303,9 +303,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20190 }
20191
20192 if (current->mm->context.vdso)
20193 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20194 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20195 else
20196 - restorer = &frame->retcode;
20197 + restorer = (void __user *)&frame->retcode;
20198 if (ka->sa.sa_flags & SA_RESTORER)
20199 restorer = ka->sa.sa_restorer;
20200
20201 @@ -319,7 +319,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20202 * reasons and because gdb uses it as a signature to notice
20203 * signal handler stack frames.
20204 */
20205 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
20206 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
20207
20208 if (err)
20209 return -EFAULT;
20210 @@ -369,7 +369,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20211 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
20212
20213 /* Set up to return from userspace. */
20214 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20215 + if (current->mm->context.vdso)
20216 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20217 + else
20218 + restorer = (void __user *)&frame->retcode;
20219 if (ka->sa.sa_flags & SA_RESTORER)
20220 restorer = ka->sa.sa_restorer;
20221 put_user_ex(restorer, &frame->pretcode);
20222 @@ -381,7 +384,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20223 * reasons and because gdb uses it as a signature to notice
20224 * signal handler stack frames.
20225 */
20226 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
20227 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
20228 } put_user_catch(err);
20229
20230 err |= copy_siginfo_to_user(&frame->info, info);
20231 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
20232 index f3e2ec8..ad5287a 100644
20233 --- a/arch/x86/kernel/smpboot.c
20234 +++ b/arch/x86/kernel/smpboot.c
20235 @@ -673,6 +673,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
20236 idle->thread.sp = (unsigned long) (((struct pt_regs *)
20237 (THREAD_SIZE + task_stack_page(idle))) - 1);
20238 per_cpu(current_task, cpu) = idle;
20239 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
20240
20241 #ifdef CONFIG_X86_32
20242 /* Stack for startup_32 can be just as for start_secondary onwards */
20243 @@ -680,11 +681,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
20244 #else
20245 clear_tsk_thread_flag(idle, TIF_FORK);
20246 initial_gs = per_cpu_offset(cpu);
20247 - per_cpu(kernel_stack, cpu) =
20248 - (unsigned long)task_stack_page(idle) -
20249 - KERNEL_STACK_OFFSET + THREAD_SIZE;
20250 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
20251 #endif
20252 +
20253 + pax_open_kernel();
20254 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20255 + pax_close_kernel();
20256 +
20257 initial_code = (unsigned long)start_secondary;
20258 stack_start = idle->thread.sp;
20259
20260 @@ -823,6 +826,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
20261 /* the FPU context is blank, nobody can own it */
20262 __cpu_disable_lazy_restore(cpu);
20263
20264 +#ifdef CONFIG_PAX_PER_CPU_PGD
20265 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
20266 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20267 + KERNEL_PGD_PTRS);
20268 +#endif
20269 +
20270 + /* the FPU context is blank, nobody can own it */
20271 + __cpu_disable_lazy_restore(cpu);
20272 +
20273 err = do_boot_cpu(apicid, cpu, tidle);
20274 if (err) {
20275 pr_debug("do_boot_cpu failed %d\n", err);
20276 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
20277 index cd3b243..4ba27a4 100644
20278 --- a/arch/x86/kernel/step.c
20279 +++ b/arch/x86/kernel/step.c
20280 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20281 struct desc_struct *desc;
20282 unsigned long base;
20283
20284 - seg &= ~7UL;
20285 + seg >>= 3;
20286
20287 mutex_lock(&child->mm->context.lock);
20288 - if (unlikely((seg >> 3) >= child->mm->context.size))
20289 + if (unlikely(seg >= child->mm->context.size))
20290 addr = -1L; /* bogus selector, access would fault */
20291 else {
20292 desc = child->mm->context.ldt + seg;
20293 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20294 addr += base;
20295 }
20296 mutex_unlock(&child->mm->context.lock);
20297 - }
20298 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
20299 + addr = ktla_ktva(addr);
20300
20301 return addr;
20302 }
20303 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20304 unsigned char opcode[15];
20305 unsigned long addr = convert_ip_to_linear(child, regs);
20306
20307 + if (addr == -EINVAL)
20308 + return 0;
20309 +
20310 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
20311 for (i = 0; i < copied; i++) {
20312 switch (opcode[i]) {
20313 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
20314 new file mode 100644
20315 index 0000000..db6b9ed
20316 --- /dev/null
20317 +++ b/arch/x86/kernel/sys_i386_32.c
20318 @@ -0,0 +1,247 @@
20319 +/*
20320 + * This file contains various random system calls that
20321 + * have a non-standard calling sequence on the Linux/i386
20322 + * platform.
20323 + */
20324 +
20325 +#include <linux/errno.h>
20326 +#include <linux/sched.h>
20327 +#include <linux/mm.h>
20328 +#include <linux/fs.h>
20329 +#include <linux/smp.h>
20330 +#include <linux/sem.h>
20331 +#include <linux/msg.h>
20332 +#include <linux/shm.h>
20333 +#include <linux/stat.h>
20334 +#include <linux/syscalls.h>
20335 +#include <linux/mman.h>
20336 +#include <linux/file.h>
20337 +#include <linux/utsname.h>
20338 +#include <linux/ipc.h>
20339 +
20340 +#include <linux/uaccess.h>
20341 +#include <linux/unistd.h>
20342 +
20343 +#include <asm/syscalls.h>
20344 +
20345 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
20346 +{
20347 + unsigned long pax_task_size = TASK_SIZE;
20348 +
20349 +#ifdef CONFIG_PAX_SEGMEXEC
20350 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
20351 + pax_task_size = SEGMEXEC_TASK_SIZE;
20352 +#endif
20353 +
20354 + if (len > pax_task_size || addr > pax_task_size - len)
20355 + return -EINVAL;
20356 +
20357 + return 0;
20358 +}
20359 +
20360 +unsigned long
20361 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
20362 + unsigned long len, unsigned long pgoff, unsigned long flags)
20363 +{
20364 + struct mm_struct *mm = current->mm;
20365 + struct vm_area_struct *vma;
20366 + unsigned long start_addr, pax_task_size = TASK_SIZE;
20367 +
20368 +#ifdef CONFIG_PAX_SEGMEXEC
20369 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20370 + pax_task_size = SEGMEXEC_TASK_SIZE;
20371 +#endif
20372 +
20373 + pax_task_size -= PAGE_SIZE;
20374 +
20375 + if (len > pax_task_size)
20376 + return -ENOMEM;
20377 +
20378 + if (flags & MAP_FIXED)
20379 + return addr;
20380 +
20381 +#ifdef CONFIG_PAX_RANDMMAP
20382 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20383 +#endif
20384 +
20385 + if (addr) {
20386 + addr = PAGE_ALIGN(addr);
20387 + if (pax_task_size - len >= addr) {
20388 + vma = find_vma(mm, addr);
20389 + if (check_heap_stack_gap(vma, addr, len))
20390 + return addr;
20391 + }
20392 + }
20393 + if (len > mm->cached_hole_size) {
20394 + start_addr = addr = mm->free_area_cache;
20395 + } else {
20396 + start_addr = addr = mm->mmap_base;
20397 + mm->cached_hole_size = 0;
20398 + }
20399 +
20400 +#ifdef CONFIG_PAX_PAGEEXEC
20401 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
20402 + start_addr = 0x00110000UL;
20403 +
20404 +#ifdef CONFIG_PAX_RANDMMAP
20405 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20406 + start_addr += mm->delta_mmap & 0x03FFF000UL;
20407 +#endif
20408 +
20409 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
20410 + start_addr = addr = mm->mmap_base;
20411 + else
20412 + addr = start_addr;
20413 + }
20414 +#endif
20415 +
20416 +full_search:
20417 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20418 + /* At this point: (!vma || addr < vma->vm_end). */
20419 + if (pax_task_size - len < addr) {
20420 + /*
20421 + * Start a new search - just in case we missed
20422 + * some holes.
20423 + */
20424 + if (start_addr != mm->mmap_base) {
20425 + start_addr = addr = mm->mmap_base;
20426 + mm->cached_hole_size = 0;
20427 + goto full_search;
20428 + }
20429 + return -ENOMEM;
20430 + }
20431 + if (check_heap_stack_gap(vma, addr, len))
20432 + break;
20433 + if (addr + mm->cached_hole_size < vma->vm_start)
20434 + mm->cached_hole_size = vma->vm_start - addr;
20435 + addr = vma->vm_end;
20436 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
20437 + start_addr = addr = mm->mmap_base;
20438 + mm->cached_hole_size = 0;
20439 + goto full_search;
20440 + }
20441 + }
20442 +
20443 + /*
20444 + * Remember the place where we stopped the search:
20445 + */
20446 + mm->free_area_cache = addr + len;
20447 + return addr;
20448 +}
20449 +
20450 +unsigned long
20451 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20452 + const unsigned long len, const unsigned long pgoff,
20453 + const unsigned long flags)
20454 +{
20455 + struct vm_area_struct *vma;
20456 + struct mm_struct *mm = current->mm;
20457 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
20458 +
20459 +#ifdef CONFIG_PAX_SEGMEXEC
20460 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20461 + pax_task_size = SEGMEXEC_TASK_SIZE;
20462 +#endif
20463 +
20464 + pax_task_size -= PAGE_SIZE;
20465 +
20466 + /* requested length too big for entire address space */
20467 + if (len > pax_task_size)
20468 + return -ENOMEM;
20469 +
20470 + if (flags & MAP_FIXED)
20471 + return addr;
20472 +
20473 +#ifdef CONFIG_PAX_PAGEEXEC
20474 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
20475 + goto bottomup;
20476 +#endif
20477 +
20478 +#ifdef CONFIG_PAX_RANDMMAP
20479 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20480 +#endif
20481 +
20482 + /* requesting a specific address */
20483 + if (addr) {
20484 + addr = PAGE_ALIGN(addr);
20485 + if (pax_task_size - len >= addr) {
20486 + vma = find_vma(mm, addr);
20487 + if (check_heap_stack_gap(vma, addr, len))
20488 + return addr;
20489 + }
20490 + }
20491 +
20492 + /* check if free_area_cache is useful for us */
20493 + if (len <= mm->cached_hole_size) {
20494 + mm->cached_hole_size = 0;
20495 + mm->free_area_cache = mm->mmap_base;
20496 + }
20497 +
20498 + /* either no address requested or can't fit in requested address hole */
20499 + addr = mm->free_area_cache;
20500 +
20501 + /* make sure it can fit in the remaining address space */
20502 + if (addr > len) {
20503 + vma = find_vma(mm, addr-len);
20504 + if (check_heap_stack_gap(vma, addr - len, len))
20505 + /* remember the address as a hint for next time */
20506 + return (mm->free_area_cache = addr-len);
20507 + }
20508 +
20509 + if (mm->mmap_base < len)
20510 + goto bottomup;
20511 +
20512 + addr = mm->mmap_base-len;
20513 +
20514 + do {
20515 + /*
20516 + * Lookup failure means no vma is above this address,
20517 + * else if new region fits below vma->vm_start,
20518 + * return with success:
20519 + */
20520 + vma = find_vma(mm, addr);
20521 + if (check_heap_stack_gap(vma, addr, len))
20522 + /* remember the address as a hint for next time */
20523 + return (mm->free_area_cache = addr);
20524 +
20525 + /* remember the largest hole we saw so far */
20526 + if (addr + mm->cached_hole_size < vma->vm_start)
20527 + mm->cached_hole_size = vma->vm_start - addr;
20528 +
20529 + /* try just below the current vma->vm_start */
20530 + addr = skip_heap_stack_gap(vma, len);
20531 + } while (!IS_ERR_VALUE(addr));
20532 +
20533 +bottomup:
20534 + /*
20535 + * A failed mmap() very likely causes application failure,
20536 + * so fall back to the bottom-up function here. This scenario
20537 + * can happen with large stack limits and large mmap()
20538 + * allocations.
20539 + */
20540 +
20541 +#ifdef CONFIG_PAX_SEGMEXEC
20542 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20543 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20544 + else
20545 +#endif
20546 +
20547 + mm->mmap_base = TASK_UNMAPPED_BASE;
20548 +
20549 +#ifdef CONFIG_PAX_RANDMMAP
20550 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20551 + mm->mmap_base += mm->delta_mmap;
20552 +#endif
20553 +
20554 + mm->free_area_cache = mm->mmap_base;
20555 + mm->cached_hole_size = ~0UL;
20556 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20557 + /*
20558 + * Restore the topdown base:
20559 + */
20560 + mm->mmap_base = base;
20561 + mm->free_area_cache = base;
20562 + mm->cached_hole_size = ~0UL;
20563 +
20564 + return addr;
20565 +}
20566 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
20567 index b4d3c39..82bb73b 100644
20568 --- a/arch/x86/kernel/sys_x86_64.c
20569 +++ b/arch/x86/kernel/sys_x86_64.c
20570 @@ -95,8 +95,8 @@ out:
20571 return error;
20572 }
20573
20574 -static void find_start_end(unsigned long flags, unsigned long *begin,
20575 - unsigned long *end)
20576 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
20577 + unsigned long *begin, unsigned long *end)
20578 {
20579 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
20580 unsigned long new_begin;
20581 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
20582 *begin = new_begin;
20583 }
20584 } else {
20585 - *begin = TASK_UNMAPPED_BASE;
20586 + *begin = mm->mmap_base;
20587 *end = TASK_SIZE;
20588 }
20589 }
20590 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
20591 if (flags & MAP_FIXED)
20592 return addr;
20593
20594 - find_start_end(flags, &begin, &end);
20595 + find_start_end(mm, flags, &begin, &end);
20596
20597 if (len > end)
20598 return -ENOMEM;
20599
20600 +#ifdef CONFIG_PAX_RANDMMAP
20601 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20602 +#endif
20603 +
20604 if (addr) {
20605 addr = PAGE_ALIGN(addr);
20606 vma = find_vma(mm, addr);
20607 - if (end - len >= addr &&
20608 - (!vma || addr + len <= vma->vm_start))
20609 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
20610 return addr;
20611 }
20612 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
20613 @@ -172,7 +175,7 @@ full_search:
20614 }
20615 return -ENOMEM;
20616 }
20617 - if (!vma || addr + len <= vma->vm_start) {
20618 + if (check_heap_stack_gap(vma, addr, len)) {
20619 /*
20620 * Remember the place where we stopped the search:
20621 */
20622 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20623 {
20624 struct vm_area_struct *vma;
20625 struct mm_struct *mm = current->mm;
20626 - unsigned long addr = addr0, start_addr;
20627 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
20628
20629 /* requested length too big for entire address space */
20630 if (len > TASK_SIZE)
20631 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20632 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
20633 goto bottomup;
20634
20635 +#ifdef CONFIG_PAX_RANDMMAP
20636 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20637 +#endif
20638 +
20639 /* requesting a specific address */
20640 if (addr) {
20641 addr = PAGE_ALIGN(addr);
20642 - vma = find_vma(mm, addr);
20643 - if (TASK_SIZE - len >= addr &&
20644 - (!vma || addr + len <= vma->vm_start))
20645 - return addr;
20646 + if (TASK_SIZE - len >= addr) {
20647 + vma = find_vma(mm, addr);
20648 + if (check_heap_stack_gap(vma, addr, len))
20649 + return addr;
20650 + }
20651 }
20652
20653 /* check if free_area_cache is useful for us */
20654 @@ -240,7 +248,7 @@ try_again:
20655 * return with success:
20656 */
20657 vma = find_vma(mm, addr);
20658 - if (!vma || addr+len <= vma->vm_start)
20659 + if (check_heap_stack_gap(vma, addr, len))
20660 /* remember the address as a hint for next time */
20661 return mm->free_area_cache = addr;
20662
20663 @@ -249,8 +257,8 @@ try_again:
20664 mm->cached_hole_size = vma->vm_start - addr;
20665
20666 /* try just below the current vma->vm_start */
20667 - addr = vma->vm_start-len;
20668 - } while (len < vma->vm_start);
20669 + addr = skip_heap_stack_gap(vma, len);
20670 + } while (!IS_ERR_VALUE(addr));
20671
20672 fail:
20673 /*
20674 @@ -270,13 +278,21 @@ bottomup:
20675 * can happen with large stack limits and large mmap()
20676 * allocations.
20677 */
20678 + mm->mmap_base = TASK_UNMAPPED_BASE;
20679 +
20680 +#ifdef CONFIG_PAX_RANDMMAP
20681 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20682 + mm->mmap_base += mm->delta_mmap;
20683 +#endif
20684 +
20685 + mm->free_area_cache = mm->mmap_base;
20686 mm->cached_hole_size = ~0UL;
20687 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20688 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20689 /*
20690 * Restore the topdown base:
20691 */
20692 - mm->free_area_cache = mm->mmap_base;
20693 + mm->mmap_base = base;
20694 + mm->free_area_cache = base;
20695 mm->cached_hole_size = ~0UL;
20696
20697 return addr;
20698 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20699 index f84fe00..93fe08f 100644
20700 --- a/arch/x86/kernel/tboot.c
20701 +++ b/arch/x86/kernel/tboot.c
20702 @@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
20703
20704 void tboot_shutdown(u32 shutdown_type)
20705 {
20706 - void (*shutdown)(void);
20707 + void (* __noreturn shutdown)(void);
20708
20709 if (!tboot_enabled())
20710 return;
20711 @@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
20712
20713 switch_to_tboot_pt();
20714
20715 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
20716 + shutdown = (void *)tboot->shutdown_entry;
20717 shutdown();
20718
20719 /* should not reach here */
20720 @@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
20721 return 0;
20722 }
20723
20724 -static atomic_t ap_wfs_count;
20725 +static atomic_unchecked_t ap_wfs_count;
20726
20727 static int tboot_wait_for_aps(int num_aps)
20728 {
20729 @@ -324,9 +324,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
20730 {
20731 switch (action) {
20732 case CPU_DYING:
20733 - atomic_inc(&ap_wfs_count);
20734 + atomic_inc_unchecked(&ap_wfs_count);
20735 if (num_online_cpus() == 1)
20736 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
20737 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
20738 return NOTIFY_BAD;
20739 break;
20740 }
20741 @@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
20742
20743 tboot_create_trampoline();
20744
20745 - atomic_set(&ap_wfs_count, 0);
20746 + atomic_set_unchecked(&ap_wfs_count, 0);
20747 register_hotcpu_notifier(&tboot_cpu_notifier);
20748
20749 acpi_os_set_prepare_sleep(&tboot_sleep);
20750 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
20751 index 24d3c91..d06b473 100644
20752 --- a/arch/x86/kernel/time.c
20753 +++ b/arch/x86/kernel/time.c
20754 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
20755 {
20756 unsigned long pc = instruction_pointer(regs);
20757
20758 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
20759 + if (!user_mode(regs) && in_lock_functions(pc)) {
20760 #ifdef CONFIG_FRAME_POINTER
20761 - return *(unsigned long *)(regs->bp + sizeof(long));
20762 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
20763 #else
20764 unsigned long *sp =
20765 (unsigned long *)kernel_stack_pointer(regs);
20766 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
20767 * or above a saved flags. Eflags has bits 22-31 zero,
20768 * kernel addresses don't.
20769 */
20770 +
20771 +#ifdef CONFIG_PAX_KERNEXEC
20772 + return ktla_ktva(sp[0]);
20773 +#else
20774 if (sp[0] >> 22)
20775 return sp[0];
20776 if (sp[1] >> 22)
20777 return sp[1];
20778 #endif
20779 +
20780 +#endif
20781 }
20782 return pc;
20783 }
20784 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
20785 index 9d9d2f9..cad418a 100644
20786 --- a/arch/x86/kernel/tls.c
20787 +++ b/arch/x86/kernel/tls.c
20788 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
20789 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
20790 return -EINVAL;
20791
20792 +#ifdef CONFIG_PAX_SEGMEXEC
20793 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
20794 + return -EINVAL;
20795 +#endif
20796 +
20797 set_tls_desc(p, idx, &info, 1);
20798
20799 return 0;
20800 @@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
20801
20802 if (kbuf)
20803 info = kbuf;
20804 - else if (__copy_from_user(infobuf, ubuf, count))
20805 + else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
20806 return -EFAULT;
20807 else
20808 info = infobuf;
20809 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20810 index 8276dc6..4ca48a2 100644
20811 --- a/arch/x86/kernel/traps.c
20812 +++ b/arch/x86/kernel/traps.c
20813 @@ -71,12 +71,6 @@ asmlinkage int system_call(void);
20814
20815 /* Do we ignore FPU interrupts ? */
20816 char ignore_fpu_irq;
20817 -
20818 -/*
20819 - * The IDT has to be page-aligned to simplify the Pentium
20820 - * F0 0F bug workaround.
20821 - */
20822 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20823 #endif
20824
20825 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20826 @@ -109,11 +103,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20827 }
20828
20829 static int __kprobes
20830 -do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
20831 +do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
20832 struct pt_regs *regs, long error_code)
20833 {
20834 #ifdef CONFIG_X86_32
20835 - if (regs->flags & X86_VM_MASK) {
20836 + if (v8086_mode(regs)) {
20837 /*
20838 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20839 * On nmi (interrupt 2), do_trap should not be called.
20840 @@ -126,12 +120,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
20841 return -1;
20842 }
20843 #endif
20844 - if (!user_mode(regs)) {
20845 + if (!user_mode_novm(regs)) {
20846 if (!fixup_exception(regs)) {
20847 tsk->thread.error_code = error_code;
20848 tsk->thread.trap_nr = trapnr;
20849 +
20850 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20851 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20852 + str = "PAX: suspicious stack segment fault";
20853 +#endif
20854 +
20855 die(str, regs, error_code);
20856 }
20857 +
20858 +#ifdef CONFIG_PAX_REFCOUNT
20859 + if (trapnr == 4)
20860 + pax_report_refcount_overflow(regs);
20861 +#endif
20862 +
20863 return 0;
20864 }
20865
20866 @@ -139,7 +145,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
20867 }
20868
20869 static void __kprobes
20870 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20871 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20872 long error_code, siginfo_t *info)
20873 {
20874 struct task_struct *tsk = current;
20875 @@ -163,7 +169,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20876 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
20877 printk_ratelimit()) {
20878 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20879 - tsk->comm, tsk->pid, str,
20880 + tsk->comm, task_pid_nr(tsk), str,
20881 regs->ip, regs->sp, error_code);
20882 print_vma_addr(" in ", regs->ip);
20883 pr_cont("\n");
20884 @@ -269,7 +275,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
20885 conditional_sti(regs);
20886
20887 #ifdef CONFIG_X86_32
20888 - if (regs->flags & X86_VM_MASK) {
20889 + if (v8086_mode(regs)) {
20890 local_irq_enable();
20891 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
20892 goto exit;
20893 @@ -277,18 +283,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
20894 #endif
20895
20896 tsk = current;
20897 - if (!user_mode(regs)) {
20898 + if (!user_mode_novm(regs)) {
20899 if (fixup_exception(regs))
20900 goto exit;
20901
20902 tsk->thread.error_code = error_code;
20903 tsk->thread.trap_nr = X86_TRAP_GP;
20904 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
20905 - X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
20906 + X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
20907 +
20908 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20909 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20910 + die("PAX: suspicious general protection fault", regs, error_code);
20911 + else
20912 +#endif
20913 +
20914 die("general protection fault", regs, error_code);
20915 + }
20916 goto exit;
20917 }
20918
20919 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20920 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20921 + struct mm_struct *mm = tsk->mm;
20922 + unsigned long limit;
20923 +
20924 + down_write(&mm->mmap_sem);
20925 + limit = mm->context.user_cs_limit;
20926 + if (limit < TASK_SIZE) {
20927 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20928 + up_write(&mm->mmap_sem);
20929 + return;
20930 + }
20931 + up_write(&mm->mmap_sem);
20932 + }
20933 +#endif
20934 +
20935 tsk->thread.error_code = error_code;
20936 tsk->thread.trap_nr = X86_TRAP_GP;
20937
20938 @@ -443,7 +473,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20939 /* It's safe to allow irq's after DR6 has been saved */
20940 preempt_conditional_sti(regs);
20941
20942 - if (regs->flags & X86_VM_MASK) {
20943 + if (v8086_mode(regs)) {
20944 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
20945 X86_TRAP_DB);
20946 preempt_conditional_cli(regs);
20947 @@ -458,7 +488,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20948 * We already checked v86 mode above, so we can check for kernel mode
20949 * by just checking the CPL of CS.
20950 */
20951 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
20952 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
20953 tsk->thread.debugreg6 &= ~DR_STEP;
20954 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
20955 regs->flags &= ~X86_EFLAGS_TF;
20956 @@ -490,7 +520,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
20957 return;
20958 conditional_sti(regs);
20959
20960 - if (!user_mode_vm(regs))
20961 + if (!user_mode(regs))
20962 {
20963 if (!fixup_exception(regs)) {
20964 task->thread.error_code = error_code;
20965 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
20966 index aafa555..a04691a 100644
20967 --- a/arch/x86/kernel/uprobes.c
20968 +++ b/arch/x86/kernel/uprobes.c
20969 @@ -614,7 +614,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
20970 int ret = NOTIFY_DONE;
20971
20972 /* We are only interested in userspace traps */
20973 - if (regs && !user_mode_vm(regs))
20974 + if (regs && !user_mode(regs))
20975 return NOTIFY_DONE;
20976
20977 switch (val) {
20978 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20979 index b9242ba..50c5edd 100644
20980 --- a/arch/x86/kernel/verify_cpu.S
20981 +++ b/arch/x86/kernel/verify_cpu.S
20982 @@ -20,6 +20,7 @@
20983 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20984 * arch/x86/kernel/trampoline_64.S: secondary processor verification
20985 * arch/x86/kernel/head_32.S: processor startup
20986 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20987 *
20988 * verify_cpu, returns the status of longmode and SSE in register %eax.
20989 * 0: Success 1: Failure
20990 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20991 index 5c9687b..5f857d3 100644
20992 --- a/arch/x86/kernel/vm86_32.c
20993 +++ b/arch/x86/kernel/vm86_32.c
20994 @@ -43,6 +43,7 @@
20995 #include <linux/ptrace.h>
20996 #include <linux/audit.h>
20997 #include <linux/stddef.h>
20998 +#include <linux/grsecurity.h>
20999
21000 #include <asm/uaccess.h>
21001 #include <asm/io.h>
21002 @@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
21003 do_exit(SIGSEGV);
21004 }
21005
21006 - tss = &per_cpu(init_tss, get_cpu());
21007 + tss = init_tss + get_cpu();
21008 current->thread.sp0 = current->thread.saved_sp0;
21009 current->thread.sysenter_cs = __KERNEL_CS;
21010 load_sp0(tss, &current->thread);
21011 @@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
21012 struct task_struct *tsk;
21013 int tmp, ret = -EPERM;
21014
21015 +#ifdef CONFIG_GRKERNSEC_VM86
21016 + if (!capable(CAP_SYS_RAWIO)) {
21017 + gr_handle_vm86();
21018 + goto out;
21019 + }
21020 +#endif
21021 +
21022 tsk = current;
21023 if (tsk->thread.saved_sp0)
21024 goto out;
21025 @@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
21026 int tmp, ret;
21027 struct vm86plus_struct __user *v86;
21028
21029 +#ifdef CONFIG_GRKERNSEC_VM86
21030 + if (!capable(CAP_SYS_RAWIO)) {
21031 + gr_handle_vm86();
21032 + ret = -EPERM;
21033 + goto out;
21034 + }
21035 +#endif
21036 +
21037 tsk = current;
21038 switch (cmd) {
21039 case VM86_REQUEST_IRQ:
21040 @@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
21041 tsk->thread.saved_fs = info->regs32->fs;
21042 tsk->thread.saved_gs = get_user_gs(info->regs32);
21043
21044 - tss = &per_cpu(init_tss, get_cpu());
21045 + tss = init_tss + get_cpu();
21046 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
21047 if (cpu_has_sep)
21048 tsk->thread.sysenter_cs = 0;
21049 @@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
21050 goto cannot_handle;
21051 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
21052 goto cannot_handle;
21053 - intr_ptr = (unsigned long __user *) (i << 2);
21054 + intr_ptr = (__force unsigned long __user *) (i << 2);
21055 if (get_user(segoffs, intr_ptr))
21056 goto cannot_handle;
21057 if ((segoffs >> 16) == BIOSSEG)
21058 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
21059 index 22a1530..8fbaaad 100644
21060 --- a/arch/x86/kernel/vmlinux.lds.S
21061 +++ b/arch/x86/kernel/vmlinux.lds.S
21062 @@ -26,6 +26,13 @@
21063 #include <asm/page_types.h>
21064 #include <asm/cache.h>
21065 #include <asm/boot.h>
21066 +#include <asm/segment.h>
21067 +
21068 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21069 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
21070 +#else
21071 +#define __KERNEL_TEXT_OFFSET 0
21072 +#endif
21073
21074 #undef i386 /* in case the preprocessor is a 32bit one */
21075
21076 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
21077
21078 PHDRS {
21079 text PT_LOAD FLAGS(5); /* R_E */
21080 +#ifdef CONFIG_X86_32
21081 + module PT_LOAD FLAGS(5); /* R_E */
21082 +#endif
21083 +#ifdef CONFIG_XEN
21084 + rodata PT_LOAD FLAGS(5); /* R_E */
21085 +#else
21086 + rodata PT_LOAD FLAGS(4); /* R__ */
21087 +#endif
21088 data PT_LOAD FLAGS(6); /* RW_ */
21089 -#ifdef CONFIG_X86_64
21090 + init.begin PT_LOAD FLAGS(6); /* RW_ */
21091 #ifdef CONFIG_SMP
21092 percpu PT_LOAD FLAGS(6); /* RW_ */
21093 #endif
21094 + text.init PT_LOAD FLAGS(5); /* R_E */
21095 + text.exit PT_LOAD FLAGS(5); /* R_E */
21096 init PT_LOAD FLAGS(7); /* RWE */
21097 -#endif
21098 note PT_NOTE FLAGS(0); /* ___ */
21099 }
21100
21101 SECTIONS
21102 {
21103 #ifdef CONFIG_X86_32
21104 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
21105 - phys_startup_32 = startup_32 - LOAD_OFFSET;
21106 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
21107 #else
21108 - . = __START_KERNEL;
21109 - phys_startup_64 = startup_64 - LOAD_OFFSET;
21110 + . = __START_KERNEL;
21111 #endif
21112
21113 /* Text and read-only data */
21114 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
21115 - _text = .;
21116 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21117 /* bootstrapping code */
21118 +#ifdef CONFIG_X86_32
21119 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21120 +#else
21121 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21122 +#endif
21123 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21124 + _text = .;
21125 HEAD_TEXT
21126 #ifdef CONFIG_X86_32
21127 . = ALIGN(PAGE_SIZE);
21128 @@ -108,13 +128,48 @@ SECTIONS
21129 IRQENTRY_TEXT
21130 *(.fixup)
21131 *(.gnu.warning)
21132 - /* End of text section */
21133 - _etext = .;
21134 } :text = 0x9090
21135
21136 - NOTES :text :note
21137 + . += __KERNEL_TEXT_OFFSET;
21138
21139 - EXCEPTION_TABLE(16) :text = 0x9090
21140 +#ifdef CONFIG_X86_32
21141 + . = ALIGN(PAGE_SIZE);
21142 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21143 +
21144 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21145 + MODULES_EXEC_VADDR = .;
21146 + BYTE(0)
21147 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21148 + . = ALIGN(HPAGE_SIZE) - 1;
21149 + MODULES_EXEC_END = .;
21150 +#endif
21151 +
21152 + } :module
21153 +#endif
21154 +
21155 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21156 + /* End of text section */
21157 + BYTE(0)
21158 + _etext = . - __KERNEL_TEXT_OFFSET;
21159 + }
21160 +
21161 +#ifdef CONFIG_X86_32
21162 + . = ALIGN(PAGE_SIZE);
21163 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21164 + *(.idt)
21165 + . = ALIGN(PAGE_SIZE);
21166 + *(.empty_zero_page)
21167 + *(.initial_pg_fixmap)
21168 + *(.initial_pg_pmd)
21169 + *(.initial_page_table)
21170 + *(.swapper_pg_dir)
21171 + } :rodata
21172 +#endif
21173 +
21174 + . = ALIGN(PAGE_SIZE);
21175 + NOTES :rodata :note
21176 +
21177 + EXCEPTION_TABLE(16) :rodata
21178
21179 #if defined(CONFIG_DEBUG_RODATA)
21180 /* .text should occupy whole number of pages */
21181 @@ -126,16 +181,20 @@ SECTIONS
21182
21183 /* Data */
21184 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21185 +
21186 +#ifdef CONFIG_PAX_KERNEXEC
21187 + . = ALIGN(HPAGE_SIZE);
21188 +#else
21189 + . = ALIGN(PAGE_SIZE);
21190 +#endif
21191 +
21192 /* Start of data section */
21193 _sdata = .;
21194
21195 /* init_task */
21196 INIT_TASK_DATA(THREAD_SIZE)
21197
21198 -#ifdef CONFIG_X86_32
21199 - /* 32 bit has nosave before _edata */
21200 NOSAVE_DATA
21201 -#endif
21202
21203 PAGE_ALIGNED_DATA(PAGE_SIZE)
21204
21205 @@ -176,12 +235,19 @@ SECTIONS
21206 #endif /* CONFIG_X86_64 */
21207
21208 /* Init code and data - will be freed after init */
21209 - . = ALIGN(PAGE_SIZE);
21210 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21211 + BYTE(0)
21212 +
21213 +#ifdef CONFIG_PAX_KERNEXEC
21214 + . = ALIGN(HPAGE_SIZE);
21215 +#else
21216 + . = ALIGN(PAGE_SIZE);
21217 +#endif
21218 +
21219 __init_begin = .; /* paired with __init_end */
21220 - }
21221 + } :init.begin
21222
21223 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21224 +#ifdef CONFIG_SMP
21225 /*
21226 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21227 * output PHDR, so the next output section - .init.text - should
21228 @@ -190,12 +256,27 @@ SECTIONS
21229 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
21230 #endif
21231
21232 - INIT_TEXT_SECTION(PAGE_SIZE)
21233 -#ifdef CONFIG_X86_64
21234 - :init
21235 -#endif
21236 + . = ALIGN(PAGE_SIZE);
21237 + init_begin = .;
21238 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21239 + VMLINUX_SYMBOL(_sinittext) = .;
21240 + INIT_TEXT
21241 + VMLINUX_SYMBOL(_einittext) = .;
21242 + . = ALIGN(PAGE_SIZE);
21243 + } :text.init
21244
21245 - INIT_DATA_SECTION(16)
21246 + /*
21247 + * .exit.text is discard at runtime, not link time, to deal with
21248 + * references from .altinstructions and .eh_frame
21249 + */
21250 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21251 + EXIT_TEXT
21252 + . = ALIGN(16);
21253 + } :text.exit
21254 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21255 +
21256 + . = ALIGN(PAGE_SIZE);
21257 + INIT_DATA_SECTION(16) :init
21258
21259 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21260 __x86_cpu_dev_start = .;
21261 @@ -257,19 +338,12 @@ SECTIONS
21262 }
21263
21264 . = ALIGN(8);
21265 - /*
21266 - * .exit.text is discard at runtime, not link time, to deal with
21267 - * references from .altinstructions and .eh_frame
21268 - */
21269 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21270 - EXIT_TEXT
21271 - }
21272
21273 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21274 EXIT_DATA
21275 }
21276
21277 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21278 +#ifndef CONFIG_SMP
21279 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
21280 #endif
21281
21282 @@ -288,16 +362,10 @@ SECTIONS
21283 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
21284 __smp_locks = .;
21285 *(.smp_locks)
21286 - . = ALIGN(PAGE_SIZE);
21287 __smp_locks_end = .;
21288 + . = ALIGN(PAGE_SIZE);
21289 }
21290
21291 -#ifdef CONFIG_X86_64
21292 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21293 - NOSAVE_DATA
21294 - }
21295 -#endif
21296 -
21297 /* BSS */
21298 . = ALIGN(PAGE_SIZE);
21299 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21300 @@ -313,6 +381,7 @@ SECTIONS
21301 __brk_base = .;
21302 . += 64 * 1024; /* 64k alignment slop space */
21303 *(.brk_reservation) /* areas brk users have reserved */
21304 + . = ALIGN(HPAGE_SIZE);
21305 __brk_limit = .;
21306 }
21307
21308 @@ -339,13 +408,12 @@ SECTIONS
21309 * for the boot processor.
21310 */
21311 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
21312 -INIT_PER_CPU(gdt_page);
21313 INIT_PER_CPU(irq_stack_union);
21314
21315 /*
21316 * Build-time check on the image size:
21317 */
21318 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21319 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21320 "kernel image bigger than KERNEL_IMAGE_SIZE");
21321
21322 #ifdef CONFIG_SMP
21323 diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
21324 index 992f890..fc38904 100644
21325 --- a/arch/x86/kernel/vsmp_64.c
21326 +++ b/arch/x86/kernel/vsmp_64.c
21327 @@ -217,8 +217,8 @@ static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
21328 static void vsmp_apic_post_init(void)
21329 {
21330 /* need to update phys_pkg_id */
21331 - apic->phys_pkg_id = apicid_phys_pkg_id;
21332 - apic->vector_allocation_domain = fill_vector_allocation_domain;
21333 + *(void **)&apic->phys_pkg_id = apicid_phys_pkg_id;
21334 + *(void **)&apic->vector_allocation_domain = fill_vector_allocation_domain;
21335 }
21336
21337 void __init vsmp_init(void)
21338 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21339 index 3a3e8c9..1af9465 100644
21340 --- a/arch/x86/kernel/vsyscall_64.c
21341 +++ b/arch/x86/kernel/vsyscall_64.c
21342 @@ -56,15 +56,13 @@
21343 DEFINE_VVAR(int, vgetcpu_mode);
21344 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
21345
21346 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
21347 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
21348
21349 static int __init vsyscall_setup(char *str)
21350 {
21351 if (str) {
21352 if (!strcmp("emulate", str))
21353 vsyscall_mode = EMULATE;
21354 - else if (!strcmp("native", str))
21355 - vsyscall_mode = NATIVE;
21356 else if (!strcmp("none", str))
21357 vsyscall_mode = NONE;
21358 else
21359 @@ -315,8 +313,7 @@ done:
21360 return true;
21361
21362 sigsegv:
21363 - force_sig(SIGSEGV, current);
21364 - return true;
21365 + do_group_exit(SIGKILL);
21366 }
21367
21368 /*
21369 @@ -369,10 +366,7 @@ void __init map_vsyscall(void)
21370 extern char __vvar_page;
21371 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
21372
21373 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
21374 - vsyscall_mode == NATIVE
21375 - ? PAGE_KERNEL_VSYSCALL
21376 - : PAGE_KERNEL_VVAR);
21377 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
21378 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
21379 (unsigned long)VSYSCALL_START);
21380
21381 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21382 index 1330dd1..d220b99 100644
21383 --- a/arch/x86/kernel/x8664_ksyms_64.c
21384 +++ b/arch/x86/kernel/x8664_ksyms_64.c
21385 @@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
21386 EXPORT_SYMBOL(copy_user_generic_unrolled);
21387 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
21388 EXPORT_SYMBOL(__copy_user_nocache);
21389 -EXPORT_SYMBOL(_copy_from_user);
21390 -EXPORT_SYMBOL(_copy_to_user);
21391
21392 EXPORT_SYMBOL(copy_page);
21393 EXPORT_SYMBOL(clear_page);
21394 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21395 index ada87a3..afea76d 100644
21396 --- a/arch/x86/kernel/xsave.c
21397 +++ b/arch/x86/kernel/xsave.c
21398 @@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
21399 {
21400 int err;
21401
21402 + buf = (struct xsave_struct __user *)____m(buf);
21403 if (use_xsave())
21404 err = xsave_user(buf);
21405 else if (use_fxsr())
21406 @@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
21407 */
21408 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
21409 {
21410 + buf = (void __user *)____m(buf);
21411 if (use_xsave()) {
21412 if ((unsigned long)buf % 64 || fx_only) {
21413 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
21414 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
21415 index ec79e77..420f5cc 100644
21416 --- a/arch/x86/kvm/cpuid.c
21417 +++ b/arch/x86/kvm/cpuid.c
21418 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21419 struct kvm_cpuid2 *cpuid,
21420 struct kvm_cpuid_entry2 __user *entries)
21421 {
21422 - int r;
21423 + int r, i;
21424
21425 r = -E2BIG;
21426 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21427 goto out;
21428 r = -EFAULT;
21429 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21430 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21431 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21432 goto out;
21433 + for (i = 0; i < cpuid->nent; ++i) {
21434 + struct kvm_cpuid_entry2 cpuid_entry;
21435 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21436 + goto out;
21437 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
21438 + }
21439 vcpu->arch.cpuid_nent = cpuid->nent;
21440 kvm_apic_set_version(vcpu);
21441 kvm_x86_ops->cpuid_update(vcpu);
21442 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21443 struct kvm_cpuid2 *cpuid,
21444 struct kvm_cpuid_entry2 __user *entries)
21445 {
21446 - int r;
21447 + int r, i;
21448
21449 r = -E2BIG;
21450 if (cpuid->nent < vcpu->arch.cpuid_nent)
21451 goto out;
21452 r = -EFAULT;
21453 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21454 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21455 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21456 goto out;
21457 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21458 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21459 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21460 + goto out;
21461 + }
21462 return 0;
21463
21464 out:
21465 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21466 index bba39bf..296540a 100644
21467 --- a/arch/x86/kvm/emulate.c
21468 +++ b/arch/x86/kvm/emulate.c
21469 @@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21470
21471 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
21472 do { \
21473 + unsigned long _tmp; \
21474 __asm__ __volatile__ ( \
21475 _PRE_EFLAGS("0", "4", "2") \
21476 _op _suffix " %"_x"3,%1; " \
21477 @@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21478 /* Raw emulation: instruction has two explicit operands. */
21479 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
21480 do { \
21481 - unsigned long _tmp; \
21482 - \
21483 switch ((ctxt)->dst.bytes) { \
21484 case 2: \
21485 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
21486 @@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21487
21488 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21489 do { \
21490 - unsigned long _tmp; \
21491 switch ((ctxt)->dst.bytes) { \
21492 case 1: \
21493 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
21494 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21495 index 43e9fad..3b7c059 100644
21496 --- a/arch/x86/kvm/lapic.c
21497 +++ b/arch/x86/kvm/lapic.c
21498 @@ -55,7 +55,7 @@
21499 #define APIC_BUS_CYCLE_NS 1
21500
21501 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21502 -#define apic_debug(fmt, arg...)
21503 +#define apic_debug(fmt, arg...) do {} while (0)
21504
21505 #define APIC_LVT_NUM 6
21506 /* 14 is the version for Xeon and Pentium 8.4.8*/
21507 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21508 index 714e2c0..3f7a086 100644
21509 --- a/arch/x86/kvm/paging_tmpl.h
21510 +++ b/arch/x86/kvm/paging_tmpl.h
21511 @@ -208,7 +208,7 @@ retry_walk:
21512 if (unlikely(kvm_is_error_hva(host_addr)))
21513 goto error;
21514
21515 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
21516 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
21517 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
21518 goto error;
21519 walker->ptep_user[walker->level - 1] = ptep_user;
21520 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21521 index d017df3..61ae42e 100644
21522 --- a/arch/x86/kvm/svm.c
21523 +++ b/arch/x86/kvm/svm.c
21524 @@ -3500,7 +3500,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21525 int cpu = raw_smp_processor_id();
21526
21527 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
21528 +
21529 + pax_open_kernel();
21530 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
21531 + pax_close_kernel();
21532 +
21533 load_TR_desc();
21534 }
21535
21536 @@ -3874,6 +3878,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
21537 #endif
21538 #endif
21539
21540 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21541 + __set_fs(current_thread_info()->addr_limit);
21542 +#endif
21543 +
21544 reload_tss(vcpu);
21545
21546 local_irq_disable();
21547 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21548 index f858159..4ab7dba 100644
21549 --- a/arch/x86/kvm/vmx.c
21550 +++ b/arch/x86/kvm/vmx.c
21551 @@ -1332,7 +1332,11 @@ static void reload_tss(void)
21552 struct desc_struct *descs;
21553
21554 descs = (void *)gdt->address;
21555 +
21556 + pax_open_kernel();
21557 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21558 + pax_close_kernel();
21559 +
21560 load_TR_desc();
21561 }
21562
21563 @@ -1546,6 +1550,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
21564 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
21565 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
21566
21567 +#ifdef CONFIG_PAX_PER_CPU_PGD
21568 + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
21569 +#endif
21570 +
21571 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
21572 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
21573 vmx->loaded_vmcs->cpu = cpu;
21574 @@ -2669,8 +2677,11 @@ static __init int hardware_setup(void)
21575 if (!cpu_has_vmx_flexpriority())
21576 flexpriority_enabled = 0;
21577
21578 - if (!cpu_has_vmx_tpr_shadow())
21579 - kvm_x86_ops->update_cr8_intercept = NULL;
21580 + if (!cpu_has_vmx_tpr_shadow()) {
21581 + pax_open_kernel();
21582 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21583 + pax_close_kernel();
21584 + }
21585
21586 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21587 kvm_disable_largepages();
21588 @@ -3712,7 +3723,10 @@ static void vmx_set_constant_host_state(void)
21589
21590 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
21591 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
21592 +
21593 +#ifndef CONFIG_PAX_PER_CPU_PGD
21594 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
21595 +#endif
21596
21597 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
21598 #ifdef CONFIG_X86_64
21599 @@ -3733,7 +3747,7 @@ static void vmx_set_constant_host_state(void)
21600 native_store_idt(&dt);
21601 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
21602
21603 - vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
21604 + vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
21605
21606 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
21607 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
21608 @@ -6279,6 +6293,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21609 "jmp 2f \n\t"
21610 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
21611 "2: "
21612 +
21613 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21614 + "ljmp %[cs],$3f\n\t"
21615 + "3: "
21616 +#endif
21617 +
21618 /* Save guest registers, load host registers, keep flags */
21619 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
21620 "pop %0 \n\t"
21621 @@ -6331,6 +6351,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21622 #endif
21623 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
21624 [wordsize]"i"(sizeof(ulong))
21625 +
21626 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21627 + ,[cs]"i"(__KERNEL_CS)
21628 +#endif
21629 +
21630 : "cc", "memory"
21631 #ifdef CONFIG_X86_64
21632 , "rax", "rbx", "rdi", "rsi"
21633 @@ -6344,7 +6369,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21634 if (debugctlmsr)
21635 update_debugctlmsr(debugctlmsr);
21636
21637 -#ifndef CONFIG_X86_64
21638 +#ifdef CONFIG_X86_32
21639 /*
21640 * The sysexit path does not restore ds/es, so we must set them to
21641 * a reasonable value ourselves.
21642 @@ -6353,8 +6378,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
21643 * may be executed in interrupt context, which saves and restore segments
21644 * around it, nullifying its effect.
21645 */
21646 - loadsegment(ds, __USER_DS);
21647 - loadsegment(es, __USER_DS);
21648 + loadsegment(ds, __KERNEL_DS);
21649 + loadsegment(es, __KERNEL_DS);
21650 + loadsegment(ss, __KERNEL_DS);
21651 +
21652 +#ifdef CONFIG_PAX_KERNEXEC
21653 + loadsegment(fs, __KERNEL_PERCPU);
21654 +#endif
21655 +
21656 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21657 + __set_fs(current_thread_info()->addr_limit);
21658 +#endif
21659 +
21660 #endif
21661
21662 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
21663 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21664 index 4f76417..93429b5 100644
21665 --- a/arch/x86/kvm/x86.c
21666 +++ b/arch/x86/kvm/x86.c
21667 @@ -1390,8 +1390,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
21668 {
21669 struct kvm *kvm = vcpu->kvm;
21670 int lm = is_long_mode(vcpu);
21671 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
21672 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
21673 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
21674 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
21675 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
21676 : kvm->arch.xen_hvm_config.blob_size_32;
21677 u32 page_num = data & ~PAGE_MASK;
21678 @@ -2255,6 +2255,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
21679 if (n < msr_list.nmsrs)
21680 goto out;
21681 r = -EFAULT;
21682 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
21683 + goto out;
21684 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
21685 num_msrs_to_save * sizeof(u32)))
21686 goto out;
21687 @@ -2379,7 +2381,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21688 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21689 struct kvm_interrupt *irq)
21690 {
21691 - if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
21692 + if (irq->irq >= KVM_NR_INTERRUPTS)
21693 return -EINVAL;
21694 if (irqchip_in_kernel(vcpu->kvm))
21695 return -ENXIO;
21696 @@ -4881,7 +4883,7 @@ static void kvm_set_mmio_spte_mask(void)
21697 kvm_mmu_set_mmio_spte_mask(mask);
21698 }
21699
21700 -int kvm_arch_init(void *opaque)
21701 +int kvm_arch_init(const void *opaque)
21702 {
21703 int r;
21704 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21705 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21706 index 642d880..cc9ebac 100644
21707 --- a/arch/x86/lguest/boot.c
21708 +++ b/arch/x86/lguest/boot.c
21709 @@ -1116,12 +1116,12 @@ static u32 lguest_apic_safe_wait_icr_idle(void)
21710
21711 static void set_lguest_basic_apic_ops(void)
21712 {
21713 - apic->read = lguest_apic_read;
21714 - apic->write = lguest_apic_write;
21715 - apic->icr_read = lguest_apic_icr_read;
21716 - apic->icr_write = lguest_apic_icr_write;
21717 - apic->wait_icr_idle = lguest_apic_wait_icr_idle;
21718 - apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
21719 + *(void **)&apic->read = lguest_apic_read;
21720 + *(void **)&apic->write = lguest_apic_write;
21721 + *(void **)&apic->icr_read = lguest_apic_icr_read;
21722 + *(void **)&apic->icr_write = lguest_apic_icr_write;
21723 + *(void **)&apic->wait_icr_idle = lguest_apic_wait_icr_idle;
21724 + *(void **)&apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
21725 };
21726 #endif
21727
21728 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21729 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21730 * Launcher to reboot us.
21731 */
21732 -static void lguest_restart(char *reason)
21733 +static __noreturn void lguest_restart(char *reason)
21734 {
21735 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
21736 + BUG();
21737 }
21738
21739 /*G:050
21740 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
21741 index 00933d5..3a64af9 100644
21742 --- a/arch/x86/lib/atomic64_386_32.S
21743 +++ b/arch/x86/lib/atomic64_386_32.S
21744 @@ -48,6 +48,10 @@ BEGIN(read)
21745 movl (v), %eax
21746 movl 4(v), %edx
21747 RET_ENDP
21748 +BEGIN(read_unchecked)
21749 + movl (v), %eax
21750 + movl 4(v), %edx
21751 +RET_ENDP
21752 #undef v
21753
21754 #define v %esi
21755 @@ -55,6 +59,10 @@ BEGIN(set)
21756 movl %ebx, (v)
21757 movl %ecx, 4(v)
21758 RET_ENDP
21759 +BEGIN(set_unchecked)
21760 + movl %ebx, (v)
21761 + movl %ecx, 4(v)
21762 +RET_ENDP
21763 #undef v
21764
21765 #define v %esi
21766 @@ -70,6 +78,20 @@ RET_ENDP
21767 BEGIN(add)
21768 addl %eax, (v)
21769 adcl %edx, 4(v)
21770 +
21771 +#ifdef CONFIG_PAX_REFCOUNT
21772 + jno 0f
21773 + subl %eax, (v)
21774 + sbbl %edx, 4(v)
21775 + int $4
21776 +0:
21777 + _ASM_EXTABLE(0b, 0b)
21778 +#endif
21779 +
21780 +RET_ENDP
21781 +BEGIN(add_unchecked)
21782 + addl %eax, (v)
21783 + adcl %edx, 4(v)
21784 RET_ENDP
21785 #undef v
21786
21787 @@ -77,6 +99,24 @@ RET_ENDP
21788 BEGIN(add_return)
21789 addl (v), %eax
21790 adcl 4(v), %edx
21791 +
21792 +#ifdef CONFIG_PAX_REFCOUNT
21793 + into
21794 +1234:
21795 + _ASM_EXTABLE(1234b, 2f)
21796 +#endif
21797 +
21798 + movl %eax, (v)
21799 + movl %edx, 4(v)
21800 +
21801 +#ifdef CONFIG_PAX_REFCOUNT
21802 +2:
21803 +#endif
21804 +
21805 +RET_ENDP
21806 +BEGIN(add_return_unchecked)
21807 + addl (v), %eax
21808 + adcl 4(v), %edx
21809 movl %eax, (v)
21810 movl %edx, 4(v)
21811 RET_ENDP
21812 @@ -86,6 +126,20 @@ RET_ENDP
21813 BEGIN(sub)
21814 subl %eax, (v)
21815 sbbl %edx, 4(v)
21816 +
21817 +#ifdef CONFIG_PAX_REFCOUNT
21818 + jno 0f
21819 + addl %eax, (v)
21820 + adcl %edx, 4(v)
21821 + int $4
21822 +0:
21823 + _ASM_EXTABLE(0b, 0b)
21824 +#endif
21825 +
21826 +RET_ENDP
21827 +BEGIN(sub_unchecked)
21828 + subl %eax, (v)
21829 + sbbl %edx, 4(v)
21830 RET_ENDP
21831 #undef v
21832
21833 @@ -96,6 +150,27 @@ BEGIN(sub_return)
21834 sbbl $0, %edx
21835 addl (v), %eax
21836 adcl 4(v), %edx
21837 +
21838 +#ifdef CONFIG_PAX_REFCOUNT
21839 + into
21840 +1234:
21841 + _ASM_EXTABLE(1234b, 2f)
21842 +#endif
21843 +
21844 + movl %eax, (v)
21845 + movl %edx, 4(v)
21846 +
21847 +#ifdef CONFIG_PAX_REFCOUNT
21848 +2:
21849 +#endif
21850 +
21851 +RET_ENDP
21852 +BEGIN(sub_return_unchecked)
21853 + negl %edx
21854 + negl %eax
21855 + sbbl $0, %edx
21856 + addl (v), %eax
21857 + adcl 4(v), %edx
21858 movl %eax, (v)
21859 movl %edx, 4(v)
21860 RET_ENDP
21861 @@ -105,6 +180,20 @@ RET_ENDP
21862 BEGIN(inc)
21863 addl $1, (v)
21864 adcl $0, 4(v)
21865 +
21866 +#ifdef CONFIG_PAX_REFCOUNT
21867 + jno 0f
21868 + subl $1, (v)
21869 + sbbl $0, 4(v)
21870 + int $4
21871 +0:
21872 + _ASM_EXTABLE(0b, 0b)
21873 +#endif
21874 +
21875 +RET_ENDP
21876 +BEGIN(inc_unchecked)
21877 + addl $1, (v)
21878 + adcl $0, 4(v)
21879 RET_ENDP
21880 #undef v
21881
21882 @@ -114,6 +203,26 @@ BEGIN(inc_return)
21883 movl 4(v), %edx
21884 addl $1, %eax
21885 adcl $0, %edx
21886 +
21887 +#ifdef CONFIG_PAX_REFCOUNT
21888 + into
21889 +1234:
21890 + _ASM_EXTABLE(1234b, 2f)
21891 +#endif
21892 +
21893 + movl %eax, (v)
21894 + movl %edx, 4(v)
21895 +
21896 +#ifdef CONFIG_PAX_REFCOUNT
21897 +2:
21898 +#endif
21899 +
21900 +RET_ENDP
21901 +BEGIN(inc_return_unchecked)
21902 + movl (v), %eax
21903 + movl 4(v), %edx
21904 + addl $1, %eax
21905 + adcl $0, %edx
21906 movl %eax, (v)
21907 movl %edx, 4(v)
21908 RET_ENDP
21909 @@ -123,6 +232,20 @@ RET_ENDP
21910 BEGIN(dec)
21911 subl $1, (v)
21912 sbbl $0, 4(v)
21913 +
21914 +#ifdef CONFIG_PAX_REFCOUNT
21915 + jno 0f
21916 + addl $1, (v)
21917 + adcl $0, 4(v)
21918 + int $4
21919 +0:
21920 + _ASM_EXTABLE(0b, 0b)
21921 +#endif
21922 +
21923 +RET_ENDP
21924 +BEGIN(dec_unchecked)
21925 + subl $1, (v)
21926 + sbbl $0, 4(v)
21927 RET_ENDP
21928 #undef v
21929
21930 @@ -132,6 +255,26 @@ BEGIN(dec_return)
21931 movl 4(v), %edx
21932 subl $1, %eax
21933 sbbl $0, %edx
21934 +
21935 +#ifdef CONFIG_PAX_REFCOUNT
21936 + into
21937 +1234:
21938 + _ASM_EXTABLE(1234b, 2f)
21939 +#endif
21940 +
21941 + movl %eax, (v)
21942 + movl %edx, 4(v)
21943 +
21944 +#ifdef CONFIG_PAX_REFCOUNT
21945 +2:
21946 +#endif
21947 +
21948 +RET_ENDP
21949 +BEGIN(dec_return_unchecked)
21950 + movl (v), %eax
21951 + movl 4(v), %edx
21952 + subl $1, %eax
21953 + sbbl $0, %edx
21954 movl %eax, (v)
21955 movl %edx, 4(v)
21956 RET_ENDP
21957 @@ -143,6 +286,13 @@ BEGIN(add_unless)
21958 adcl %edx, %edi
21959 addl (v), %eax
21960 adcl 4(v), %edx
21961 +
21962 +#ifdef CONFIG_PAX_REFCOUNT
21963 + into
21964 +1234:
21965 + _ASM_EXTABLE(1234b, 2f)
21966 +#endif
21967 +
21968 cmpl %eax, %ecx
21969 je 3f
21970 1:
21971 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
21972 1:
21973 addl $1, %eax
21974 adcl $0, %edx
21975 +
21976 +#ifdef CONFIG_PAX_REFCOUNT
21977 + into
21978 +1234:
21979 + _ASM_EXTABLE(1234b, 2f)
21980 +#endif
21981 +
21982 movl %eax, (v)
21983 movl %edx, 4(v)
21984 movl $1, %eax
21985 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
21986 movl 4(v), %edx
21987 subl $1, %eax
21988 sbbl $0, %edx
21989 +
21990 +#ifdef CONFIG_PAX_REFCOUNT
21991 + into
21992 +1234:
21993 + _ASM_EXTABLE(1234b, 1f)
21994 +#endif
21995 +
21996 js 1f
21997 movl %eax, (v)
21998 movl %edx, 4(v)
21999 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
22000 index f5cc9eb..51fa319 100644
22001 --- a/arch/x86/lib/atomic64_cx8_32.S
22002 +++ b/arch/x86/lib/atomic64_cx8_32.S
22003 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
22004 CFI_STARTPROC
22005
22006 read64 %ecx
22007 + pax_force_retaddr
22008 ret
22009 CFI_ENDPROC
22010 ENDPROC(atomic64_read_cx8)
22011
22012 +ENTRY(atomic64_read_unchecked_cx8)
22013 + CFI_STARTPROC
22014 +
22015 + read64 %ecx
22016 + pax_force_retaddr
22017 + ret
22018 + CFI_ENDPROC
22019 +ENDPROC(atomic64_read_unchecked_cx8)
22020 +
22021 ENTRY(atomic64_set_cx8)
22022 CFI_STARTPROC
22023
22024 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
22025 cmpxchg8b (%esi)
22026 jne 1b
22027
22028 + pax_force_retaddr
22029 ret
22030 CFI_ENDPROC
22031 ENDPROC(atomic64_set_cx8)
22032
22033 +ENTRY(atomic64_set_unchecked_cx8)
22034 + CFI_STARTPROC
22035 +
22036 +1:
22037 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
22038 + * are atomic on 586 and newer */
22039 + cmpxchg8b (%esi)
22040 + jne 1b
22041 +
22042 + pax_force_retaddr
22043 + ret
22044 + CFI_ENDPROC
22045 +ENDPROC(atomic64_set_unchecked_cx8)
22046 +
22047 ENTRY(atomic64_xchg_cx8)
22048 CFI_STARTPROC
22049
22050 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
22051 cmpxchg8b (%esi)
22052 jne 1b
22053
22054 + pax_force_retaddr
22055 ret
22056 CFI_ENDPROC
22057 ENDPROC(atomic64_xchg_cx8)
22058
22059 -.macro addsub_return func ins insc
22060 -ENTRY(atomic64_\func\()_return_cx8)
22061 +.macro addsub_return func ins insc unchecked=""
22062 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
22063 CFI_STARTPROC
22064 SAVE ebp
22065 SAVE ebx
22066 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
22067 movl %edx, %ecx
22068 \ins\()l %esi, %ebx
22069 \insc\()l %edi, %ecx
22070 +
22071 +.ifb \unchecked
22072 +#ifdef CONFIG_PAX_REFCOUNT
22073 + into
22074 +2:
22075 + _ASM_EXTABLE(2b, 3f)
22076 +#endif
22077 +.endif
22078 +
22079 LOCK_PREFIX
22080 cmpxchg8b (%ebp)
22081 jne 1b
22082 -
22083 -10:
22084 movl %ebx, %eax
22085 movl %ecx, %edx
22086 +
22087 +.ifb \unchecked
22088 +#ifdef CONFIG_PAX_REFCOUNT
22089 +3:
22090 +#endif
22091 +.endif
22092 +
22093 RESTORE edi
22094 RESTORE esi
22095 RESTORE ebx
22096 RESTORE ebp
22097 + pax_force_retaddr
22098 ret
22099 CFI_ENDPROC
22100 -ENDPROC(atomic64_\func\()_return_cx8)
22101 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
22102 .endm
22103
22104 addsub_return add add adc
22105 addsub_return sub sub sbb
22106 +addsub_return add add adc _unchecked
22107 +addsub_return sub sub sbb _unchecked
22108
22109 -.macro incdec_return func ins insc
22110 -ENTRY(atomic64_\func\()_return_cx8)
22111 +.macro incdec_return func ins insc unchecked=""
22112 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
22113 CFI_STARTPROC
22114 SAVE ebx
22115
22116 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
22117 movl %edx, %ecx
22118 \ins\()l $1, %ebx
22119 \insc\()l $0, %ecx
22120 +
22121 +.ifb \unchecked
22122 +#ifdef CONFIG_PAX_REFCOUNT
22123 + into
22124 +2:
22125 + _ASM_EXTABLE(2b, 3f)
22126 +#endif
22127 +.endif
22128 +
22129 LOCK_PREFIX
22130 cmpxchg8b (%esi)
22131 jne 1b
22132
22133 -10:
22134 movl %ebx, %eax
22135 movl %ecx, %edx
22136 +
22137 +.ifb \unchecked
22138 +#ifdef CONFIG_PAX_REFCOUNT
22139 +3:
22140 +#endif
22141 +.endif
22142 +
22143 RESTORE ebx
22144 + pax_force_retaddr
22145 ret
22146 CFI_ENDPROC
22147 -ENDPROC(atomic64_\func\()_return_cx8)
22148 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
22149 .endm
22150
22151 incdec_return inc add adc
22152 incdec_return dec sub sbb
22153 +incdec_return inc add adc _unchecked
22154 +incdec_return dec sub sbb _unchecked
22155
22156 ENTRY(atomic64_dec_if_positive_cx8)
22157 CFI_STARTPROC
22158 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
22159 movl %edx, %ecx
22160 subl $1, %ebx
22161 sbb $0, %ecx
22162 +
22163 +#ifdef CONFIG_PAX_REFCOUNT
22164 + into
22165 +1234:
22166 + _ASM_EXTABLE(1234b, 2f)
22167 +#endif
22168 +
22169 js 2f
22170 LOCK_PREFIX
22171 cmpxchg8b (%esi)
22172 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
22173 movl %ebx, %eax
22174 movl %ecx, %edx
22175 RESTORE ebx
22176 + pax_force_retaddr
22177 ret
22178 CFI_ENDPROC
22179 ENDPROC(atomic64_dec_if_positive_cx8)
22180 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
22181 movl %edx, %ecx
22182 addl %ebp, %ebx
22183 adcl %edi, %ecx
22184 +
22185 +#ifdef CONFIG_PAX_REFCOUNT
22186 + into
22187 +1234:
22188 + _ASM_EXTABLE(1234b, 3f)
22189 +#endif
22190 +
22191 LOCK_PREFIX
22192 cmpxchg8b (%esi)
22193 jne 1b
22194 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
22195 CFI_ADJUST_CFA_OFFSET -8
22196 RESTORE ebx
22197 RESTORE ebp
22198 + pax_force_retaddr
22199 ret
22200 4:
22201 cmpl %edx, 4(%esp)
22202 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
22203 xorl %ecx, %ecx
22204 addl $1, %ebx
22205 adcl %edx, %ecx
22206 +
22207 +#ifdef CONFIG_PAX_REFCOUNT
22208 + into
22209 +1234:
22210 + _ASM_EXTABLE(1234b, 3f)
22211 +#endif
22212 +
22213 LOCK_PREFIX
22214 cmpxchg8b (%esi)
22215 jne 1b
22216 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
22217 movl $1, %eax
22218 3:
22219 RESTORE ebx
22220 + pax_force_retaddr
22221 ret
22222 CFI_ENDPROC
22223 ENDPROC(atomic64_inc_not_zero_cx8)
22224 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
22225 index 2af5df3..62b1a5a 100644
22226 --- a/arch/x86/lib/checksum_32.S
22227 +++ b/arch/x86/lib/checksum_32.S
22228 @@ -29,7 +29,8 @@
22229 #include <asm/dwarf2.h>
22230 #include <asm/errno.h>
22231 #include <asm/asm.h>
22232 -
22233 +#include <asm/segment.h>
22234 +
22235 /*
22236 * computes a partial checksum, e.g. for TCP/UDP fragments
22237 */
22238 @@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
22239
22240 #define ARGBASE 16
22241 #define FP 12
22242 -
22243 -ENTRY(csum_partial_copy_generic)
22244 +
22245 +ENTRY(csum_partial_copy_generic_to_user)
22246 CFI_STARTPROC
22247 +
22248 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22249 + pushl_cfi %gs
22250 + popl_cfi %es
22251 + jmp csum_partial_copy_generic
22252 +#endif
22253 +
22254 +ENTRY(csum_partial_copy_generic_from_user)
22255 +
22256 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22257 + pushl_cfi %gs
22258 + popl_cfi %ds
22259 +#endif
22260 +
22261 +ENTRY(csum_partial_copy_generic)
22262 subl $4,%esp
22263 CFI_ADJUST_CFA_OFFSET 4
22264 pushl_cfi %edi
22265 @@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
22266 jmp 4f
22267 SRC(1: movw (%esi), %bx )
22268 addl $2, %esi
22269 -DST( movw %bx, (%edi) )
22270 +DST( movw %bx, %es:(%edi) )
22271 addl $2, %edi
22272 addw %bx, %ax
22273 adcl $0, %eax
22274 @@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
22275 SRC(1: movl (%esi), %ebx )
22276 SRC( movl 4(%esi), %edx )
22277 adcl %ebx, %eax
22278 -DST( movl %ebx, (%edi) )
22279 +DST( movl %ebx, %es:(%edi) )
22280 adcl %edx, %eax
22281 -DST( movl %edx, 4(%edi) )
22282 +DST( movl %edx, %es:4(%edi) )
22283
22284 SRC( movl 8(%esi), %ebx )
22285 SRC( movl 12(%esi), %edx )
22286 adcl %ebx, %eax
22287 -DST( movl %ebx, 8(%edi) )
22288 +DST( movl %ebx, %es:8(%edi) )
22289 adcl %edx, %eax
22290 -DST( movl %edx, 12(%edi) )
22291 +DST( movl %edx, %es:12(%edi) )
22292
22293 SRC( movl 16(%esi), %ebx )
22294 SRC( movl 20(%esi), %edx )
22295 adcl %ebx, %eax
22296 -DST( movl %ebx, 16(%edi) )
22297 +DST( movl %ebx, %es:16(%edi) )
22298 adcl %edx, %eax
22299 -DST( movl %edx, 20(%edi) )
22300 +DST( movl %edx, %es:20(%edi) )
22301
22302 SRC( movl 24(%esi), %ebx )
22303 SRC( movl 28(%esi), %edx )
22304 adcl %ebx, %eax
22305 -DST( movl %ebx, 24(%edi) )
22306 +DST( movl %ebx, %es:24(%edi) )
22307 adcl %edx, %eax
22308 -DST( movl %edx, 28(%edi) )
22309 +DST( movl %edx, %es:28(%edi) )
22310
22311 lea 32(%esi), %esi
22312 lea 32(%edi), %edi
22313 @@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
22314 shrl $2, %edx # This clears CF
22315 SRC(3: movl (%esi), %ebx )
22316 adcl %ebx, %eax
22317 -DST( movl %ebx, (%edi) )
22318 +DST( movl %ebx, %es:(%edi) )
22319 lea 4(%esi), %esi
22320 lea 4(%edi), %edi
22321 dec %edx
22322 @@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
22323 jb 5f
22324 SRC( movw (%esi), %cx )
22325 leal 2(%esi), %esi
22326 -DST( movw %cx, (%edi) )
22327 +DST( movw %cx, %es:(%edi) )
22328 leal 2(%edi), %edi
22329 je 6f
22330 shll $16,%ecx
22331 SRC(5: movb (%esi), %cl )
22332 -DST( movb %cl, (%edi) )
22333 +DST( movb %cl, %es:(%edi) )
22334 6: addl %ecx, %eax
22335 adcl $0, %eax
22336 7:
22337 @@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
22338
22339 6001:
22340 movl ARGBASE+20(%esp), %ebx # src_err_ptr
22341 - movl $-EFAULT, (%ebx)
22342 + movl $-EFAULT, %ss:(%ebx)
22343
22344 # zero the complete destination - computing the rest
22345 # is too much work
22346 @@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
22347
22348 6002:
22349 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22350 - movl $-EFAULT,(%ebx)
22351 + movl $-EFAULT,%ss:(%ebx)
22352 jmp 5000b
22353
22354 .previous
22355
22356 + pushl_cfi %ss
22357 + popl_cfi %ds
22358 + pushl_cfi %ss
22359 + popl_cfi %es
22360 popl_cfi %ebx
22361 CFI_RESTORE ebx
22362 popl_cfi %esi
22363 @@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
22364 popl_cfi %ecx # equivalent to addl $4,%esp
22365 ret
22366 CFI_ENDPROC
22367 -ENDPROC(csum_partial_copy_generic)
22368 +ENDPROC(csum_partial_copy_generic_to_user)
22369
22370 #else
22371
22372 /* Version for PentiumII/PPro */
22373
22374 #define ROUND1(x) \
22375 + nop; nop; nop; \
22376 SRC(movl x(%esi), %ebx ) ; \
22377 addl %ebx, %eax ; \
22378 - DST(movl %ebx, x(%edi) ) ;
22379 + DST(movl %ebx, %es:x(%edi)) ;
22380
22381 #define ROUND(x) \
22382 + nop; nop; nop; \
22383 SRC(movl x(%esi), %ebx ) ; \
22384 adcl %ebx, %eax ; \
22385 - DST(movl %ebx, x(%edi) ) ;
22386 + DST(movl %ebx, %es:x(%edi)) ;
22387
22388 #define ARGBASE 12
22389 -
22390 -ENTRY(csum_partial_copy_generic)
22391 +
22392 +ENTRY(csum_partial_copy_generic_to_user)
22393 CFI_STARTPROC
22394 +
22395 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22396 + pushl_cfi %gs
22397 + popl_cfi %es
22398 + jmp csum_partial_copy_generic
22399 +#endif
22400 +
22401 +ENTRY(csum_partial_copy_generic_from_user)
22402 +
22403 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22404 + pushl_cfi %gs
22405 + popl_cfi %ds
22406 +#endif
22407 +
22408 +ENTRY(csum_partial_copy_generic)
22409 pushl_cfi %ebx
22410 CFI_REL_OFFSET ebx, 0
22411 pushl_cfi %edi
22412 @@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
22413 subl %ebx, %edi
22414 lea -1(%esi),%edx
22415 andl $-32,%edx
22416 - lea 3f(%ebx,%ebx), %ebx
22417 + lea 3f(%ebx,%ebx,2), %ebx
22418 testl %esi, %esi
22419 jmp *%ebx
22420 1: addl $64,%esi
22421 @@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
22422 jb 5f
22423 SRC( movw (%esi), %dx )
22424 leal 2(%esi), %esi
22425 -DST( movw %dx, (%edi) )
22426 +DST( movw %dx, %es:(%edi) )
22427 leal 2(%edi), %edi
22428 je 6f
22429 shll $16,%edx
22430 5:
22431 SRC( movb (%esi), %dl )
22432 -DST( movb %dl, (%edi) )
22433 +DST( movb %dl, %es:(%edi) )
22434 6: addl %edx, %eax
22435 adcl $0, %eax
22436 7:
22437 .section .fixup, "ax"
22438 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22439 - movl $-EFAULT, (%ebx)
22440 + movl $-EFAULT, %ss:(%ebx)
22441 # zero the complete destination (computing the rest is too much work)
22442 movl ARGBASE+8(%esp),%edi # dst
22443 movl ARGBASE+12(%esp),%ecx # len
22444 @@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
22445 rep; stosb
22446 jmp 7b
22447 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22448 - movl $-EFAULT, (%ebx)
22449 + movl $-EFAULT, %ss:(%ebx)
22450 jmp 7b
22451 .previous
22452
22453 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22454 + pushl_cfi %ss
22455 + popl_cfi %ds
22456 + pushl_cfi %ss
22457 + popl_cfi %es
22458 +#endif
22459 +
22460 popl_cfi %esi
22461 CFI_RESTORE esi
22462 popl_cfi %edi
22463 @@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
22464 CFI_RESTORE ebx
22465 ret
22466 CFI_ENDPROC
22467 -ENDPROC(csum_partial_copy_generic)
22468 +ENDPROC(csum_partial_copy_generic_to_user)
22469
22470 #undef ROUND
22471 #undef ROUND1
22472 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22473 index f2145cf..cea889d 100644
22474 --- a/arch/x86/lib/clear_page_64.S
22475 +++ b/arch/x86/lib/clear_page_64.S
22476 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
22477 movl $4096/8,%ecx
22478 xorl %eax,%eax
22479 rep stosq
22480 + pax_force_retaddr
22481 ret
22482 CFI_ENDPROC
22483 ENDPROC(clear_page_c)
22484 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
22485 movl $4096,%ecx
22486 xorl %eax,%eax
22487 rep stosb
22488 + pax_force_retaddr
22489 ret
22490 CFI_ENDPROC
22491 ENDPROC(clear_page_c_e)
22492 @@ -43,6 +45,7 @@ ENTRY(clear_page)
22493 leaq 64(%rdi),%rdi
22494 jnz .Lloop
22495 nop
22496 + pax_force_retaddr
22497 ret
22498 CFI_ENDPROC
22499 .Lclear_page_end:
22500 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
22501
22502 #include <asm/cpufeature.h>
22503
22504 - .section .altinstr_replacement,"ax"
22505 + .section .altinstr_replacement,"a"
22506 1: .byte 0xeb /* jmp <disp8> */
22507 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22508 2: .byte 0xeb /* jmp <disp8> */
22509 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
22510 index 1e572c5..2a162cd 100644
22511 --- a/arch/x86/lib/cmpxchg16b_emu.S
22512 +++ b/arch/x86/lib/cmpxchg16b_emu.S
22513 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
22514
22515 popf
22516 mov $1, %al
22517 + pax_force_retaddr
22518 ret
22519
22520 not_same:
22521 popf
22522 xor %al,%al
22523 + pax_force_retaddr
22524 ret
22525
22526 CFI_ENDPROC
22527 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22528 index 6b34d04..dccb07f 100644
22529 --- a/arch/x86/lib/copy_page_64.S
22530 +++ b/arch/x86/lib/copy_page_64.S
22531 @@ -9,6 +9,7 @@ copy_page_c:
22532 CFI_STARTPROC
22533 movl $4096/8,%ecx
22534 rep movsq
22535 + pax_force_retaddr
22536 ret
22537 CFI_ENDPROC
22538 ENDPROC(copy_page_c)
22539 @@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
22540
22541 ENTRY(copy_page)
22542 CFI_STARTPROC
22543 - subq $2*8,%rsp
22544 - CFI_ADJUST_CFA_OFFSET 2*8
22545 + subq $3*8,%rsp
22546 + CFI_ADJUST_CFA_OFFSET 3*8
22547 movq %rbx,(%rsp)
22548 CFI_REL_OFFSET rbx, 0
22549 movq %r12,1*8(%rsp)
22550 CFI_REL_OFFSET r12, 1*8
22551 + movq %r13,2*8(%rsp)
22552 + CFI_REL_OFFSET r13, 2*8
22553
22554 movl $(4096/64)-5,%ecx
22555 .p2align 4
22556 @@ -37,7 +40,7 @@ ENTRY(copy_page)
22557 movq 16 (%rsi), %rdx
22558 movq 24 (%rsi), %r8
22559 movq 32 (%rsi), %r9
22560 - movq 40 (%rsi), %r10
22561 + movq 40 (%rsi), %r13
22562 movq 48 (%rsi), %r11
22563 movq 56 (%rsi), %r12
22564
22565 @@ -48,7 +51,7 @@ ENTRY(copy_page)
22566 movq %rdx, 16 (%rdi)
22567 movq %r8, 24 (%rdi)
22568 movq %r9, 32 (%rdi)
22569 - movq %r10, 40 (%rdi)
22570 + movq %r13, 40 (%rdi)
22571 movq %r11, 48 (%rdi)
22572 movq %r12, 56 (%rdi)
22573
22574 @@ -67,7 +70,7 @@ ENTRY(copy_page)
22575 movq 16 (%rsi), %rdx
22576 movq 24 (%rsi), %r8
22577 movq 32 (%rsi), %r9
22578 - movq 40 (%rsi), %r10
22579 + movq 40 (%rsi), %r13
22580 movq 48 (%rsi), %r11
22581 movq 56 (%rsi), %r12
22582
22583 @@ -76,7 +79,7 @@ ENTRY(copy_page)
22584 movq %rdx, 16 (%rdi)
22585 movq %r8, 24 (%rdi)
22586 movq %r9, 32 (%rdi)
22587 - movq %r10, 40 (%rdi)
22588 + movq %r13, 40 (%rdi)
22589 movq %r11, 48 (%rdi)
22590 movq %r12, 56 (%rdi)
22591
22592 @@ -89,8 +92,11 @@ ENTRY(copy_page)
22593 CFI_RESTORE rbx
22594 movq 1*8(%rsp),%r12
22595 CFI_RESTORE r12
22596 - addq $2*8,%rsp
22597 - CFI_ADJUST_CFA_OFFSET -2*8
22598 + movq 2*8(%rsp),%r13
22599 + CFI_RESTORE r13
22600 + addq $3*8,%rsp
22601 + CFI_ADJUST_CFA_OFFSET -3*8
22602 + pax_force_retaddr
22603 ret
22604 .Lcopy_page_end:
22605 CFI_ENDPROC
22606 @@ -101,7 +107,7 @@ ENDPROC(copy_page)
22607
22608 #include <asm/cpufeature.h>
22609
22610 - .section .altinstr_replacement,"ax"
22611 + .section .altinstr_replacement,"a"
22612 1: .byte 0xeb /* jmp <disp8> */
22613 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
22614 2:
22615 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
22616 index a30ca15..d25fab6 100644
22617 --- a/arch/x86/lib/copy_user_64.S
22618 +++ b/arch/x86/lib/copy_user_64.S
22619 @@ -18,6 +18,7 @@
22620 #include <asm/alternative-asm.h>
22621 #include <asm/asm.h>
22622 #include <asm/smap.h>
22623 +#include <asm/pgtable.h>
22624
22625 /*
22626 * By placing feature2 after feature1 in altinstructions section, we logically
22627 @@ -31,7 +32,7 @@
22628 .byte 0xe9 /* 32bit jump */
22629 .long \orig-1f /* by default jump to orig */
22630 1:
22631 - .section .altinstr_replacement,"ax"
22632 + .section .altinstr_replacement,"a"
22633 2: .byte 0xe9 /* near jump with 32bit immediate */
22634 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
22635 3: .byte 0xe9 /* near jump with 32bit immediate */
22636 @@ -70,47 +71,20 @@
22637 #endif
22638 .endm
22639
22640 -/* Standard copy_to_user with segment limit checking */
22641 -ENTRY(_copy_to_user)
22642 - CFI_STARTPROC
22643 - GET_THREAD_INFO(%rax)
22644 - movq %rdi,%rcx
22645 - addq %rdx,%rcx
22646 - jc bad_to_user
22647 - cmpq TI_addr_limit(%rax),%rcx
22648 - ja bad_to_user
22649 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
22650 - copy_user_generic_unrolled,copy_user_generic_string, \
22651 - copy_user_enhanced_fast_string
22652 - CFI_ENDPROC
22653 -ENDPROC(_copy_to_user)
22654 -
22655 -/* Standard copy_from_user with segment limit checking */
22656 -ENTRY(_copy_from_user)
22657 - CFI_STARTPROC
22658 - GET_THREAD_INFO(%rax)
22659 - movq %rsi,%rcx
22660 - addq %rdx,%rcx
22661 - jc bad_from_user
22662 - cmpq TI_addr_limit(%rax),%rcx
22663 - ja bad_from_user
22664 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
22665 - copy_user_generic_unrolled,copy_user_generic_string, \
22666 - copy_user_enhanced_fast_string
22667 - CFI_ENDPROC
22668 -ENDPROC(_copy_from_user)
22669 -
22670 .section .fixup,"ax"
22671 /* must zero dest */
22672 ENTRY(bad_from_user)
22673 bad_from_user:
22674 CFI_STARTPROC
22675 + testl %edx,%edx
22676 + js bad_to_user
22677 movl %edx,%ecx
22678 xorl %eax,%eax
22679 rep
22680 stosb
22681 bad_to_user:
22682 movl %edx,%eax
22683 + pax_force_retaddr
22684 ret
22685 CFI_ENDPROC
22686 ENDPROC(bad_from_user)
22687 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22688 jz 17f
22689 1: movq (%rsi),%r8
22690 2: movq 1*8(%rsi),%r9
22691 -3: movq 2*8(%rsi),%r10
22692 +3: movq 2*8(%rsi),%rax
22693 4: movq 3*8(%rsi),%r11
22694 5: movq %r8,(%rdi)
22695 6: movq %r9,1*8(%rdi)
22696 -7: movq %r10,2*8(%rdi)
22697 +7: movq %rax,2*8(%rdi)
22698 8: movq %r11,3*8(%rdi)
22699 9: movq 4*8(%rsi),%r8
22700 10: movq 5*8(%rsi),%r9
22701 -11: movq 6*8(%rsi),%r10
22702 +11: movq 6*8(%rsi),%rax
22703 12: movq 7*8(%rsi),%r11
22704 13: movq %r8,4*8(%rdi)
22705 14: movq %r9,5*8(%rdi)
22706 -15: movq %r10,6*8(%rdi)
22707 +15: movq %rax,6*8(%rdi)
22708 16: movq %r11,7*8(%rdi)
22709 leaq 64(%rsi),%rsi
22710 leaq 64(%rdi),%rdi
22711 @@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
22712 jnz 21b
22713 23: xor %eax,%eax
22714 ASM_CLAC
22715 + pax_force_retaddr
22716 ret
22717
22718 .section .fixup,"ax"
22719 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
22720 movsb
22721 4: xorl %eax,%eax
22722 ASM_CLAC
22723 + pax_force_retaddr
22724 ret
22725
22726 .section .fixup,"ax"
22727 @@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
22728 movsb
22729 2: xorl %eax,%eax
22730 ASM_CLAC
22731 + pax_force_retaddr
22732 ret
22733
22734 .section .fixup,"ax"
22735 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22736 index 6a4f43c..f5f9e26 100644
22737 --- a/arch/x86/lib/copy_user_nocache_64.S
22738 +++ b/arch/x86/lib/copy_user_nocache_64.S
22739 @@ -8,6 +8,7 @@
22740
22741 #include <linux/linkage.h>
22742 #include <asm/dwarf2.h>
22743 +#include <asm/alternative-asm.h>
22744
22745 #define FIX_ALIGNMENT 1
22746
22747 @@ -16,6 +17,7 @@
22748 #include <asm/thread_info.h>
22749 #include <asm/asm.h>
22750 #include <asm/smap.h>
22751 +#include <asm/pgtable.h>
22752
22753 .macro ALIGN_DESTINATION
22754 #ifdef FIX_ALIGNMENT
22755 @@ -49,6 +51,15 @@
22756 */
22757 ENTRY(__copy_user_nocache)
22758 CFI_STARTPROC
22759 +
22760 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22761 + mov $PAX_USER_SHADOW_BASE,%rcx
22762 + cmp %rcx,%rsi
22763 + jae 1f
22764 + add %rcx,%rsi
22765 +1:
22766 +#endif
22767 +
22768 ASM_STAC
22769 cmpl $8,%edx
22770 jb 20f /* less then 8 bytes, go to byte copy loop */
22771 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22772 jz 17f
22773 1: movq (%rsi),%r8
22774 2: movq 1*8(%rsi),%r9
22775 -3: movq 2*8(%rsi),%r10
22776 +3: movq 2*8(%rsi),%rax
22777 4: movq 3*8(%rsi),%r11
22778 5: movnti %r8,(%rdi)
22779 6: movnti %r9,1*8(%rdi)
22780 -7: movnti %r10,2*8(%rdi)
22781 +7: movnti %rax,2*8(%rdi)
22782 8: movnti %r11,3*8(%rdi)
22783 9: movq 4*8(%rsi),%r8
22784 10: movq 5*8(%rsi),%r9
22785 -11: movq 6*8(%rsi),%r10
22786 +11: movq 6*8(%rsi),%rax
22787 12: movq 7*8(%rsi),%r11
22788 13: movnti %r8,4*8(%rdi)
22789 14: movnti %r9,5*8(%rdi)
22790 -15: movnti %r10,6*8(%rdi)
22791 +15: movnti %rax,6*8(%rdi)
22792 16: movnti %r11,7*8(%rdi)
22793 leaq 64(%rsi),%rsi
22794 leaq 64(%rdi),%rdi
22795 @@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
22796 23: xorl %eax,%eax
22797 ASM_CLAC
22798 sfence
22799 + pax_force_retaddr
22800 ret
22801
22802 .section .fixup,"ax"
22803 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22804 index 2419d5f..953ee51 100644
22805 --- a/arch/x86/lib/csum-copy_64.S
22806 +++ b/arch/x86/lib/csum-copy_64.S
22807 @@ -9,6 +9,7 @@
22808 #include <asm/dwarf2.h>
22809 #include <asm/errno.h>
22810 #include <asm/asm.h>
22811 +#include <asm/alternative-asm.h>
22812
22813 /*
22814 * Checksum copy with exception handling.
22815 @@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
22816 CFI_RESTORE rbp
22817 addq $7*8, %rsp
22818 CFI_ADJUST_CFA_OFFSET -7*8
22819 + pax_force_retaddr 0, 1
22820 ret
22821 CFI_RESTORE_STATE
22822
22823 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22824 index 25b7ae8..169fafc 100644
22825 --- a/arch/x86/lib/csum-wrappers_64.c
22826 +++ b/arch/x86/lib/csum-wrappers_64.c
22827 @@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22828 len -= 2;
22829 }
22830 }
22831 - isum = csum_partial_copy_generic((__force const void *)src,
22832 + isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
22833 dst, len, isum, errp, NULL);
22834 if (unlikely(*errp))
22835 goto out_err;
22836 @@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22837 }
22838
22839 *errp = 0;
22840 - return csum_partial_copy_generic(src, (void __force *)dst,
22841 + return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
22842 len, isum, NULL, errp);
22843 }
22844 EXPORT_SYMBOL(csum_partial_copy_to_user);
22845 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22846 index 156b9c8..b144132 100644
22847 --- a/arch/x86/lib/getuser.S
22848 +++ b/arch/x86/lib/getuser.S
22849 @@ -34,17 +34,40 @@
22850 #include <asm/thread_info.h>
22851 #include <asm/asm.h>
22852 #include <asm/smap.h>
22853 +#include <asm/segment.h>
22854 +#include <asm/pgtable.h>
22855 +#include <asm/alternative-asm.h>
22856 +
22857 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22858 +#define __copyuser_seg gs;
22859 +#else
22860 +#define __copyuser_seg
22861 +#endif
22862
22863 .text
22864 ENTRY(__get_user_1)
22865 CFI_STARTPROC
22866 +
22867 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22868 GET_THREAD_INFO(%_ASM_DX)
22869 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22870 jae bad_get_user
22871 ASM_STAC
22872 -1: movzb (%_ASM_AX),%edx
22873 +
22874 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22875 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22876 + cmp %_ASM_DX,%_ASM_AX
22877 + jae 1234f
22878 + add %_ASM_DX,%_ASM_AX
22879 +1234:
22880 +#endif
22881 +
22882 +#endif
22883 +
22884 +1: __copyuser_seg movzb (%_ASM_AX),%edx
22885 xor %eax,%eax
22886 ASM_CLAC
22887 + pax_force_retaddr
22888 ret
22889 CFI_ENDPROC
22890 ENDPROC(__get_user_1)
22891 @@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
22892 ENTRY(__get_user_2)
22893 CFI_STARTPROC
22894 add $1,%_ASM_AX
22895 +
22896 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22897 jc bad_get_user
22898 GET_THREAD_INFO(%_ASM_DX)
22899 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22900 jae bad_get_user
22901 ASM_STAC
22902 -2: movzwl -1(%_ASM_AX),%edx
22903 +
22904 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22905 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22906 + cmp %_ASM_DX,%_ASM_AX
22907 + jae 1234f
22908 + add %_ASM_DX,%_ASM_AX
22909 +1234:
22910 +#endif
22911 +
22912 +#endif
22913 +
22914 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22915 xor %eax,%eax
22916 ASM_CLAC
22917 + pax_force_retaddr
22918 ret
22919 CFI_ENDPROC
22920 ENDPROC(__get_user_2)
22921 @@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
22922 ENTRY(__get_user_4)
22923 CFI_STARTPROC
22924 add $3,%_ASM_AX
22925 +
22926 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22927 jc bad_get_user
22928 GET_THREAD_INFO(%_ASM_DX)
22929 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22930 jae bad_get_user
22931 ASM_STAC
22932 -3: mov -3(%_ASM_AX),%edx
22933 +
22934 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22935 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22936 + cmp %_ASM_DX,%_ASM_AX
22937 + jae 1234f
22938 + add %_ASM_DX,%_ASM_AX
22939 +1234:
22940 +#endif
22941 +
22942 +#endif
22943 +
22944 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
22945 xor %eax,%eax
22946 ASM_CLAC
22947 + pax_force_retaddr
22948 ret
22949 CFI_ENDPROC
22950 ENDPROC(__get_user_4)
22951 @@ -87,10 +138,20 @@ ENTRY(__get_user_8)
22952 GET_THREAD_INFO(%_ASM_DX)
22953 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22954 jae bad_get_user
22955 +
22956 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22957 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22958 + cmp %_ASM_DX,%_ASM_AX
22959 + jae 1234f
22960 + add %_ASM_DX,%_ASM_AX
22961 +1234:
22962 +#endif
22963 +
22964 ASM_STAC
22965 4: movq -7(%_ASM_AX),%_ASM_DX
22966 xor %eax,%eax
22967 ASM_CLAC
22968 + pax_force_retaddr
22969 ret
22970 CFI_ENDPROC
22971 ENDPROC(__get_user_8)
22972 @@ -101,6 +162,7 @@ bad_get_user:
22973 xor %edx,%edx
22974 mov $(-EFAULT),%_ASM_AX
22975 ASM_CLAC
22976 + pax_force_retaddr
22977 ret
22978 CFI_ENDPROC
22979 END(bad_get_user)
22980 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
22981 index 54fcffe..7be149e 100644
22982 --- a/arch/x86/lib/insn.c
22983 +++ b/arch/x86/lib/insn.c
22984 @@ -20,8 +20,10 @@
22985
22986 #ifdef __KERNEL__
22987 #include <linux/string.h>
22988 +#include <asm/pgtable_types.h>
22989 #else
22990 #include <string.h>
22991 +#define ktla_ktva(addr) addr
22992 #endif
22993 #include <asm/inat.h>
22994 #include <asm/insn.h>
22995 @@ -53,8 +55,8 @@
22996 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
22997 {
22998 memset(insn, 0, sizeof(*insn));
22999 - insn->kaddr = kaddr;
23000 - insn->next_byte = kaddr;
23001 + insn->kaddr = ktla_ktva(kaddr);
23002 + insn->next_byte = ktla_ktva(kaddr);
23003 insn->x86_64 = x86_64 ? 1 : 0;
23004 insn->opnd_bytes = 4;
23005 if (x86_64)
23006 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
23007 index 05a95e7..326f2fa 100644
23008 --- a/arch/x86/lib/iomap_copy_64.S
23009 +++ b/arch/x86/lib/iomap_copy_64.S
23010 @@ -17,6 +17,7 @@
23011
23012 #include <linux/linkage.h>
23013 #include <asm/dwarf2.h>
23014 +#include <asm/alternative-asm.h>
23015
23016 /*
23017 * override generic version in lib/iomap_copy.c
23018 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
23019 CFI_STARTPROC
23020 movl %edx,%ecx
23021 rep movsd
23022 + pax_force_retaddr
23023 ret
23024 CFI_ENDPROC
23025 ENDPROC(__iowrite32_copy)
23026 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
23027 index 1c273be..da9cc0e 100644
23028 --- a/arch/x86/lib/memcpy_64.S
23029 +++ b/arch/x86/lib/memcpy_64.S
23030 @@ -33,6 +33,7 @@
23031 rep movsq
23032 movl %edx, %ecx
23033 rep movsb
23034 + pax_force_retaddr
23035 ret
23036 .Lmemcpy_e:
23037 .previous
23038 @@ -49,6 +50,7 @@
23039 movq %rdi, %rax
23040 movq %rdx, %rcx
23041 rep movsb
23042 + pax_force_retaddr
23043 ret
23044 .Lmemcpy_e_e:
23045 .previous
23046 @@ -76,13 +78,13 @@ ENTRY(memcpy)
23047 */
23048 movq 0*8(%rsi), %r8
23049 movq 1*8(%rsi), %r9
23050 - movq 2*8(%rsi), %r10
23051 + movq 2*8(%rsi), %rcx
23052 movq 3*8(%rsi), %r11
23053 leaq 4*8(%rsi), %rsi
23054
23055 movq %r8, 0*8(%rdi)
23056 movq %r9, 1*8(%rdi)
23057 - movq %r10, 2*8(%rdi)
23058 + movq %rcx, 2*8(%rdi)
23059 movq %r11, 3*8(%rdi)
23060 leaq 4*8(%rdi), %rdi
23061 jae .Lcopy_forward_loop
23062 @@ -105,12 +107,12 @@ ENTRY(memcpy)
23063 subq $0x20, %rdx
23064 movq -1*8(%rsi), %r8
23065 movq -2*8(%rsi), %r9
23066 - movq -3*8(%rsi), %r10
23067 + movq -3*8(%rsi), %rcx
23068 movq -4*8(%rsi), %r11
23069 leaq -4*8(%rsi), %rsi
23070 movq %r8, -1*8(%rdi)
23071 movq %r9, -2*8(%rdi)
23072 - movq %r10, -3*8(%rdi)
23073 + movq %rcx, -3*8(%rdi)
23074 movq %r11, -4*8(%rdi)
23075 leaq -4*8(%rdi), %rdi
23076 jae .Lcopy_backward_loop
23077 @@ -130,12 +132,13 @@ ENTRY(memcpy)
23078 */
23079 movq 0*8(%rsi), %r8
23080 movq 1*8(%rsi), %r9
23081 - movq -2*8(%rsi, %rdx), %r10
23082 + movq -2*8(%rsi, %rdx), %rcx
23083 movq -1*8(%rsi, %rdx), %r11
23084 movq %r8, 0*8(%rdi)
23085 movq %r9, 1*8(%rdi)
23086 - movq %r10, -2*8(%rdi, %rdx)
23087 + movq %rcx, -2*8(%rdi, %rdx)
23088 movq %r11, -1*8(%rdi, %rdx)
23089 + pax_force_retaddr
23090 retq
23091 .p2align 4
23092 .Lless_16bytes:
23093 @@ -148,6 +151,7 @@ ENTRY(memcpy)
23094 movq -1*8(%rsi, %rdx), %r9
23095 movq %r8, 0*8(%rdi)
23096 movq %r9, -1*8(%rdi, %rdx)
23097 + pax_force_retaddr
23098 retq
23099 .p2align 4
23100 .Lless_8bytes:
23101 @@ -161,6 +165,7 @@ ENTRY(memcpy)
23102 movl -4(%rsi, %rdx), %r8d
23103 movl %ecx, (%rdi)
23104 movl %r8d, -4(%rdi, %rdx)
23105 + pax_force_retaddr
23106 retq
23107 .p2align 4
23108 .Lless_3bytes:
23109 @@ -179,6 +184,7 @@ ENTRY(memcpy)
23110 movb %cl, (%rdi)
23111
23112 .Lend:
23113 + pax_force_retaddr
23114 retq
23115 CFI_ENDPROC
23116 ENDPROC(memcpy)
23117 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
23118 index ee16461..c39c199 100644
23119 --- a/arch/x86/lib/memmove_64.S
23120 +++ b/arch/x86/lib/memmove_64.S
23121 @@ -61,13 +61,13 @@ ENTRY(memmove)
23122 5:
23123 sub $0x20, %rdx
23124 movq 0*8(%rsi), %r11
23125 - movq 1*8(%rsi), %r10
23126 + movq 1*8(%rsi), %rcx
23127 movq 2*8(%rsi), %r9
23128 movq 3*8(%rsi), %r8
23129 leaq 4*8(%rsi), %rsi
23130
23131 movq %r11, 0*8(%rdi)
23132 - movq %r10, 1*8(%rdi)
23133 + movq %rcx, 1*8(%rdi)
23134 movq %r9, 2*8(%rdi)
23135 movq %r8, 3*8(%rdi)
23136 leaq 4*8(%rdi), %rdi
23137 @@ -81,10 +81,10 @@ ENTRY(memmove)
23138 4:
23139 movq %rdx, %rcx
23140 movq -8(%rsi, %rdx), %r11
23141 - lea -8(%rdi, %rdx), %r10
23142 + lea -8(%rdi, %rdx), %r9
23143 shrq $3, %rcx
23144 rep movsq
23145 - movq %r11, (%r10)
23146 + movq %r11, (%r9)
23147 jmp 13f
23148 .Lmemmove_end_forward:
23149
23150 @@ -95,14 +95,14 @@ ENTRY(memmove)
23151 7:
23152 movq %rdx, %rcx
23153 movq (%rsi), %r11
23154 - movq %rdi, %r10
23155 + movq %rdi, %r9
23156 leaq -8(%rsi, %rdx), %rsi
23157 leaq -8(%rdi, %rdx), %rdi
23158 shrq $3, %rcx
23159 std
23160 rep movsq
23161 cld
23162 - movq %r11, (%r10)
23163 + movq %r11, (%r9)
23164 jmp 13f
23165
23166 /*
23167 @@ -127,13 +127,13 @@ ENTRY(memmove)
23168 8:
23169 subq $0x20, %rdx
23170 movq -1*8(%rsi), %r11
23171 - movq -2*8(%rsi), %r10
23172 + movq -2*8(%rsi), %rcx
23173 movq -3*8(%rsi), %r9
23174 movq -4*8(%rsi), %r8
23175 leaq -4*8(%rsi), %rsi
23176
23177 movq %r11, -1*8(%rdi)
23178 - movq %r10, -2*8(%rdi)
23179 + movq %rcx, -2*8(%rdi)
23180 movq %r9, -3*8(%rdi)
23181 movq %r8, -4*8(%rdi)
23182 leaq -4*8(%rdi), %rdi
23183 @@ -151,11 +151,11 @@ ENTRY(memmove)
23184 * Move data from 16 bytes to 31 bytes.
23185 */
23186 movq 0*8(%rsi), %r11
23187 - movq 1*8(%rsi), %r10
23188 + movq 1*8(%rsi), %rcx
23189 movq -2*8(%rsi, %rdx), %r9
23190 movq -1*8(%rsi, %rdx), %r8
23191 movq %r11, 0*8(%rdi)
23192 - movq %r10, 1*8(%rdi)
23193 + movq %rcx, 1*8(%rdi)
23194 movq %r9, -2*8(%rdi, %rdx)
23195 movq %r8, -1*8(%rdi, %rdx)
23196 jmp 13f
23197 @@ -167,9 +167,9 @@ ENTRY(memmove)
23198 * Move data from 8 bytes to 15 bytes.
23199 */
23200 movq 0*8(%rsi), %r11
23201 - movq -1*8(%rsi, %rdx), %r10
23202 + movq -1*8(%rsi, %rdx), %r9
23203 movq %r11, 0*8(%rdi)
23204 - movq %r10, -1*8(%rdi, %rdx)
23205 + movq %r9, -1*8(%rdi, %rdx)
23206 jmp 13f
23207 10:
23208 cmpq $4, %rdx
23209 @@ -178,9 +178,9 @@ ENTRY(memmove)
23210 * Move data from 4 bytes to 7 bytes.
23211 */
23212 movl (%rsi), %r11d
23213 - movl -4(%rsi, %rdx), %r10d
23214 + movl -4(%rsi, %rdx), %r9d
23215 movl %r11d, (%rdi)
23216 - movl %r10d, -4(%rdi, %rdx)
23217 + movl %r9d, -4(%rdi, %rdx)
23218 jmp 13f
23219 11:
23220 cmp $2, %rdx
23221 @@ -189,9 +189,9 @@ ENTRY(memmove)
23222 * Move data from 2 bytes to 3 bytes.
23223 */
23224 movw (%rsi), %r11w
23225 - movw -2(%rsi, %rdx), %r10w
23226 + movw -2(%rsi, %rdx), %r9w
23227 movw %r11w, (%rdi)
23228 - movw %r10w, -2(%rdi, %rdx)
23229 + movw %r9w, -2(%rdi, %rdx)
23230 jmp 13f
23231 12:
23232 cmp $1, %rdx
23233 @@ -202,6 +202,7 @@ ENTRY(memmove)
23234 movb (%rsi), %r11b
23235 movb %r11b, (%rdi)
23236 13:
23237 + pax_force_retaddr
23238 retq
23239 CFI_ENDPROC
23240
23241 @@ -210,6 +211,7 @@ ENTRY(memmove)
23242 /* Forward moving data. */
23243 movq %rdx, %rcx
23244 rep movsb
23245 + pax_force_retaddr
23246 retq
23247 .Lmemmove_end_forward_efs:
23248 .previous
23249 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
23250 index 2dcb380..963660a 100644
23251 --- a/arch/x86/lib/memset_64.S
23252 +++ b/arch/x86/lib/memset_64.S
23253 @@ -30,6 +30,7 @@
23254 movl %edx,%ecx
23255 rep stosb
23256 movq %r9,%rax
23257 + pax_force_retaddr
23258 ret
23259 .Lmemset_e:
23260 .previous
23261 @@ -52,6 +53,7 @@
23262 movq %rdx,%rcx
23263 rep stosb
23264 movq %r9,%rax
23265 + pax_force_retaddr
23266 ret
23267 .Lmemset_e_e:
23268 .previous
23269 @@ -59,7 +61,7 @@
23270 ENTRY(memset)
23271 ENTRY(__memset)
23272 CFI_STARTPROC
23273 - movq %rdi,%r10
23274 + movq %rdi,%r11
23275
23276 /* expand byte value */
23277 movzbl %sil,%ecx
23278 @@ -117,7 +119,8 @@ ENTRY(__memset)
23279 jnz .Lloop_1
23280
23281 .Lende:
23282 - movq %r10,%rax
23283 + movq %r11,%rax
23284 + pax_force_retaddr
23285 ret
23286
23287 CFI_RESTORE_STATE
23288 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
23289 index c9f2d9b..e7fd2c0 100644
23290 --- a/arch/x86/lib/mmx_32.c
23291 +++ b/arch/x86/lib/mmx_32.c
23292 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23293 {
23294 void *p;
23295 int i;
23296 + unsigned long cr0;
23297
23298 if (unlikely(in_interrupt()))
23299 return __memcpy(to, from, len);
23300 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23301 kernel_fpu_begin();
23302
23303 __asm__ __volatile__ (
23304 - "1: prefetch (%0)\n" /* This set is 28 bytes */
23305 - " prefetch 64(%0)\n"
23306 - " prefetch 128(%0)\n"
23307 - " prefetch 192(%0)\n"
23308 - " prefetch 256(%0)\n"
23309 + "1: prefetch (%1)\n" /* This set is 28 bytes */
23310 + " prefetch 64(%1)\n"
23311 + " prefetch 128(%1)\n"
23312 + " prefetch 192(%1)\n"
23313 + " prefetch 256(%1)\n"
23314 "2: \n"
23315 ".section .fixup, \"ax\"\n"
23316 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23317 + "3: \n"
23318 +
23319 +#ifdef CONFIG_PAX_KERNEXEC
23320 + " movl %%cr0, %0\n"
23321 + " movl %0, %%eax\n"
23322 + " andl $0xFFFEFFFF, %%eax\n"
23323 + " movl %%eax, %%cr0\n"
23324 +#endif
23325 +
23326 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23327 +
23328 +#ifdef CONFIG_PAX_KERNEXEC
23329 + " movl %0, %%cr0\n"
23330 +#endif
23331 +
23332 " jmp 2b\n"
23333 ".previous\n"
23334 _ASM_EXTABLE(1b, 3b)
23335 - : : "r" (from));
23336 + : "=&r" (cr0) : "r" (from) : "ax");
23337
23338 for ( ; i > 5; i--) {
23339 __asm__ __volatile__ (
23340 - "1: prefetch 320(%0)\n"
23341 - "2: movq (%0), %%mm0\n"
23342 - " movq 8(%0), %%mm1\n"
23343 - " movq 16(%0), %%mm2\n"
23344 - " movq 24(%0), %%mm3\n"
23345 - " movq %%mm0, (%1)\n"
23346 - " movq %%mm1, 8(%1)\n"
23347 - " movq %%mm2, 16(%1)\n"
23348 - " movq %%mm3, 24(%1)\n"
23349 - " movq 32(%0), %%mm0\n"
23350 - " movq 40(%0), %%mm1\n"
23351 - " movq 48(%0), %%mm2\n"
23352 - " movq 56(%0), %%mm3\n"
23353 - " movq %%mm0, 32(%1)\n"
23354 - " movq %%mm1, 40(%1)\n"
23355 - " movq %%mm2, 48(%1)\n"
23356 - " movq %%mm3, 56(%1)\n"
23357 + "1: prefetch 320(%1)\n"
23358 + "2: movq (%1), %%mm0\n"
23359 + " movq 8(%1), %%mm1\n"
23360 + " movq 16(%1), %%mm2\n"
23361 + " movq 24(%1), %%mm3\n"
23362 + " movq %%mm0, (%2)\n"
23363 + " movq %%mm1, 8(%2)\n"
23364 + " movq %%mm2, 16(%2)\n"
23365 + " movq %%mm3, 24(%2)\n"
23366 + " movq 32(%1), %%mm0\n"
23367 + " movq 40(%1), %%mm1\n"
23368 + " movq 48(%1), %%mm2\n"
23369 + " movq 56(%1), %%mm3\n"
23370 + " movq %%mm0, 32(%2)\n"
23371 + " movq %%mm1, 40(%2)\n"
23372 + " movq %%mm2, 48(%2)\n"
23373 + " movq %%mm3, 56(%2)\n"
23374 ".section .fixup, \"ax\"\n"
23375 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23376 + "3:\n"
23377 +
23378 +#ifdef CONFIG_PAX_KERNEXEC
23379 + " movl %%cr0, %0\n"
23380 + " movl %0, %%eax\n"
23381 + " andl $0xFFFEFFFF, %%eax\n"
23382 + " movl %%eax, %%cr0\n"
23383 +#endif
23384 +
23385 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23386 +
23387 +#ifdef CONFIG_PAX_KERNEXEC
23388 + " movl %0, %%cr0\n"
23389 +#endif
23390 +
23391 " jmp 2b\n"
23392 ".previous\n"
23393 _ASM_EXTABLE(1b, 3b)
23394 - : : "r" (from), "r" (to) : "memory");
23395 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23396
23397 from += 64;
23398 to += 64;
23399 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
23400 static void fast_copy_page(void *to, void *from)
23401 {
23402 int i;
23403 + unsigned long cr0;
23404
23405 kernel_fpu_begin();
23406
23407 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
23408 * but that is for later. -AV
23409 */
23410 __asm__ __volatile__(
23411 - "1: prefetch (%0)\n"
23412 - " prefetch 64(%0)\n"
23413 - " prefetch 128(%0)\n"
23414 - " prefetch 192(%0)\n"
23415 - " prefetch 256(%0)\n"
23416 + "1: prefetch (%1)\n"
23417 + " prefetch 64(%1)\n"
23418 + " prefetch 128(%1)\n"
23419 + " prefetch 192(%1)\n"
23420 + " prefetch 256(%1)\n"
23421 "2: \n"
23422 ".section .fixup, \"ax\"\n"
23423 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23424 + "3: \n"
23425 +
23426 +#ifdef CONFIG_PAX_KERNEXEC
23427 + " movl %%cr0, %0\n"
23428 + " movl %0, %%eax\n"
23429 + " andl $0xFFFEFFFF, %%eax\n"
23430 + " movl %%eax, %%cr0\n"
23431 +#endif
23432 +
23433 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23434 +
23435 +#ifdef CONFIG_PAX_KERNEXEC
23436 + " movl %0, %%cr0\n"
23437 +#endif
23438 +
23439 " jmp 2b\n"
23440 ".previous\n"
23441 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
23442 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23443
23444 for (i = 0; i < (4096-320)/64; i++) {
23445 __asm__ __volatile__ (
23446 - "1: prefetch 320(%0)\n"
23447 - "2: movq (%0), %%mm0\n"
23448 - " movntq %%mm0, (%1)\n"
23449 - " movq 8(%0), %%mm1\n"
23450 - " movntq %%mm1, 8(%1)\n"
23451 - " movq 16(%0), %%mm2\n"
23452 - " movntq %%mm2, 16(%1)\n"
23453 - " movq 24(%0), %%mm3\n"
23454 - " movntq %%mm3, 24(%1)\n"
23455 - " movq 32(%0), %%mm4\n"
23456 - " movntq %%mm4, 32(%1)\n"
23457 - " movq 40(%0), %%mm5\n"
23458 - " movntq %%mm5, 40(%1)\n"
23459 - " movq 48(%0), %%mm6\n"
23460 - " movntq %%mm6, 48(%1)\n"
23461 - " movq 56(%0), %%mm7\n"
23462 - " movntq %%mm7, 56(%1)\n"
23463 + "1: prefetch 320(%1)\n"
23464 + "2: movq (%1), %%mm0\n"
23465 + " movntq %%mm0, (%2)\n"
23466 + " movq 8(%1), %%mm1\n"
23467 + " movntq %%mm1, 8(%2)\n"
23468 + " movq 16(%1), %%mm2\n"
23469 + " movntq %%mm2, 16(%2)\n"
23470 + " movq 24(%1), %%mm3\n"
23471 + " movntq %%mm3, 24(%2)\n"
23472 + " movq 32(%1), %%mm4\n"
23473 + " movntq %%mm4, 32(%2)\n"
23474 + " movq 40(%1), %%mm5\n"
23475 + " movntq %%mm5, 40(%2)\n"
23476 + " movq 48(%1), %%mm6\n"
23477 + " movntq %%mm6, 48(%2)\n"
23478 + " movq 56(%1), %%mm7\n"
23479 + " movntq %%mm7, 56(%2)\n"
23480 ".section .fixup, \"ax\"\n"
23481 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23482 + "3:\n"
23483 +
23484 +#ifdef CONFIG_PAX_KERNEXEC
23485 + " movl %%cr0, %0\n"
23486 + " movl %0, %%eax\n"
23487 + " andl $0xFFFEFFFF, %%eax\n"
23488 + " movl %%eax, %%cr0\n"
23489 +#endif
23490 +
23491 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23492 +
23493 +#ifdef CONFIG_PAX_KERNEXEC
23494 + " movl %0, %%cr0\n"
23495 +#endif
23496 +
23497 " jmp 2b\n"
23498 ".previous\n"
23499 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
23500 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23501
23502 from += 64;
23503 to += 64;
23504 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
23505 static void fast_copy_page(void *to, void *from)
23506 {
23507 int i;
23508 + unsigned long cr0;
23509
23510 kernel_fpu_begin();
23511
23512 __asm__ __volatile__ (
23513 - "1: prefetch (%0)\n"
23514 - " prefetch 64(%0)\n"
23515 - " prefetch 128(%0)\n"
23516 - " prefetch 192(%0)\n"
23517 - " prefetch 256(%0)\n"
23518 + "1: prefetch (%1)\n"
23519 + " prefetch 64(%1)\n"
23520 + " prefetch 128(%1)\n"
23521 + " prefetch 192(%1)\n"
23522 + " prefetch 256(%1)\n"
23523 "2: \n"
23524 ".section .fixup, \"ax\"\n"
23525 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23526 + "3: \n"
23527 +
23528 +#ifdef CONFIG_PAX_KERNEXEC
23529 + " movl %%cr0, %0\n"
23530 + " movl %0, %%eax\n"
23531 + " andl $0xFFFEFFFF, %%eax\n"
23532 + " movl %%eax, %%cr0\n"
23533 +#endif
23534 +
23535 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23536 +
23537 +#ifdef CONFIG_PAX_KERNEXEC
23538 + " movl %0, %%cr0\n"
23539 +#endif
23540 +
23541 " jmp 2b\n"
23542 ".previous\n"
23543 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
23544 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23545
23546 for (i = 0; i < 4096/64; i++) {
23547 __asm__ __volatile__ (
23548 - "1: prefetch 320(%0)\n"
23549 - "2: movq (%0), %%mm0\n"
23550 - " movq 8(%0), %%mm1\n"
23551 - " movq 16(%0), %%mm2\n"
23552 - " movq 24(%0), %%mm3\n"
23553 - " movq %%mm0, (%1)\n"
23554 - " movq %%mm1, 8(%1)\n"
23555 - " movq %%mm2, 16(%1)\n"
23556 - " movq %%mm3, 24(%1)\n"
23557 - " movq 32(%0), %%mm0\n"
23558 - " movq 40(%0), %%mm1\n"
23559 - " movq 48(%0), %%mm2\n"
23560 - " movq 56(%0), %%mm3\n"
23561 - " movq %%mm0, 32(%1)\n"
23562 - " movq %%mm1, 40(%1)\n"
23563 - " movq %%mm2, 48(%1)\n"
23564 - " movq %%mm3, 56(%1)\n"
23565 + "1: prefetch 320(%1)\n"
23566 + "2: movq (%1), %%mm0\n"
23567 + " movq 8(%1), %%mm1\n"
23568 + " movq 16(%1), %%mm2\n"
23569 + " movq 24(%1), %%mm3\n"
23570 + " movq %%mm0, (%2)\n"
23571 + " movq %%mm1, 8(%2)\n"
23572 + " movq %%mm2, 16(%2)\n"
23573 + " movq %%mm3, 24(%2)\n"
23574 + " movq 32(%1), %%mm0\n"
23575 + " movq 40(%1), %%mm1\n"
23576 + " movq 48(%1), %%mm2\n"
23577 + " movq 56(%1), %%mm3\n"
23578 + " movq %%mm0, 32(%2)\n"
23579 + " movq %%mm1, 40(%2)\n"
23580 + " movq %%mm2, 48(%2)\n"
23581 + " movq %%mm3, 56(%2)\n"
23582 ".section .fixup, \"ax\"\n"
23583 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23584 + "3:\n"
23585 +
23586 +#ifdef CONFIG_PAX_KERNEXEC
23587 + " movl %%cr0, %0\n"
23588 + " movl %0, %%eax\n"
23589 + " andl $0xFFFEFFFF, %%eax\n"
23590 + " movl %%eax, %%cr0\n"
23591 +#endif
23592 +
23593 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23594 +
23595 +#ifdef CONFIG_PAX_KERNEXEC
23596 + " movl %0, %%cr0\n"
23597 +#endif
23598 +
23599 " jmp 2b\n"
23600 ".previous\n"
23601 _ASM_EXTABLE(1b, 3b)
23602 - : : "r" (from), "r" (to) : "memory");
23603 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23604
23605 from += 64;
23606 to += 64;
23607 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
23608 index f6d13ee..aca5f0b 100644
23609 --- a/arch/x86/lib/msr-reg.S
23610 +++ b/arch/x86/lib/msr-reg.S
23611 @@ -3,6 +3,7 @@
23612 #include <asm/dwarf2.h>
23613 #include <asm/asm.h>
23614 #include <asm/msr.h>
23615 +#include <asm/alternative-asm.h>
23616
23617 #ifdef CONFIG_X86_64
23618 /*
23619 @@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
23620 CFI_STARTPROC
23621 pushq_cfi %rbx
23622 pushq_cfi %rbp
23623 - movq %rdi, %r10 /* Save pointer */
23624 + movq %rdi, %r9 /* Save pointer */
23625 xorl %r11d, %r11d /* Return value */
23626 movl (%rdi), %eax
23627 movl 4(%rdi), %ecx
23628 @@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
23629 movl 28(%rdi), %edi
23630 CFI_REMEMBER_STATE
23631 1: \op
23632 -2: movl %eax, (%r10)
23633 +2: movl %eax, (%r9)
23634 movl %r11d, %eax /* Return value */
23635 - movl %ecx, 4(%r10)
23636 - movl %edx, 8(%r10)
23637 - movl %ebx, 12(%r10)
23638 - movl %ebp, 20(%r10)
23639 - movl %esi, 24(%r10)
23640 - movl %edi, 28(%r10)
23641 + movl %ecx, 4(%r9)
23642 + movl %edx, 8(%r9)
23643 + movl %ebx, 12(%r9)
23644 + movl %ebp, 20(%r9)
23645 + movl %esi, 24(%r9)
23646 + movl %edi, 28(%r9)
23647 popq_cfi %rbp
23648 popq_cfi %rbx
23649 + pax_force_retaddr
23650 ret
23651 3:
23652 CFI_RESTORE_STATE
23653 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
23654 index fc6ba17..04471c5 100644
23655 --- a/arch/x86/lib/putuser.S
23656 +++ b/arch/x86/lib/putuser.S
23657 @@ -16,7 +16,9 @@
23658 #include <asm/errno.h>
23659 #include <asm/asm.h>
23660 #include <asm/smap.h>
23661 -
23662 +#include <asm/segment.h>
23663 +#include <asm/pgtable.h>
23664 +#include <asm/alternative-asm.h>
23665
23666 /*
23667 * __put_user_X
23668 @@ -30,57 +32,125 @@
23669 * as they get called from within inline assembly.
23670 */
23671
23672 -#define ENTER CFI_STARTPROC ; \
23673 - GET_THREAD_INFO(%_ASM_BX)
23674 -#define EXIT ASM_CLAC ; \
23675 - ret ; \
23676 +#define ENTER CFI_STARTPROC
23677 +#define EXIT ASM_CLAC ; \
23678 + pax_force_retaddr ; \
23679 + ret ; \
23680 CFI_ENDPROC
23681
23682 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23683 +#define _DEST %_ASM_CX,%_ASM_BX
23684 +#else
23685 +#define _DEST %_ASM_CX
23686 +#endif
23687 +
23688 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23689 +#define __copyuser_seg gs;
23690 +#else
23691 +#define __copyuser_seg
23692 +#endif
23693 +
23694 .text
23695 ENTRY(__put_user_1)
23696 ENTER
23697 +
23698 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23699 + GET_THREAD_INFO(%_ASM_BX)
23700 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23701 jae bad_put_user
23702 ASM_STAC
23703 -1: movb %al,(%_ASM_CX)
23704 +
23705 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23706 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23707 + cmp %_ASM_BX,%_ASM_CX
23708 + jb 1234f
23709 + xor %ebx,%ebx
23710 +1234:
23711 +#endif
23712 +
23713 +#endif
23714 +
23715 +1: __copyuser_seg movb %al,(_DEST)
23716 xor %eax,%eax
23717 EXIT
23718 ENDPROC(__put_user_1)
23719
23720 ENTRY(__put_user_2)
23721 ENTER
23722 +
23723 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23724 + GET_THREAD_INFO(%_ASM_BX)
23725 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23726 sub $1,%_ASM_BX
23727 cmp %_ASM_BX,%_ASM_CX
23728 jae bad_put_user
23729 ASM_STAC
23730 -2: movw %ax,(%_ASM_CX)
23731 +
23732 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23733 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23734 + cmp %_ASM_BX,%_ASM_CX
23735 + jb 1234f
23736 + xor %ebx,%ebx
23737 +1234:
23738 +#endif
23739 +
23740 +#endif
23741 +
23742 +2: __copyuser_seg movw %ax,(_DEST)
23743 xor %eax,%eax
23744 EXIT
23745 ENDPROC(__put_user_2)
23746
23747 ENTRY(__put_user_4)
23748 ENTER
23749 +
23750 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23751 + GET_THREAD_INFO(%_ASM_BX)
23752 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23753 sub $3,%_ASM_BX
23754 cmp %_ASM_BX,%_ASM_CX
23755 jae bad_put_user
23756 ASM_STAC
23757 -3: movl %eax,(%_ASM_CX)
23758 +
23759 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23760 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23761 + cmp %_ASM_BX,%_ASM_CX
23762 + jb 1234f
23763 + xor %ebx,%ebx
23764 +1234:
23765 +#endif
23766 +
23767 +#endif
23768 +
23769 +3: __copyuser_seg movl %eax,(_DEST)
23770 xor %eax,%eax
23771 EXIT
23772 ENDPROC(__put_user_4)
23773
23774 ENTRY(__put_user_8)
23775 ENTER
23776 +
23777 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23778 + GET_THREAD_INFO(%_ASM_BX)
23779 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23780 sub $7,%_ASM_BX
23781 cmp %_ASM_BX,%_ASM_CX
23782 jae bad_put_user
23783 ASM_STAC
23784 -4: mov %_ASM_AX,(%_ASM_CX)
23785 +
23786 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23787 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23788 + cmp %_ASM_BX,%_ASM_CX
23789 + jb 1234f
23790 + xor %ebx,%ebx
23791 +1234:
23792 +#endif
23793 +
23794 +#endif
23795 +
23796 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
23797 #ifdef CONFIG_X86_32
23798 -5: movl %edx,4(%_ASM_CX)
23799 +5: __copyuser_seg movl %edx,4(_DEST)
23800 #endif
23801 xor %eax,%eax
23802 EXIT
23803 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
23804 index 1cad221..de671ee 100644
23805 --- a/arch/x86/lib/rwlock.S
23806 +++ b/arch/x86/lib/rwlock.S
23807 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
23808 FRAME
23809 0: LOCK_PREFIX
23810 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
23811 +
23812 +#ifdef CONFIG_PAX_REFCOUNT
23813 + jno 1234f
23814 + LOCK_PREFIX
23815 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
23816 + int $4
23817 +1234:
23818 + _ASM_EXTABLE(1234b, 1234b)
23819 +#endif
23820 +
23821 1: rep; nop
23822 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
23823 jne 1b
23824 LOCK_PREFIX
23825 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
23826 +
23827 +#ifdef CONFIG_PAX_REFCOUNT
23828 + jno 1234f
23829 + LOCK_PREFIX
23830 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
23831 + int $4
23832 +1234:
23833 + _ASM_EXTABLE(1234b, 1234b)
23834 +#endif
23835 +
23836 jnz 0b
23837 ENDFRAME
23838 + pax_force_retaddr
23839 ret
23840 CFI_ENDPROC
23841 END(__write_lock_failed)
23842 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
23843 FRAME
23844 0: LOCK_PREFIX
23845 READ_LOCK_SIZE(inc) (%__lock_ptr)
23846 +
23847 +#ifdef CONFIG_PAX_REFCOUNT
23848 + jno 1234f
23849 + LOCK_PREFIX
23850 + READ_LOCK_SIZE(dec) (%__lock_ptr)
23851 + int $4
23852 +1234:
23853 + _ASM_EXTABLE(1234b, 1234b)
23854 +#endif
23855 +
23856 1: rep; nop
23857 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
23858 js 1b
23859 LOCK_PREFIX
23860 READ_LOCK_SIZE(dec) (%__lock_ptr)
23861 +
23862 +#ifdef CONFIG_PAX_REFCOUNT
23863 + jno 1234f
23864 + LOCK_PREFIX
23865 + READ_LOCK_SIZE(inc) (%__lock_ptr)
23866 + int $4
23867 +1234:
23868 + _ASM_EXTABLE(1234b, 1234b)
23869 +#endif
23870 +
23871 js 0b
23872 ENDFRAME
23873 + pax_force_retaddr
23874 ret
23875 CFI_ENDPROC
23876 END(__read_lock_failed)
23877 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
23878 index 5dff5f0..cadebf4 100644
23879 --- a/arch/x86/lib/rwsem.S
23880 +++ b/arch/x86/lib/rwsem.S
23881 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
23882 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
23883 CFI_RESTORE __ASM_REG(dx)
23884 restore_common_regs
23885 + pax_force_retaddr
23886 ret
23887 CFI_ENDPROC
23888 ENDPROC(call_rwsem_down_read_failed)
23889 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
23890 movq %rax,%rdi
23891 call rwsem_down_write_failed
23892 restore_common_regs
23893 + pax_force_retaddr
23894 ret
23895 CFI_ENDPROC
23896 ENDPROC(call_rwsem_down_write_failed)
23897 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
23898 movq %rax,%rdi
23899 call rwsem_wake
23900 restore_common_regs
23901 -1: ret
23902 +1: pax_force_retaddr
23903 + ret
23904 CFI_ENDPROC
23905 ENDPROC(call_rwsem_wake)
23906
23907 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
23908 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
23909 CFI_RESTORE __ASM_REG(dx)
23910 restore_common_regs
23911 + pax_force_retaddr
23912 ret
23913 CFI_ENDPROC
23914 ENDPROC(call_rwsem_downgrade_wake)
23915 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23916 index a63efd6..ccecad8 100644
23917 --- a/arch/x86/lib/thunk_64.S
23918 +++ b/arch/x86/lib/thunk_64.S
23919 @@ -8,6 +8,7 @@
23920 #include <linux/linkage.h>
23921 #include <asm/dwarf2.h>
23922 #include <asm/calling.h>
23923 +#include <asm/alternative-asm.h>
23924
23925 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23926 .macro THUNK name, func, put_ret_addr_in_rdi=0
23927 @@ -41,5 +42,6 @@
23928 SAVE_ARGS
23929 restore:
23930 RESTORE_ARGS
23931 + pax_force_retaddr
23932 ret
23933 CFI_ENDPROC
23934 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23935 index 98f6d6b6..d27f045 100644
23936 --- a/arch/x86/lib/usercopy_32.c
23937 +++ b/arch/x86/lib/usercopy_32.c
23938 @@ -42,11 +42,13 @@ do { \
23939 int __d0; \
23940 might_fault(); \
23941 __asm__ __volatile__( \
23942 + __COPYUSER_SET_ES \
23943 ASM_STAC "\n" \
23944 "0: rep; stosl\n" \
23945 " movl %2,%0\n" \
23946 "1: rep; stosb\n" \
23947 "2: " ASM_CLAC "\n" \
23948 + __COPYUSER_RESTORE_ES \
23949 ".section .fixup,\"ax\"\n" \
23950 "3: lea 0(%2,%0,4),%0\n" \
23951 " jmp 2b\n" \
23952 @@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
23953
23954 #ifdef CONFIG_X86_INTEL_USERCOPY
23955 static unsigned long
23956 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
23957 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23958 {
23959 int d0, d1;
23960 __asm__ __volatile__(
23961 @@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23962 " .align 2,0x90\n"
23963 "3: movl 0(%4), %%eax\n"
23964 "4: movl 4(%4), %%edx\n"
23965 - "5: movl %%eax, 0(%3)\n"
23966 - "6: movl %%edx, 4(%3)\n"
23967 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23968 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23969 "7: movl 8(%4), %%eax\n"
23970 "8: movl 12(%4),%%edx\n"
23971 - "9: movl %%eax, 8(%3)\n"
23972 - "10: movl %%edx, 12(%3)\n"
23973 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23974 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23975 "11: movl 16(%4), %%eax\n"
23976 "12: movl 20(%4), %%edx\n"
23977 - "13: movl %%eax, 16(%3)\n"
23978 - "14: movl %%edx, 20(%3)\n"
23979 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23980 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23981 "15: movl 24(%4), %%eax\n"
23982 "16: movl 28(%4), %%edx\n"
23983 - "17: movl %%eax, 24(%3)\n"
23984 - "18: movl %%edx, 28(%3)\n"
23985 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23986 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23987 "19: movl 32(%4), %%eax\n"
23988 "20: movl 36(%4), %%edx\n"
23989 - "21: movl %%eax, 32(%3)\n"
23990 - "22: movl %%edx, 36(%3)\n"
23991 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23992 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23993 "23: movl 40(%4), %%eax\n"
23994 "24: movl 44(%4), %%edx\n"
23995 - "25: movl %%eax, 40(%3)\n"
23996 - "26: movl %%edx, 44(%3)\n"
23997 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23998 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23999 "27: movl 48(%4), %%eax\n"
24000 "28: movl 52(%4), %%edx\n"
24001 - "29: movl %%eax, 48(%3)\n"
24002 - "30: movl %%edx, 52(%3)\n"
24003 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
24004 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
24005 "31: movl 56(%4), %%eax\n"
24006 "32: movl 60(%4), %%edx\n"
24007 - "33: movl %%eax, 56(%3)\n"
24008 - "34: movl %%edx, 60(%3)\n"
24009 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
24010 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
24011 " addl $-64, %0\n"
24012 " addl $64, %4\n"
24013 " addl $64, %3\n"
24014 @@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24015 " shrl $2, %0\n"
24016 " andl $3, %%eax\n"
24017 " cld\n"
24018 + __COPYUSER_SET_ES
24019 "99: rep; movsl\n"
24020 "36: movl %%eax, %0\n"
24021 "37: rep; movsb\n"
24022 "100:\n"
24023 + __COPYUSER_RESTORE_ES
24024 ".section .fixup,\"ax\"\n"
24025 "101: lea 0(%%eax,%0,4),%0\n"
24026 " jmp 100b\n"
24027 @@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24028 }
24029
24030 static unsigned long
24031 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
24032 +{
24033 + int d0, d1;
24034 + __asm__ __volatile__(
24035 + " .align 2,0x90\n"
24036 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
24037 + " cmpl $67, %0\n"
24038 + " jbe 3f\n"
24039 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
24040 + " .align 2,0x90\n"
24041 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
24042 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
24043 + "5: movl %%eax, 0(%3)\n"
24044 + "6: movl %%edx, 4(%3)\n"
24045 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
24046 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
24047 + "9: movl %%eax, 8(%3)\n"
24048 + "10: movl %%edx, 12(%3)\n"
24049 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
24050 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
24051 + "13: movl %%eax, 16(%3)\n"
24052 + "14: movl %%edx, 20(%3)\n"
24053 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
24054 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
24055 + "17: movl %%eax, 24(%3)\n"
24056 + "18: movl %%edx, 28(%3)\n"
24057 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
24058 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
24059 + "21: movl %%eax, 32(%3)\n"
24060 + "22: movl %%edx, 36(%3)\n"
24061 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
24062 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
24063 + "25: movl %%eax, 40(%3)\n"
24064 + "26: movl %%edx, 44(%3)\n"
24065 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
24066 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
24067 + "29: movl %%eax, 48(%3)\n"
24068 + "30: movl %%edx, 52(%3)\n"
24069 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
24070 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
24071 + "33: movl %%eax, 56(%3)\n"
24072 + "34: movl %%edx, 60(%3)\n"
24073 + " addl $-64, %0\n"
24074 + " addl $64, %4\n"
24075 + " addl $64, %3\n"
24076 + " cmpl $63, %0\n"
24077 + " ja 1b\n"
24078 + "35: movl %0, %%eax\n"
24079 + " shrl $2, %0\n"
24080 + " andl $3, %%eax\n"
24081 + " cld\n"
24082 + "99: rep; "__copyuser_seg" movsl\n"
24083 + "36: movl %%eax, %0\n"
24084 + "37: rep; "__copyuser_seg" movsb\n"
24085 + "100:\n"
24086 + ".section .fixup,\"ax\"\n"
24087 + "101: lea 0(%%eax,%0,4),%0\n"
24088 + " jmp 100b\n"
24089 + ".previous\n"
24090 + _ASM_EXTABLE(1b,100b)
24091 + _ASM_EXTABLE(2b,100b)
24092 + _ASM_EXTABLE(3b,100b)
24093 + _ASM_EXTABLE(4b,100b)
24094 + _ASM_EXTABLE(5b,100b)
24095 + _ASM_EXTABLE(6b,100b)
24096 + _ASM_EXTABLE(7b,100b)
24097 + _ASM_EXTABLE(8b,100b)
24098 + _ASM_EXTABLE(9b,100b)
24099 + _ASM_EXTABLE(10b,100b)
24100 + _ASM_EXTABLE(11b,100b)
24101 + _ASM_EXTABLE(12b,100b)
24102 + _ASM_EXTABLE(13b,100b)
24103 + _ASM_EXTABLE(14b,100b)
24104 + _ASM_EXTABLE(15b,100b)
24105 + _ASM_EXTABLE(16b,100b)
24106 + _ASM_EXTABLE(17b,100b)
24107 + _ASM_EXTABLE(18b,100b)
24108 + _ASM_EXTABLE(19b,100b)
24109 + _ASM_EXTABLE(20b,100b)
24110 + _ASM_EXTABLE(21b,100b)
24111 + _ASM_EXTABLE(22b,100b)
24112 + _ASM_EXTABLE(23b,100b)
24113 + _ASM_EXTABLE(24b,100b)
24114 + _ASM_EXTABLE(25b,100b)
24115 + _ASM_EXTABLE(26b,100b)
24116 + _ASM_EXTABLE(27b,100b)
24117 + _ASM_EXTABLE(28b,100b)
24118 + _ASM_EXTABLE(29b,100b)
24119 + _ASM_EXTABLE(30b,100b)
24120 + _ASM_EXTABLE(31b,100b)
24121 + _ASM_EXTABLE(32b,100b)
24122 + _ASM_EXTABLE(33b,100b)
24123 + _ASM_EXTABLE(34b,100b)
24124 + _ASM_EXTABLE(35b,100b)
24125 + _ASM_EXTABLE(36b,100b)
24126 + _ASM_EXTABLE(37b,100b)
24127 + _ASM_EXTABLE(99b,101b)
24128 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
24129 + : "1"(to), "2"(from), "0"(size)
24130 + : "eax", "edx", "memory");
24131 + return size;
24132 +}
24133 +
24134 +static unsigned long __size_overflow(3)
24135 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24136 {
24137 int d0, d1;
24138 __asm__ __volatile__(
24139 " .align 2,0x90\n"
24140 - "0: movl 32(%4), %%eax\n"
24141 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24142 " cmpl $67, %0\n"
24143 " jbe 2f\n"
24144 - "1: movl 64(%4), %%eax\n"
24145 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24146 " .align 2,0x90\n"
24147 - "2: movl 0(%4), %%eax\n"
24148 - "21: movl 4(%4), %%edx\n"
24149 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24150 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24151 " movl %%eax, 0(%3)\n"
24152 " movl %%edx, 4(%3)\n"
24153 - "3: movl 8(%4), %%eax\n"
24154 - "31: movl 12(%4),%%edx\n"
24155 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24156 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24157 " movl %%eax, 8(%3)\n"
24158 " movl %%edx, 12(%3)\n"
24159 - "4: movl 16(%4), %%eax\n"
24160 - "41: movl 20(%4), %%edx\n"
24161 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24162 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24163 " movl %%eax, 16(%3)\n"
24164 " movl %%edx, 20(%3)\n"
24165 - "10: movl 24(%4), %%eax\n"
24166 - "51: movl 28(%4), %%edx\n"
24167 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24168 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24169 " movl %%eax, 24(%3)\n"
24170 " movl %%edx, 28(%3)\n"
24171 - "11: movl 32(%4), %%eax\n"
24172 - "61: movl 36(%4), %%edx\n"
24173 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24174 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24175 " movl %%eax, 32(%3)\n"
24176 " movl %%edx, 36(%3)\n"
24177 - "12: movl 40(%4), %%eax\n"
24178 - "71: movl 44(%4), %%edx\n"
24179 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24180 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24181 " movl %%eax, 40(%3)\n"
24182 " movl %%edx, 44(%3)\n"
24183 - "13: movl 48(%4), %%eax\n"
24184 - "81: movl 52(%4), %%edx\n"
24185 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24186 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24187 " movl %%eax, 48(%3)\n"
24188 " movl %%edx, 52(%3)\n"
24189 - "14: movl 56(%4), %%eax\n"
24190 - "91: movl 60(%4), %%edx\n"
24191 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24192 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24193 " movl %%eax, 56(%3)\n"
24194 " movl %%edx, 60(%3)\n"
24195 " addl $-64, %0\n"
24196 @@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24197 " shrl $2, %0\n"
24198 " andl $3, %%eax\n"
24199 " cld\n"
24200 - "6: rep; movsl\n"
24201 + "6: rep; "__copyuser_seg" movsl\n"
24202 " movl %%eax,%0\n"
24203 - "7: rep; movsb\n"
24204 + "7: rep; "__copyuser_seg" movsb\n"
24205 "8:\n"
24206 ".section .fixup,\"ax\"\n"
24207 "9: lea 0(%%eax,%0,4),%0\n"
24208 @@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24209 * hyoshiok@miraclelinux.com
24210 */
24211
24212 -static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24213 +static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
24214 const void __user *from, unsigned long size)
24215 {
24216 int d0, d1;
24217
24218 __asm__ __volatile__(
24219 " .align 2,0x90\n"
24220 - "0: movl 32(%4), %%eax\n"
24221 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24222 " cmpl $67, %0\n"
24223 " jbe 2f\n"
24224 - "1: movl 64(%4), %%eax\n"
24225 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24226 " .align 2,0x90\n"
24227 - "2: movl 0(%4), %%eax\n"
24228 - "21: movl 4(%4), %%edx\n"
24229 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24230 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24231 " movnti %%eax, 0(%3)\n"
24232 " movnti %%edx, 4(%3)\n"
24233 - "3: movl 8(%4), %%eax\n"
24234 - "31: movl 12(%4),%%edx\n"
24235 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24236 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24237 " movnti %%eax, 8(%3)\n"
24238 " movnti %%edx, 12(%3)\n"
24239 - "4: movl 16(%4), %%eax\n"
24240 - "41: movl 20(%4), %%edx\n"
24241 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24242 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24243 " movnti %%eax, 16(%3)\n"
24244 " movnti %%edx, 20(%3)\n"
24245 - "10: movl 24(%4), %%eax\n"
24246 - "51: movl 28(%4), %%edx\n"
24247 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24248 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24249 " movnti %%eax, 24(%3)\n"
24250 " movnti %%edx, 28(%3)\n"
24251 - "11: movl 32(%4), %%eax\n"
24252 - "61: movl 36(%4), %%edx\n"
24253 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24254 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24255 " movnti %%eax, 32(%3)\n"
24256 " movnti %%edx, 36(%3)\n"
24257 - "12: movl 40(%4), %%eax\n"
24258 - "71: movl 44(%4), %%edx\n"
24259 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24260 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24261 " movnti %%eax, 40(%3)\n"
24262 " movnti %%edx, 44(%3)\n"
24263 - "13: movl 48(%4), %%eax\n"
24264 - "81: movl 52(%4), %%edx\n"
24265 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24266 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24267 " movnti %%eax, 48(%3)\n"
24268 " movnti %%edx, 52(%3)\n"
24269 - "14: movl 56(%4), %%eax\n"
24270 - "91: movl 60(%4), %%edx\n"
24271 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24272 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24273 " movnti %%eax, 56(%3)\n"
24274 " movnti %%edx, 60(%3)\n"
24275 " addl $-64, %0\n"
24276 @@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24277 " shrl $2, %0\n"
24278 " andl $3, %%eax\n"
24279 " cld\n"
24280 - "6: rep; movsl\n"
24281 + "6: rep; "__copyuser_seg" movsl\n"
24282 " movl %%eax,%0\n"
24283 - "7: rep; movsb\n"
24284 + "7: rep; "__copyuser_seg" movsb\n"
24285 "8:\n"
24286 ".section .fixup,\"ax\"\n"
24287 "9: lea 0(%%eax,%0,4),%0\n"
24288 @@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24289 return size;
24290 }
24291
24292 -static unsigned long __copy_user_intel_nocache(void *to,
24293 +static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
24294 const void __user *from, unsigned long size)
24295 {
24296 int d0, d1;
24297
24298 __asm__ __volatile__(
24299 " .align 2,0x90\n"
24300 - "0: movl 32(%4), %%eax\n"
24301 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24302 " cmpl $67, %0\n"
24303 " jbe 2f\n"
24304 - "1: movl 64(%4), %%eax\n"
24305 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24306 " .align 2,0x90\n"
24307 - "2: movl 0(%4), %%eax\n"
24308 - "21: movl 4(%4), %%edx\n"
24309 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24310 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24311 " movnti %%eax, 0(%3)\n"
24312 " movnti %%edx, 4(%3)\n"
24313 - "3: movl 8(%4), %%eax\n"
24314 - "31: movl 12(%4),%%edx\n"
24315 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24316 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24317 " movnti %%eax, 8(%3)\n"
24318 " movnti %%edx, 12(%3)\n"
24319 - "4: movl 16(%4), %%eax\n"
24320 - "41: movl 20(%4), %%edx\n"
24321 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24322 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24323 " movnti %%eax, 16(%3)\n"
24324 " movnti %%edx, 20(%3)\n"
24325 - "10: movl 24(%4), %%eax\n"
24326 - "51: movl 28(%4), %%edx\n"
24327 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24328 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24329 " movnti %%eax, 24(%3)\n"
24330 " movnti %%edx, 28(%3)\n"
24331 - "11: movl 32(%4), %%eax\n"
24332 - "61: movl 36(%4), %%edx\n"
24333 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24334 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24335 " movnti %%eax, 32(%3)\n"
24336 " movnti %%edx, 36(%3)\n"
24337 - "12: movl 40(%4), %%eax\n"
24338 - "71: movl 44(%4), %%edx\n"
24339 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24340 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24341 " movnti %%eax, 40(%3)\n"
24342 " movnti %%edx, 44(%3)\n"
24343 - "13: movl 48(%4), %%eax\n"
24344 - "81: movl 52(%4), %%edx\n"
24345 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24346 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24347 " movnti %%eax, 48(%3)\n"
24348 " movnti %%edx, 52(%3)\n"
24349 - "14: movl 56(%4), %%eax\n"
24350 - "91: movl 60(%4), %%edx\n"
24351 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24352 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24353 " movnti %%eax, 56(%3)\n"
24354 " movnti %%edx, 60(%3)\n"
24355 " addl $-64, %0\n"
24356 @@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
24357 " shrl $2, %0\n"
24358 " andl $3, %%eax\n"
24359 " cld\n"
24360 - "6: rep; movsl\n"
24361 + "6: rep; "__copyuser_seg" movsl\n"
24362 " movl %%eax,%0\n"
24363 - "7: rep; movsb\n"
24364 + "7: rep; "__copyuser_seg" movsb\n"
24365 "8:\n"
24366 ".section .fixup,\"ax\"\n"
24367 "9: lea 0(%%eax,%0,4),%0\n"
24368 @@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
24369 */
24370 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
24371 unsigned long size);
24372 -unsigned long __copy_user_intel(void __user *to, const void *from,
24373 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
24374 + unsigned long size);
24375 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
24376 unsigned long size);
24377 unsigned long __copy_user_zeroing_intel_nocache(void *to,
24378 const void __user *from, unsigned long size);
24379 #endif /* CONFIG_X86_INTEL_USERCOPY */
24380
24381 /* Generic arbitrary sized copy. */
24382 -#define __copy_user(to, from, size) \
24383 +#define __copy_user(to, from, size, prefix, set, restore) \
24384 do { \
24385 int __d0, __d1, __d2; \
24386 __asm__ __volatile__( \
24387 + set \
24388 " cmp $7,%0\n" \
24389 " jbe 1f\n" \
24390 " movl %1,%0\n" \
24391 " negl %0\n" \
24392 " andl $7,%0\n" \
24393 " subl %0,%3\n" \
24394 - "4: rep; movsb\n" \
24395 + "4: rep; "prefix"movsb\n" \
24396 " movl %3,%0\n" \
24397 " shrl $2,%0\n" \
24398 " andl $3,%3\n" \
24399 " .align 2,0x90\n" \
24400 - "0: rep; movsl\n" \
24401 + "0: rep; "prefix"movsl\n" \
24402 " movl %3,%0\n" \
24403 - "1: rep; movsb\n" \
24404 + "1: rep; "prefix"movsb\n" \
24405 "2:\n" \
24406 + restore \
24407 ".section .fixup,\"ax\"\n" \
24408 "5: addl %3,%0\n" \
24409 " jmp 2b\n" \
24410 @@ -538,14 +650,14 @@ do { \
24411 " negl %0\n" \
24412 " andl $7,%0\n" \
24413 " subl %0,%3\n" \
24414 - "4: rep; movsb\n" \
24415 + "4: rep; "__copyuser_seg"movsb\n" \
24416 " movl %3,%0\n" \
24417 " shrl $2,%0\n" \
24418 " andl $3,%3\n" \
24419 " .align 2,0x90\n" \
24420 - "0: rep; movsl\n" \
24421 + "0: rep; "__copyuser_seg"movsl\n" \
24422 " movl %3,%0\n" \
24423 - "1: rep; movsb\n" \
24424 + "1: rep; "__copyuser_seg"movsb\n" \
24425 "2:\n" \
24426 ".section .fixup,\"ax\"\n" \
24427 "5: addl %3,%0\n" \
24428 @@ -629,9 +741,9 @@ survive:
24429 #endif
24430 stac();
24431 if (movsl_is_ok(to, from, n))
24432 - __copy_user(to, from, n);
24433 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
24434 else
24435 - n = __copy_user_intel(to, from, n);
24436 + n = __generic_copy_to_user_intel(to, from, n);
24437 clac();
24438 return n;
24439 }
24440 @@ -655,10 +767,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
24441 {
24442 stac();
24443 if (movsl_is_ok(to, from, n))
24444 - __copy_user(to, from, n);
24445 + __copy_user(to, from, n, __copyuser_seg, "", "");
24446 else
24447 - n = __copy_user_intel((void __user *)to,
24448 - (const void *)from, n);
24449 + n = __generic_copy_from_user_intel(to, from, n);
24450 clac();
24451 return n;
24452 }
24453 @@ -689,66 +800,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
24454 if (n > 64 && cpu_has_xmm2)
24455 n = __copy_user_intel_nocache(to, from, n);
24456 else
24457 - __copy_user(to, from, n);
24458 + __copy_user(to, from, n, __copyuser_seg, "", "");
24459 #else
24460 - __copy_user(to, from, n);
24461 + __copy_user(to, from, n, __copyuser_seg, "", "");
24462 #endif
24463 clac();
24464 return n;
24465 }
24466 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
24467
24468 -/**
24469 - * copy_to_user: - Copy a block of data into user space.
24470 - * @to: Destination address, in user space.
24471 - * @from: Source address, in kernel space.
24472 - * @n: Number of bytes to copy.
24473 - *
24474 - * Context: User context only. This function may sleep.
24475 - *
24476 - * Copy data from kernel space to user space.
24477 - *
24478 - * Returns number of bytes that could not be copied.
24479 - * On success, this will be zero.
24480 - */
24481 -unsigned long
24482 -copy_to_user(void __user *to, const void *from, unsigned long n)
24483 -{
24484 - if (access_ok(VERIFY_WRITE, to, n))
24485 - n = __copy_to_user(to, from, n);
24486 - return n;
24487 -}
24488 -EXPORT_SYMBOL(copy_to_user);
24489 -
24490 -/**
24491 - * copy_from_user: - Copy a block of data from user space.
24492 - * @to: Destination address, in kernel space.
24493 - * @from: Source address, in user space.
24494 - * @n: Number of bytes to copy.
24495 - *
24496 - * Context: User context only. This function may sleep.
24497 - *
24498 - * Copy data from user space to kernel space.
24499 - *
24500 - * Returns number of bytes that could not be copied.
24501 - * On success, this will be zero.
24502 - *
24503 - * If some data could not be copied, this function will pad the copied
24504 - * data to the requested size using zero bytes.
24505 - */
24506 -unsigned long
24507 -_copy_from_user(void *to, const void __user *from, unsigned long n)
24508 -{
24509 - if (access_ok(VERIFY_READ, from, n))
24510 - n = __copy_from_user(to, from, n);
24511 - else
24512 - memset(to, 0, n);
24513 - return n;
24514 -}
24515 -EXPORT_SYMBOL(_copy_from_user);
24516 -
24517 void copy_from_user_overflow(void)
24518 {
24519 WARN(1, "Buffer overflow detected!\n");
24520 }
24521 EXPORT_SYMBOL(copy_from_user_overflow);
24522 +
24523 +void copy_to_user_overflow(void)
24524 +{
24525 + WARN(1, "Buffer overflow detected!\n");
24526 +}
24527 +EXPORT_SYMBOL(copy_to_user_overflow);
24528 +
24529 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24530 +void __set_fs(mm_segment_t x)
24531 +{
24532 + switch (x.seg) {
24533 + case 0:
24534 + loadsegment(gs, 0);
24535 + break;
24536 + case TASK_SIZE_MAX:
24537 + loadsegment(gs, __USER_DS);
24538 + break;
24539 + case -1UL:
24540 + loadsegment(gs, __KERNEL_DS);
24541 + break;
24542 + default:
24543 + BUG();
24544 + }
24545 + return;
24546 +}
24547 +EXPORT_SYMBOL(__set_fs);
24548 +
24549 +void set_fs(mm_segment_t x)
24550 +{
24551 + current_thread_info()->addr_limit = x;
24552 + __set_fs(x);
24553 +}
24554 +EXPORT_SYMBOL(set_fs);
24555 +#endif
24556 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
24557 index 05928aa..b33dea1 100644
24558 --- a/arch/x86/lib/usercopy_64.c
24559 +++ b/arch/x86/lib/usercopy_64.c
24560 @@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
24561 _ASM_EXTABLE(0b,3b)
24562 _ASM_EXTABLE(1b,2b)
24563 : [size8] "=&c"(size), [dst] "=&D" (__d0)
24564 - : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
24565 + : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
24566 [zero] "r" (0UL), [eight] "r" (8UL));
24567 clac();
24568 return size;
24569 @@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
24570 }
24571 EXPORT_SYMBOL(clear_user);
24572
24573 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
24574 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
24575 {
24576 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
24577 - return copy_user_generic((__force void *)to, (__force void *)from, len);
24578 - }
24579 - return len;
24580 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
24581 + return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
24582 + return len;
24583 }
24584 EXPORT_SYMBOL(copy_in_user);
24585
24586 @@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
24587 * it is not necessary to optimize tail handling.
24588 */
24589 unsigned long
24590 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
24591 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
24592 {
24593 char c;
24594 unsigned zero_len;
24595 @@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
24596 clac();
24597 return len;
24598 }
24599 +
24600 +void copy_from_user_overflow(void)
24601 +{
24602 + WARN(1, "Buffer overflow detected!\n");
24603 +}
24604 +EXPORT_SYMBOL(copy_from_user_overflow);
24605 +
24606 +void copy_to_user_overflow(void)
24607 +{
24608 + WARN(1, "Buffer overflow detected!\n");
24609 +}
24610 +EXPORT_SYMBOL(copy_to_user_overflow);
24611 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
24612 index 903ec1e..c4166b2 100644
24613 --- a/arch/x86/mm/extable.c
24614 +++ b/arch/x86/mm/extable.c
24615 @@ -6,12 +6,24 @@
24616 static inline unsigned long
24617 ex_insn_addr(const struct exception_table_entry *x)
24618 {
24619 - return (unsigned long)&x->insn + x->insn;
24620 + unsigned long reloc = 0;
24621 +
24622 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24623 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
24624 +#endif
24625 +
24626 + return (unsigned long)&x->insn + x->insn + reloc;
24627 }
24628 static inline unsigned long
24629 ex_fixup_addr(const struct exception_table_entry *x)
24630 {
24631 - return (unsigned long)&x->fixup + x->fixup;
24632 + unsigned long reloc = 0;
24633 +
24634 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24635 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
24636 +#endif
24637 +
24638 + return (unsigned long)&x->fixup + x->fixup + reloc;
24639 }
24640
24641 int fixup_exception(struct pt_regs *regs)
24642 @@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
24643 unsigned long new_ip;
24644
24645 #ifdef CONFIG_PNPBIOS
24646 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
24647 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
24648 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
24649 extern u32 pnp_bios_is_utter_crap;
24650 pnp_bios_is_utter_crap = 1;
24651 @@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
24652 i += 4;
24653 p->fixup -= i;
24654 i += 4;
24655 +
24656 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24657 + BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
24658 + p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
24659 + p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
24660 +#endif
24661 +
24662 }
24663 }
24664
24665 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
24666 index 8e13ecb..831f2d0 100644
24667 --- a/arch/x86/mm/fault.c
24668 +++ b/arch/x86/mm/fault.c
24669 @@ -13,12 +13,19 @@
24670 #include <linux/perf_event.h> /* perf_sw_event */
24671 #include <linux/hugetlb.h> /* hstate_index_to_shift */
24672 #include <linux/prefetch.h> /* prefetchw */
24673 +#include <linux/unistd.h>
24674 +#include <linux/compiler.h>
24675
24676 #include <asm/traps.h> /* dotraplinkage, ... */
24677 #include <asm/pgalloc.h> /* pgd_*(), ... */
24678 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
24679 #include <asm/fixmap.h> /* VSYSCALL_START */
24680 #include <asm/rcu.h> /* exception_enter(), ... */
24681 +#include <asm/tlbflush.h>
24682 +
24683 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24684 +#include <asm/stacktrace.h>
24685 +#endif
24686
24687 /*
24688 * Page fault error code bits:
24689 @@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
24690 int ret = 0;
24691
24692 /* kprobe_running() needs smp_processor_id() */
24693 - if (kprobes_built_in() && !user_mode_vm(regs)) {
24694 + if (kprobes_built_in() && !user_mode(regs)) {
24695 preempt_disable();
24696 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24697 ret = 1;
24698 @@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
24699 return !instr_lo || (instr_lo>>1) == 1;
24700 case 0x00:
24701 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24702 - if (probe_kernel_address(instr, opcode))
24703 + if (user_mode(regs)) {
24704 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24705 + return 0;
24706 + } else if (probe_kernel_address(instr, opcode))
24707 return 0;
24708
24709 *prefetch = (instr_lo == 0xF) &&
24710 @@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
24711 while (instr < max_instr) {
24712 unsigned char opcode;
24713
24714 - if (probe_kernel_address(instr, opcode))
24715 + if (user_mode(regs)) {
24716 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24717 + break;
24718 + } else if (probe_kernel_address(instr, opcode))
24719 break;
24720
24721 instr++;
24722 @@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
24723 force_sig_info(si_signo, &info, tsk);
24724 }
24725
24726 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24727 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24728 +#endif
24729 +
24730 +#ifdef CONFIG_PAX_EMUTRAMP
24731 +static int pax_handle_fetch_fault(struct pt_regs *regs);
24732 +#endif
24733 +
24734 +#ifdef CONFIG_PAX_PAGEEXEC
24735 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
24736 +{
24737 + pgd_t *pgd;
24738 + pud_t *pud;
24739 + pmd_t *pmd;
24740 +
24741 + pgd = pgd_offset(mm, address);
24742 + if (!pgd_present(*pgd))
24743 + return NULL;
24744 + pud = pud_offset(pgd, address);
24745 + if (!pud_present(*pud))
24746 + return NULL;
24747 + pmd = pmd_offset(pud, address);
24748 + if (!pmd_present(*pmd))
24749 + return NULL;
24750 + return pmd;
24751 +}
24752 +#endif
24753 +
24754 DEFINE_SPINLOCK(pgd_lock);
24755 LIST_HEAD(pgd_list);
24756
24757 @@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
24758 for (address = VMALLOC_START & PMD_MASK;
24759 address >= TASK_SIZE && address < FIXADDR_TOP;
24760 address += PMD_SIZE) {
24761 +
24762 +#ifdef CONFIG_PAX_PER_CPU_PGD
24763 + unsigned long cpu;
24764 +#else
24765 struct page *page;
24766 +#endif
24767
24768 spin_lock(&pgd_lock);
24769 +
24770 +#ifdef CONFIG_PAX_PER_CPU_PGD
24771 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24772 + pgd_t *pgd = get_cpu_pgd(cpu);
24773 + pmd_t *ret;
24774 +#else
24775 list_for_each_entry(page, &pgd_list, lru) {
24776 + pgd_t *pgd = page_address(page);
24777 spinlock_t *pgt_lock;
24778 pmd_t *ret;
24779
24780 @@ -243,8 +296,13 @@ void vmalloc_sync_all(void)
24781 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24782
24783 spin_lock(pgt_lock);
24784 - ret = vmalloc_sync_one(page_address(page), address);
24785 +#endif
24786 +
24787 + ret = vmalloc_sync_one(pgd, address);
24788 +
24789 +#ifndef CONFIG_PAX_PER_CPU_PGD
24790 spin_unlock(pgt_lock);
24791 +#endif
24792
24793 if (!ret)
24794 break;
24795 @@ -278,6 +336,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
24796 * an interrupt in the middle of a task switch..
24797 */
24798 pgd_paddr = read_cr3();
24799 +
24800 +#ifdef CONFIG_PAX_PER_CPU_PGD
24801 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
24802 +#endif
24803 +
24804 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
24805 if (!pmd_k)
24806 return -1;
24807 @@ -373,7 +436,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
24808 * happen within a race in page table update. In the later
24809 * case just flush:
24810 */
24811 +
24812 +#ifdef CONFIG_PAX_PER_CPU_PGD
24813 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
24814 + pgd = pgd_offset_cpu(smp_processor_id(), address);
24815 +#else
24816 pgd = pgd_offset(current->active_mm, address);
24817 +#endif
24818 +
24819 pgd_ref = pgd_offset_k(address);
24820 if (pgd_none(*pgd_ref))
24821 return -1;
24822 @@ -541,7 +611,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
24823 static int is_errata100(struct pt_regs *regs, unsigned long address)
24824 {
24825 #ifdef CONFIG_X86_64
24826 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24827 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24828 return 1;
24829 #endif
24830 return 0;
24831 @@ -568,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24832 }
24833
24834 static const char nx_warning[] = KERN_CRIT
24835 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24836 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24837
24838 static void
24839 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24840 @@ -577,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24841 if (!oops_may_print())
24842 return;
24843
24844 - if (error_code & PF_INSTR) {
24845 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
24846 unsigned int level;
24847
24848 pte_t *pte = lookup_address(address, &level);
24849
24850 if (pte && pte_present(*pte) && !pte_exec(*pte))
24851 - printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
24852 + printk(nx_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
24853 }
24854
24855 +#ifdef CONFIG_PAX_KERNEXEC
24856 + if (init_mm.start_code <= address && address < init_mm.end_code) {
24857 + if (current->signal->curr_ip)
24858 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24859 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24860 + else
24861 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24862 + current->comm, task_pid_nr(current), current_uid(), current_euid());
24863 + }
24864 +#endif
24865 +
24866 printk(KERN_ALERT "BUG: unable to handle kernel ");
24867 if (address < PAGE_SIZE)
24868 printk(KERN_CONT "NULL pointer dereference");
24869 @@ -749,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24870 }
24871 #endif
24872
24873 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24874 + if (pax_is_fetch_fault(regs, error_code, address)) {
24875 +
24876 +#ifdef CONFIG_PAX_EMUTRAMP
24877 + switch (pax_handle_fetch_fault(regs)) {
24878 + case 2:
24879 + return;
24880 + }
24881 +#endif
24882 +
24883 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24884 + do_group_exit(SIGKILL);
24885 + }
24886 +#endif
24887 +
24888 if (unlikely(show_unhandled_signals))
24889 show_signal_msg(regs, error_code, address, tsk);
24890
24891 @@ -845,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24892 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
24893 printk(KERN_ERR
24894 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24895 - tsk->comm, tsk->pid, address);
24896 + tsk->comm, task_pid_nr(tsk), address);
24897 code = BUS_MCEERR_AR;
24898 }
24899 #endif
24900 @@ -901,6 +997,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24901 return 1;
24902 }
24903
24904 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24905 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24906 +{
24907 + pte_t *pte;
24908 + pmd_t *pmd;
24909 + spinlock_t *ptl;
24910 + unsigned char pte_mask;
24911 +
24912 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24913 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
24914 + return 0;
24915 +
24916 + /* PaX: it's our fault, let's handle it if we can */
24917 +
24918 + /* PaX: take a look at read faults before acquiring any locks */
24919 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24920 + /* instruction fetch attempt from a protected page in user mode */
24921 + up_read(&mm->mmap_sem);
24922 +
24923 +#ifdef CONFIG_PAX_EMUTRAMP
24924 + switch (pax_handle_fetch_fault(regs)) {
24925 + case 2:
24926 + return 1;
24927 + }
24928 +#endif
24929 +
24930 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24931 + do_group_exit(SIGKILL);
24932 + }
24933 +
24934 + pmd = pax_get_pmd(mm, address);
24935 + if (unlikely(!pmd))
24936 + return 0;
24937 +
24938 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24939 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24940 + pte_unmap_unlock(pte, ptl);
24941 + return 0;
24942 + }
24943 +
24944 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24945 + /* write attempt to a protected page in user mode */
24946 + pte_unmap_unlock(pte, ptl);
24947 + return 0;
24948 + }
24949 +
24950 +#ifdef CONFIG_SMP
24951 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24952 +#else
24953 + if (likely(address > get_limit(regs->cs)))
24954 +#endif
24955 + {
24956 + set_pte(pte, pte_mkread(*pte));
24957 + __flush_tlb_one(address);
24958 + pte_unmap_unlock(pte, ptl);
24959 + up_read(&mm->mmap_sem);
24960 + return 1;
24961 + }
24962 +
24963 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24964 +
24965 + /*
24966 + * PaX: fill DTLB with user rights and retry
24967 + */
24968 + __asm__ __volatile__ (
24969 + "orb %2,(%1)\n"
24970 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24971 +/*
24972 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24973 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24974 + * page fault when examined during a TLB load attempt. this is true not only
24975 + * for PTEs holding a non-present entry but also present entries that will
24976 + * raise a page fault (such as those set up by PaX, or the copy-on-write
24977 + * mechanism). in effect it means that we do *not* need to flush the TLBs
24978 + * for our target pages since their PTEs are simply not in the TLBs at all.
24979 +
24980 + * the best thing in omitting it is that we gain around 15-20% speed in the
24981 + * fast path of the page fault handler and can get rid of tracing since we
24982 + * can no longer flush unintended entries.
24983 + */
24984 + "invlpg (%0)\n"
24985 +#endif
24986 + __copyuser_seg"testb $0,(%0)\n"
24987 + "xorb %3,(%1)\n"
24988 + :
24989 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24990 + : "memory", "cc");
24991 + pte_unmap_unlock(pte, ptl);
24992 + up_read(&mm->mmap_sem);
24993 + return 1;
24994 +}
24995 +#endif
24996 +
24997 /*
24998 * Handle a spurious fault caused by a stale TLB entry.
24999 *
25000 @@ -973,6 +1162,9 @@ int show_unhandled_signals = 1;
25001 static inline int
25002 access_error(unsigned long error_code, struct vm_area_struct *vma)
25003 {
25004 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
25005 + return 1;
25006 +
25007 if (error_code & PF_WRITE) {
25008 /* write, present and write, not present: */
25009 if (unlikely(!(vma->vm_flags & VM_WRITE)))
25010 @@ -1001,7 +1193,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
25011 if (error_code & PF_USER)
25012 return false;
25013
25014 - if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
25015 + if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
25016 return false;
25017
25018 return true;
25019 @@ -1017,18 +1209,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
25020 {
25021 struct vm_area_struct *vma;
25022 struct task_struct *tsk;
25023 - unsigned long address;
25024 struct mm_struct *mm;
25025 int fault;
25026 int write = error_code & PF_WRITE;
25027 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
25028 (write ? FAULT_FLAG_WRITE : 0);
25029
25030 - tsk = current;
25031 - mm = tsk->mm;
25032 -
25033 /* Get the faulting address: */
25034 - address = read_cr2();
25035 + unsigned long address = read_cr2();
25036 +
25037 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25038 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
25039 + if (!search_exception_tables(regs->ip)) {
25040 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25041 + bad_area_nosemaphore(regs, error_code, address);
25042 + return;
25043 + }
25044 + if (address < PAX_USER_SHADOW_BASE) {
25045 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25046 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
25047 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
25048 + } else
25049 + address -= PAX_USER_SHADOW_BASE;
25050 + }
25051 +#endif
25052 +
25053 + tsk = current;
25054 + mm = tsk->mm;
25055
25056 /*
25057 * Detect and handle instructions that would cause a page fault for
25058 @@ -1089,7 +1296,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
25059 * User-mode registers count as a user access even for any
25060 * potential system fault or CPU buglet:
25061 */
25062 - if (user_mode_vm(regs)) {
25063 + if (user_mode(regs)) {
25064 local_irq_enable();
25065 error_code |= PF_USER;
25066 } else {
25067 @@ -1151,6 +1358,11 @@ retry:
25068 might_sleep();
25069 }
25070
25071 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25072 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
25073 + return;
25074 +#endif
25075 +
25076 vma = find_vma(mm, address);
25077 if (unlikely(!vma)) {
25078 bad_area(regs, error_code, address);
25079 @@ -1162,18 +1374,24 @@ retry:
25080 bad_area(regs, error_code, address);
25081 return;
25082 }
25083 - if (error_code & PF_USER) {
25084 - /*
25085 - * Accessing the stack below %sp is always a bug.
25086 - * The large cushion allows instructions like enter
25087 - * and pusha to work. ("enter $65535, $31" pushes
25088 - * 32 pointers and then decrements %sp by 65535.)
25089 - */
25090 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
25091 - bad_area(regs, error_code, address);
25092 - return;
25093 - }
25094 + /*
25095 + * Accessing the stack below %sp is always a bug.
25096 + * The large cushion allows instructions like enter
25097 + * and pusha to work. ("enter $65535, $31" pushes
25098 + * 32 pointers and then decrements %sp by 65535.)
25099 + */
25100 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
25101 + bad_area(regs, error_code, address);
25102 + return;
25103 }
25104 +
25105 +#ifdef CONFIG_PAX_SEGMEXEC
25106 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
25107 + bad_area(regs, error_code, address);
25108 + return;
25109 + }
25110 +#endif
25111 +
25112 if (unlikely(expand_stack(vma, address))) {
25113 bad_area(regs, error_code, address);
25114 return;
25115 @@ -1237,3 +1455,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25116 __do_page_fault(regs, error_code);
25117 exception_exit(regs);
25118 }
25119 +
25120 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25121 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
25122 +{
25123 + struct mm_struct *mm = current->mm;
25124 + unsigned long ip = regs->ip;
25125 +
25126 + if (v8086_mode(regs))
25127 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
25128 +
25129 +#ifdef CONFIG_PAX_PAGEEXEC
25130 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
25131 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
25132 + return true;
25133 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
25134 + return true;
25135 + return false;
25136 + }
25137 +#endif
25138 +
25139 +#ifdef CONFIG_PAX_SEGMEXEC
25140 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
25141 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
25142 + return true;
25143 + return false;
25144 + }
25145 +#endif
25146 +
25147 + return false;
25148 +}
25149 +#endif
25150 +
25151 +#ifdef CONFIG_PAX_EMUTRAMP
25152 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
25153 +{
25154 + int err;
25155 +
25156 + do { /* PaX: libffi trampoline emulation */
25157 + unsigned char mov, jmp;
25158 + unsigned int addr1, addr2;
25159 +
25160 +#ifdef CONFIG_X86_64
25161 + if ((regs->ip + 9) >> 32)
25162 + break;
25163 +#endif
25164 +
25165 + err = get_user(mov, (unsigned char __user *)regs->ip);
25166 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25167 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25168 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25169 +
25170 + if (err)
25171 + break;
25172 +
25173 + if (mov == 0xB8 && jmp == 0xE9) {
25174 + regs->ax = addr1;
25175 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25176 + return 2;
25177 + }
25178 + } while (0);
25179 +
25180 + do { /* PaX: gcc trampoline emulation #1 */
25181 + unsigned char mov1, mov2;
25182 + unsigned short jmp;
25183 + unsigned int addr1, addr2;
25184 +
25185 +#ifdef CONFIG_X86_64
25186 + if ((regs->ip + 11) >> 32)
25187 + break;
25188 +#endif
25189 +
25190 + err = get_user(mov1, (unsigned char __user *)regs->ip);
25191 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25192 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
25193 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25194 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
25195 +
25196 + if (err)
25197 + break;
25198 +
25199 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
25200 + regs->cx = addr1;
25201 + regs->ax = addr2;
25202 + regs->ip = addr2;
25203 + return 2;
25204 + }
25205 + } while (0);
25206 +
25207 + do { /* PaX: gcc trampoline emulation #2 */
25208 + unsigned char mov, jmp;
25209 + unsigned int addr1, addr2;
25210 +
25211 +#ifdef CONFIG_X86_64
25212 + if ((regs->ip + 9) >> 32)
25213 + break;
25214 +#endif
25215 +
25216 + err = get_user(mov, (unsigned char __user *)regs->ip);
25217 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25218 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25219 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25220 +
25221 + if (err)
25222 + break;
25223 +
25224 + if (mov == 0xB9 && jmp == 0xE9) {
25225 + regs->cx = addr1;
25226 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25227 + return 2;
25228 + }
25229 + } while (0);
25230 +
25231 + return 1; /* PaX in action */
25232 +}
25233 +
25234 +#ifdef CONFIG_X86_64
25235 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
25236 +{
25237 + int err;
25238 +
25239 + do { /* PaX: libffi trampoline emulation */
25240 + unsigned short mov1, mov2, jmp1;
25241 + unsigned char stcclc, jmp2;
25242 + unsigned long addr1, addr2;
25243 +
25244 + err = get_user(mov1, (unsigned short __user *)regs->ip);
25245 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25246 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25247 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25248 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
25249 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
25250 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
25251 +
25252 + if (err)
25253 + break;
25254 +
25255 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25256 + regs->r11 = addr1;
25257 + regs->r10 = addr2;
25258 + if (stcclc == 0xF8)
25259 + regs->flags &= ~X86_EFLAGS_CF;
25260 + else
25261 + regs->flags |= X86_EFLAGS_CF;
25262 + regs->ip = addr1;
25263 + return 2;
25264 + }
25265 + } while (0);
25266 +
25267 + do { /* PaX: gcc trampoline emulation #1 */
25268 + unsigned short mov1, mov2, jmp1;
25269 + unsigned char jmp2;
25270 + unsigned int addr1;
25271 + unsigned long addr2;
25272 +
25273 + err = get_user(mov1, (unsigned short __user *)regs->ip);
25274 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
25275 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
25276 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
25277 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
25278 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
25279 +
25280 + if (err)
25281 + break;
25282 +
25283 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25284 + regs->r11 = addr1;
25285 + regs->r10 = addr2;
25286 + regs->ip = addr1;
25287 + return 2;
25288 + }
25289 + } while (0);
25290 +
25291 + do { /* PaX: gcc trampoline emulation #2 */
25292 + unsigned short mov1, mov2, jmp1;
25293 + unsigned char jmp2;
25294 + unsigned long addr1, addr2;
25295 +
25296 + err = get_user(mov1, (unsigned short __user *)regs->ip);
25297 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25298 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25299 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25300 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
25301 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
25302 +
25303 + if (err)
25304 + break;
25305 +
25306 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25307 + regs->r11 = addr1;
25308 + regs->r10 = addr2;
25309 + regs->ip = addr1;
25310 + return 2;
25311 + }
25312 + } while (0);
25313 +
25314 + return 1; /* PaX in action */
25315 +}
25316 +#endif
25317 +
25318 +/*
25319 + * PaX: decide what to do with offenders (regs->ip = fault address)
25320 + *
25321 + * returns 1 when task should be killed
25322 + * 2 when gcc trampoline was detected
25323 + */
25324 +static int pax_handle_fetch_fault(struct pt_regs *regs)
25325 +{
25326 + if (v8086_mode(regs))
25327 + return 1;
25328 +
25329 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
25330 + return 1;
25331 +
25332 +#ifdef CONFIG_X86_32
25333 + return pax_handle_fetch_fault_32(regs);
25334 +#else
25335 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
25336 + return pax_handle_fetch_fault_32(regs);
25337 + else
25338 + return pax_handle_fetch_fault_64(regs);
25339 +#endif
25340 +}
25341 +#endif
25342 +
25343 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25344 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
25345 +{
25346 + long i;
25347 +
25348 + printk(KERN_ERR "PAX: bytes at PC: ");
25349 + for (i = 0; i < 20; i++) {
25350 + unsigned char c;
25351 + if (get_user(c, (unsigned char __force_user *)pc+i))
25352 + printk(KERN_CONT "?? ");
25353 + else
25354 + printk(KERN_CONT "%02x ", c);
25355 + }
25356 + printk("\n");
25357 +
25358 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
25359 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
25360 + unsigned long c;
25361 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
25362 +#ifdef CONFIG_X86_32
25363 + printk(KERN_CONT "???????? ");
25364 +#else
25365 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
25366 + printk(KERN_CONT "???????? ???????? ");
25367 + else
25368 + printk(KERN_CONT "???????????????? ");
25369 +#endif
25370 + } else {
25371 +#ifdef CONFIG_X86_64
25372 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
25373 + printk(KERN_CONT "%08x ", (unsigned int)c);
25374 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
25375 + } else
25376 +#endif
25377 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
25378 + }
25379 + }
25380 + printk("\n");
25381 +}
25382 +#endif
25383 +
25384 +/**
25385 + * probe_kernel_write(): safely attempt to write to a location
25386 + * @dst: address to write to
25387 + * @src: pointer to the data that shall be written
25388 + * @size: size of the data chunk
25389 + *
25390 + * Safely write to address @dst from the buffer at @src. If a kernel fault
25391 + * happens, handle that and return -EFAULT.
25392 + */
25393 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
25394 +{
25395 + long ret;
25396 + mm_segment_t old_fs = get_fs();
25397 +
25398 + set_fs(KERNEL_DS);
25399 + pagefault_disable();
25400 + pax_open_kernel();
25401 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
25402 + pax_close_kernel();
25403 + pagefault_enable();
25404 + set_fs(old_fs);
25405 +
25406 + return ret ? -EFAULT : 0;
25407 +}
25408 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
25409 index dd74e46..7d26398 100644
25410 --- a/arch/x86/mm/gup.c
25411 +++ b/arch/x86/mm/gup.c
25412 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
25413 addr = start;
25414 len = (unsigned long) nr_pages << PAGE_SHIFT;
25415 end = start + len;
25416 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25417 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25418 (void __user *)start, len)))
25419 return 0;
25420
25421 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
25422 index 6f31ee5..8ee4164 100644
25423 --- a/arch/x86/mm/highmem_32.c
25424 +++ b/arch/x86/mm/highmem_32.c
25425 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
25426 idx = type + KM_TYPE_NR*smp_processor_id();
25427 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25428 BUG_ON(!pte_none(*(kmap_pte-idx)));
25429 +
25430 + pax_open_kernel();
25431 set_pte(kmap_pte-idx, mk_pte(page, prot));
25432 + pax_close_kernel();
25433 +
25434 arch_flush_lazy_mmu_mode();
25435
25436 return (void *)vaddr;
25437 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
25438 index 937bff5..d777418 100644
25439 --- a/arch/x86/mm/hugetlbpage.c
25440 +++ b/arch/x86/mm/hugetlbpage.c
25441 @@ -276,13 +276,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
25442 struct hstate *h = hstate_file(file);
25443 struct mm_struct *mm = current->mm;
25444 struct vm_area_struct *vma;
25445 - unsigned long start_addr;
25446 + unsigned long start_addr, pax_task_size = TASK_SIZE;
25447 +
25448 +#ifdef CONFIG_PAX_SEGMEXEC
25449 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25450 + pax_task_size = SEGMEXEC_TASK_SIZE;
25451 +#endif
25452 +
25453 + pax_task_size -= PAGE_SIZE;
25454
25455 if (len > mm->cached_hole_size) {
25456 - start_addr = mm->free_area_cache;
25457 + start_addr = mm->free_area_cache;
25458 } else {
25459 - start_addr = TASK_UNMAPPED_BASE;
25460 - mm->cached_hole_size = 0;
25461 + start_addr = mm->mmap_base;
25462 + mm->cached_hole_size = 0;
25463 }
25464
25465 full_search:
25466 @@ -290,26 +297,27 @@ full_search:
25467
25468 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
25469 /* At this point: (!vma || addr < vma->vm_end). */
25470 - if (TASK_SIZE - len < addr) {
25471 + if (pax_task_size - len < addr) {
25472 /*
25473 * Start a new search - just in case we missed
25474 * some holes.
25475 */
25476 - if (start_addr != TASK_UNMAPPED_BASE) {
25477 - start_addr = TASK_UNMAPPED_BASE;
25478 + if (start_addr != mm->mmap_base) {
25479 + start_addr = mm->mmap_base;
25480 mm->cached_hole_size = 0;
25481 goto full_search;
25482 }
25483 return -ENOMEM;
25484 }
25485 - if (!vma || addr + len <= vma->vm_start) {
25486 - mm->free_area_cache = addr + len;
25487 - return addr;
25488 - }
25489 + if (check_heap_stack_gap(vma, addr, len))
25490 + break;
25491 if (addr + mm->cached_hole_size < vma->vm_start)
25492 mm->cached_hole_size = vma->vm_start - addr;
25493 addr = ALIGN(vma->vm_end, huge_page_size(h));
25494 }
25495 +
25496 + mm->free_area_cache = addr + len;
25497 + return addr;
25498 }
25499
25500 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25501 @@ -320,9 +328,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25502 struct mm_struct *mm = current->mm;
25503 struct vm_area_struct *vma;
25504 unsigned long base = mm->mmap_base;
25505 - unsigned long addr = addr0;
25506 + unsigned long addr;
25507 unsigned long largest_hole = mm->cached_hole_size;
25508 - unsigned long start_addr;
25509
25510 /* don't allow allocations above current base */
25511 if (mm->free_area_cache > base)
25512 @@ -332,16 +339,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25513 largest_hole = 0;
25514 mm->free_area_cache = base;
25515 }
25516 -try_again:
25517 - start_addr = mm->free_area_cache;
25518
25519 /* make sure it can fit in the remaining address space */
25520 if (mm->free_area_cache < len)
25521 goto fail;
25522
25523 /* either no address requested or can't fit in requested address hole */
25524 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
25525 + addr = mm->free_area_cache - len;
25526 do {
25527 + addr &= huge_page_mask(h);
25528 /*
25529 * Lookup failure means no vma is above this address,
25530 * i.e. return with success:
25531 @@ -350,10 +356,10 @@ try_again:
25532 if (!vma)
25533 return addr;
25534
25535 - if (addr + len <= vma->vm_start) {
25536 + if (check_heap_stack_gap(vma, addr, len)) {
25537 /* remember the address as a hint for next time */
25538 - mm->cached_hole_size = largest_hole;
25539 - return (mm->free_area_cache = addr);
25540 + mm->cached_hole_size = largest_hole;
25541 + return (mm->free_area_cache = addr);
25542 } else if (mm->free_area_cache == vma->vm_end) {
25543 /* pull free_area_cache down to the first hole */
25544 mm->free_area_cache = vma->vm_start;
25545 @@ -362,29 +368,34 @@ try_again:
25546
25547 /* remember the largest hole we saw so far */
25548 if (addr + largest_hole < vma->vm_start)
25549 - largest_hole = vma->vm_start - addr;
25550 + largest_hole = vma->vm_start - addr;
25551
25552 /* try just below the current vma->vm_start */
25553 - addr = (vma->vm_start - len) & huge_page_mask(h);
25554 - } while (len <= vma->vm_start);
25555 + addr = skip_heap_stack_gap(vma, len);
25556 + } while (!IS_ERR_VALUE(addr));
25557
25558 fail:
25559 /*
25560 - * if hint left us with no space for the requested
25561 - * mapping then try again:
25562 - */
25563 - if (start_addr != base) {
25564 - mm->free_area_cache = base;
25565 - largest_hole = 0;
25566 - goto try_again;
25567 - }
25568 - /*
25569 * A failed mmap() very likely causes application failure,
25570 * so fall back to the bottom-up function here. This scenario
25571 * can happen with large stack limits and large mmap()
25572 * allocations.
25573 */
25574 - mm->free_area_cache = TASK_UNMAPPED_BASE;
25575 +
25576 +#ifdef CONFIG_PAX_SEGMEXEC
25577 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25578 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
25579 + else
25580 +#endif
25581 +
25582 + mm->mmap_base = TASK_UNMAPPED_BASE;
25583 +
25584 +#ifdef CONFIG_PAX_RANDMMAP
25585 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25586 + mm->mmap_base += mm->delta_mmap;
25587 +#endif
25588 +
25589 + mm->free_area_cache = mm->mmap_base;
25590 mm->cached_hole_size = ~0UL;
25591 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25592 len, pgoff, flags);
25593 @@ -392,6 +403,7 @@ fail:
25594 /*
25595 * Restore the topdown base:
25596 */
25597 + mm->mmap_base = base;
25598 mm->free_area_cache = base;
25599 mm->cached_hole_size = ~0UL;
25600
25601 @@ -405,10 +417,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25602 struct hstate *h = hstate_file(file);
25603 struct mm_struct *mm = current->mm;
25604 struct vm_area_struct *vma;
25605 + unsigned long pax_task_size = TASK_SIZE;
25606
25607 if (len & ~huge_page_mask(h))
25608 return -EINVAL;
25609 - if (len > TASK_SIZE)
25610 +
25611 +#ifdef CONFIG_PAX_SEGMEXEC
25612 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25613 + pax_task_size = SEGMEXEC_TASK_SIZE;
25614 +#endif
25615 +
25616 + pax_task_size -= PAGE_SIZE;
25617 +
25618 + if (len > pax_task_size)
25619 return -ENOMEM;
25620
25621 if (flags & MAP_FIXED) {
25622 @@ -420,8 +441,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25623 if (addr) {
25624 addr = ALIGN(addr, huge_page_size(h));
25625 vma = find_vma(mm, addr);
25626 - if (TASK_SIZE - len >= addr &&
25627 - (!vma || addr + len <= vma->vm_start))
25628 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
25629 return addr;
25630 }
25631 if (mm->get_unmapped_area == arch_get_unmapped_area)
25632 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
25633 index d7aea41..0fc945b 100644
25634 --- a/arch/x86/mm/init.c
25635 +++ b/arch/x86/mm/init.c
25636 @@ -4,6 +4,7 @@
25637 #include <linux/swap.h>
25638 #include <linux/memblock.h>
25639 #include <linux/bootmem.h> /* for max_low_pfn */
25640 +#include <linux/tboot.h>
25641
25642 #include <asm/cacheflush.h>
25643 #include <asm/e820.h>
25644 @@ -16,6 +17,8 @@
25645 #include <asm/tlb.h>
25646 #include <asm/proto.h>
25647 #include <asm/dma.h> /* for MAX_DMA_PFN */
25648 +#include <asm/desc.h>
25649 +#include <asm/bios_ebda.h>
25650
25651 unsigned long __initdata pgt_buf_start;
25652 unsigned long __meminitdata pgt_buf_end;
25653 @@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
25654 {
25655 int i;
25656 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
25657 - unsigned long start = 0, good_end;
25658 + unsigned long start = 0x100000, good_end;
25659 phys_addr_t base;
25660
25661 for (i = 0; i < nr_range; i++) {
25662 @@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25663 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
25664 * mmio resources as well as potential bios/acpi data regions.
25665 */
25666 +
25667 +#ifdef CONFIG_GRKERNSEC_KMEM
25668 +static unsigned int ebda_start __read_only;
25669 +static unsigned int ebda_end __read_only;
25670 +#endif
25671 +
25672 int devmem_is_allowed(unsigned long pagenr)
25673 {
25674 - if (pagenr < 256)
25675 +#ifdef CONFIG_GRKERNSEC_KMEM
25676 + /* allow BDA */
25677 + if (!pagenr)
25678 return 1;
25679 + /* allow EBDA */
25680 + if (pagenr >= ebda_start && pagenr < ebda_end)
25681 + return 1;
25682 + /* if tboot is in use, allow access to its hardcoded serial log range */
25683 + if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
25684 + return 1;
25685 +#else
25686 + if (!pagenr)
25687 + return 1;
25688 +#ifdef CONFIG_VM86
25689 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
25690 + return 1;
25691 +#endif
25692 +#endif
25693 +
25694 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25695 + return 1;
25696 +#ifdef CONFIG_GRKERNSEC_KMEM
25697 + /* throw out everything else below 1MB */
25698 + if (pagenr <= 256)
25699 + return 0;
25700 +#endif
25701 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25702 return 0;
25703 if (!page_is_ram(pagenr))
25704 @@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
25705 #endif
25706 }
25707
25708 +#ifdef CONFIG_GRKERNSEC_KMEM
25709 +static inline void gr_init_ebda(void)
25710 +{
25711 + unsigned int ebda_addr;
25712 + unsigned int ebda_size = 0;
25713 +
25714 + ebda_addr = get_bios_ebda();
25715 + if (ebda_addr) {
25716 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
25717 + ebda_size <<= 10;
25718 + }
25719 + if (ebda_addr && ebda_size) {
25720 + ebda_start = ebda_addr >> PAGE_SHIFT;
25721 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
25722 + } else {
25723 + ebda_start = 0x9f000 >> PAGE_SHIFT;
25724 + ebda_end = 0xa0000 >> PAGE_SHIFT;
25725 + }
25726 +}
25727 +#else
25728 +static inline void gr_init_ebda(void) { }
25729 +#endif
25730 +
25731 void free_initmem(void)
25732 {
25733 +#ifdef CONFIG_PAX_KERNEXEC
25734 +#ifdef CONFIG_X86_32
25735 + /* PaX: limit KERNEL_CS to actual size */
25736 + unsigned long addr, limit;
25737 + struct desc_struct d;
25738 + int cpu;
25739 +#else
25740 + pgd_t *pgd;
25741 + pud_t *pud;
25742 + pmd_t *pmd;
25743 + unsigned long addr, end;
25744 +#endif
25745 +#endif
25746 +
25747 + gr_init_ebda();
25748 +
25749 +#ifdef CONFIG_PAX_KERNEXEC
25750 +#ifdef CONFIG_X86_32
25751 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
25752 + limit = (limit - 1UL) >> PAGE_SHIFT;
25753 +
25754 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
25755 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25756 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
25757 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
25758 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
25759 + }
25760 +
25761 + /* PaX: make KERNEL_CS read-only */
25762 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
25763 + if (!paravirt_enabled())
25764 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
25765 +/*
25766 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
25767 + pgd = pgd_offset_k(addr);
25768 + pud = pud_offset(pgd, addr);
25769 + pmd = pmd_offset(pud, addr);
25770 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25771 + }
25772 +*/
25773 +#ifdef CONFIG_X86_PAE
25774 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
25775 +/*
25776 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
25777 + pgd = pgd_offset_k(addr);
25778 + pud = pud_offset(pgd, addr);
25779 + pmd = pmd_offset(pud, addr);
25780 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25781 + }
25782 +*/
25783 +#endif
25784 +
25785 +#ifdef CONFIG_MODULES
25786 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
25787 +#endif
25788 +
25789 +#else
25790 + /* PaX: make kernel code/rodata read-only, rest non-executable */
25791 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
25792 + pgd = pgd_offset_k(addr);
25793 + pud = pud_offset(pgd, addr);
25794 + pmd = pmd_offset(pud, addr);
25795 + if (!pmd_present(*pmd))
25796 + continue;
25797 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
25798 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25799 + else
25800 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25801 + }
25802 +
25803 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
25804 + end = addr + KERNEL_IMAGE_SIZE;
25805 + for (; addr < end; addr += PMD_SIZE) {
25806 + pgd = pgd_offset_k(addr);
25807 + pud = pud_offset(pgd, addr);
25808 + pmd = pmd_offset(pud, addr);
25809 + if (!pmd_present(*pmd))
25810 + continue;
25811 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
25812 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25813 + }
25814 +#endif
25815 +
25816 + flush_tlb_all();
25817 +#endif
25818 +
25819 free_init_pages("unused kernel memory",
25820 (unsigned long)(&__init_begin),
25821 (unsigned long)(&__init_end));
25822 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
25823 index 11a5800..4bd9977 100644
25824 --- a/arch/x86/mm/init_32.c
25825 +++ b/arch/x86/mm/init_32.c
25826 @@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
25827 }
25828
25829 /*
25830 - * Creates a middle page table and puts a pointer to it in the
25831 - * given global directory entry. This only returns the gd entry
25832 - * in non-PAE compilation mode, since the middle layer is folded.
25833 - */
25834 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
25835 -{
25836 - pud_t *pud;
25837 - pmd_t *pmd_table;
25838 -
25839 -#ifdef CONFIG_X86_PAE
25840 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25841 - if (after_bootmem)
25842 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
25843 - else
25844 - pmd_table = (pmd_t *)alloc_low_page();
25845 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25846 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25847 - pud = pud_offset(pgd, 0);
25848 - BUG_ON(pmd_table != pmd_offset(pud, 0));
25849 -
25850 - return pmd_table;
25851 - }
25852 -#endif
25853 - pud = pud_offset(pgd, 0);
25854 - pmd_table = pmd_offset(pud, 0);
25855 -
25856 - return pmd_table;
25857 -}
25858 -
25859 -/*
25860 * Create a page table and place a pointer to it in a middle page
25861 * directory entry:
25862 */
25863 @@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25864 page_table = (pte_t *)alloc_low_page();
25865
25866 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25867 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25868 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25869 +#else
25870 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25871 +#endif
25872 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25873 }
25874
25875 return pte_offset_kernel(pmd, 0);
25876 }
25877
25878 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
25879 +{
25880 + pud_t *pud;
25881 + pmd_t *pmd_table;
25882 +
25883 + pud = pud_offset(pgd, 0);
25884 + pmd_table = pmd_offset(pud, 0);
25885 +
25886 + return pmd_table;
25887 +}
25888 +
25889 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25890 {
25891 int pgd_idx = pgd_index(vaddr);
25892 @@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25893 int pgd_idx, pmd_idx;
25894 unsigned long vaddr;
25895 pgd_t *pgd;
25896 + pud_t *pud;
25897 pmd_t *pmd;
25898 pte_t *pte = NULL;
25899
25900 @@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25901 pgd = pgd_base + pgd_idx;
25902
25903 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25904 - pmd = one_md_table_init(pgd);
25905 - pmd = pmd + pmd_index(vaddr);
25906 + pud = pud_offset(pgd, vaddr);
25907 + pmd = pmd_offset(pud, vaddr);
25908 +
25909 +#ifdef CONFIG_X86_PAE
25910 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25911 +#endif
25912 +
25913 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25914 pmd++, pmd_idx++) {
25915 pte = page_table_kmap_check(one_page_table_init(pmd),
25916 @@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25917 }
25918 }
25919
25920 -static inline int is_kernel_text(unsigned long addr)
25921 +static inline int is_kernel_text(unsigned long start, unsigned long end)
25922 {
25923 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
25924 - return 1;
25925 - return 0;
25926 + if ((start > ktla_ktva((unsigned long)_etext) ||
25927 + end <= ktla_ktva((unsigned long)_stext)) &&
25928 + (start > ktla_ktva((unsigned long)_einittext) ||
25929 + end <= ktla_ktva((unsigned long)_sinittext)) &&
25930 +
25931 +#ifdef CONFIG_ACPI_SLEEP
25932 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25933 +#endif
25934 +
25935 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25936 + return 0;
25937 + return 1;
25938 }
25939
25940 /*
25941 @@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
25942 unsigned long last_map_addr = end;
25943 unsigned long start_pfn, end_pfn;
25944 pgd_t *pgd_base = swapper_pg_dir;
25945 - int pgd_idx, pmd_idx, pte_ofs;
25946 + unsigned int pgd_idx, pmd_idx, pte_ofs;
25947 unsigned long pfn;
25948 pgd_t *pgd;
25949 + pud_t *pud;
25950 pmd_t *pmd;
25951 pte_t *pte;
25952 unsigned pages_2m, pages_4k;
25953 @@ -280,8 +281,13 @@ repeat:
25954 pfn = start_pfn;
25955 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25956 pgd = pgd_base + pgd_idx;
25957 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25958 - pmd = one_md_table_init(pgd);
25959 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25960 + pud = pud_offset(pgd, 0);
25961 + pmd = pmd_offset(pud, 0);
25962 +
25963 +#ifdef CONFIG_X86_PAE
25964 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25965 +#endif
25966
25967 if (pfn >= end_pfn)
25968 continue;
25969 @@ -293,14 +299,13 @@ repeat:
25970 #endif
25971 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25972 pmd++, pmd_idx++) {
25973 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25974 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25975
25976 /*
25977 * Map with big pages if possible, otherwise
25978 * create normal page tables:
25979 */
25980 if (use_pse) {
25981 - unsigned int addr2;
25982 pgprot_t prot = PAGE_KERNEL_LARGE;
25983 /*
25984 * first pass will use the same initial
25985 @@ -310,11 +315,7 @@ repeat:
25986 __pgprot(PTE_IDENT_ATTR |
25987 _PAGE_PSE);
25988
25989 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25990 - PAGE_OFFSET + PAGE_SIZE-1;
25991 -
25992 - if (is_kernel_text(addr) ||
25993 - is_kernel_text(addr2))
25994 + if (is_kernel_text(address, address + PMD_SIZE))
25995 prot = PAGE_KERNEL_LARGE_EXEC;
25996
25997 pages_2m++;
25998 @@ -331,7 +332,7 @@ repeat:
25999 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26000 pte += pte_ofs;
26001 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
26002 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
26003 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
26004 pgprot_t prot = PAGE_KERNEL;
26005 /*
26006 * first pass will use the same initial
26007 @@ -339,7 +340,7 @@ repeat:
26008 */
26009 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
26010
26011 - if (is_kernel_text(addr))
26012 + if (is_kernel_text(address, address + PAGE_SIZE))
26013 prot = PAGE_KERNEL_EXEC;
26014
26015 pages_4k++;
26016 @@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
26017
26018 pud = pud_offset(pgd, va);
26019 pmd = pmd_offset(pud, va);
26020 - if (!pmd_present(*pmd))
26021 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
26022 break;
26023
26024 pte = pte_offset_kernel(pmd, va);
26025 @@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
26026
26027 static void __init pagetable_init(void)
26028 {
26029 - pgd_t *pgd_base = swapper_pg_dir;
26030 -
26031 - permanent_kmaps_init(pgd_base);
26032 + permanent_kmaps_init(swapper_pg_dir);
26033 }
26034
26035 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26036 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26037 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26038
26039 /* user-defined highmem size */
26040 @@ -731,6 +730,12 @@ void __init mem_init(void)
26041
26042 pci_iommu_alloc();
26043
26044 +#ifdef CONFIG_PAX_PER_CPU_PGD
26045 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26046 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26047 + KERNEL_PGD_PTRS);
26048 +#endif
26049 +
26050 #ifdef CONFIG_FLATMEM
26051 BUG_ON(!mem_map);
26052 #endif
26053 @@ -757,7 +762,7 @@ void __init mem_init(void)
26054 reservedpages++;
26055
26056 codesize = (unsigned long) &_etext - (unsigned long) &_text;
26057 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
26058 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
26059 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
26060
26061 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
26062 @@ -798,10 +803,10 @@ void __init mem_init(void)
26063 ((unsigned long)&__init_end -
26064 (unsigned long)&__init_begin) >> 10,
26065
26066 - (unsigned long)&_etext, (unsigned long)&_edata,
26067 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
26068 + (unsigned long)&_sdata, (unsigned long)&_edata,
26069 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
26070
26071 - (unsigned long)&_text, (unsigned long)&_etext,
26072 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
26073 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
26074
26075 /*
26076 @@ -879,6 +884,7 @@ void set_kernel_text_rw(void)
26077 if (!kernel_set_to_readonly)
26078 return;
26079
26080 + start = ktla_ktva(start);
26081 pr_debug("Set kernel text: %lx - %lx for read write\n",
26082 start, start+size);
26083
26084 @@ -893,6 +899,7 @@ void set_kernel_text_ro(void)
26085 if (!kernel_set_to_readonly)
26086 return;
26087
26088 + start = ktla_ktva(start);
26089 pr_debug("Set kernel text: %lx - %lx for read only\n",
26090 start, start+size);
26091
26092 @@ -921,6 +928,7 @@ void mark_rodata_ro(void)
26093 unsigned long start = PFN_ALIGN(_text);
26094 unsigned long size = PFN_ALIGN(_etext) - start;
26095
26096 + start = ktla_ktva(start);
26097 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
26098 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
26099 size >> 10);
26100 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
26101 index 3baff25..8b37564 100644
26102 --- a/arch/x86/mm/init_64.c
26103 +++ b/arch/x86/mm/init_64.c
26104 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
26105 * around without checking the pgd every time.
26106 */
26107
26108 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
26109 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
26110 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26111
26112 int force_personality32;
26113 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26114
26115 for (address = start; address <= end; address += PGDIR_SIZE) {
26116 const pgd_t *pgd_ref = pgd_offset_k(address);
26117 +
26118 +#ifdef CONFIG_PAX_PER_CPU_PGD
26119 + unsigned long cpu;
26120 +#else
26121 struct page *page;
26122 +#endif
26123
26124 if (pgd_none(*pgd_ref))
26125 continue;
26126
26127 spin_lock(&pgd_lock);
26128 +
26129 +#ifdef CONFIG_PAX_PER_CPU_PGD
26130 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26131 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
26132 +#else
26133 list_for_each_entry(page, &pgd_list, lru) {
26134 pgd_t *pgd;
26135 spinlock_t *pgt_lock;
26136 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26137 /* the pgt_lock only for Xen */
26138 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
26139 spin_lock(pgt_lock);
26140 +#endif
26141
26142 if (pgd_none(*pgd))
26143 set_pgd(pgd, *pgd_ref);
26144 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26145 BUG_ON(pgd_page_vaddr(*pgd)
26146 != pgd_page_vaddr(*pgd_ref));
26147
26148 +#ifndef CONFIG_PAX_PER_CPU_PGD
26149 spin_unlock(pgt_lock);
26150 +#endif
26151 +
26152 }
26153 spin_unlock(&pgd_lock);
26154 }
26155 @@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
26156 {
26157 if (pgd_none(*pgd)) {
26158 pud_t *pud = (pud_t *)spp_getpage();
26159 - pgd_populate(&init_mm, pgd, pud);
26160 + pgd_populate_kernel(&init_mm, pgd, pud);
26161 if (pud != pud_offset(pgd, 0))
26162 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
26163 pud, pud_offset(pgd, 0));
26164 @@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
26165 {
26166 if (pud_none(*pud)) {
26167 pmd_t *pmd = (pmd_t *) spp_getpage();
26168 - pud_populate(&init_mm, pud, pmd);
26169 + pud_populate_kernel(&init_mm, pud, pmd);
26170 if (pmd != pmd_offset(pud, 0))
26171 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
26172 pmd, pmd_offset(pud, 0));
26173 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
26174 pmd = fill_pmd(pud, vaddr);
26175 pte = fill_pte(pmd, vaddr);
26176
26177 + pax_open_kernel();
26178 set_pte(pte, new_pte);
26179 + pax_close_kernel();
26180
26181 /*
26182 * It's enough to flush this one mapping.
26183 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
26184 pgd = pgd_offset_k((unsigned long)__va(phys));
26185 if (pgd_none(*pgd)) {
26186 pud = (pud_t *) spp_getpage();
26187 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
26188 - _PAGE_USER));
26189 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
26190 }
26191 pud = pud_offset(pgd, (unsigned long)__va(phys));
26192 if (pud_none(*pud)) {
26193 pmd = (pmd_t *) spp_getpage();
26194 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
26195 - _PAGE_USER));
26196 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
26197 }
26198 pmd = pmd_offset(pud, phys);
26199 BUG_ON(!pmd_none(*pmd));
26200 @@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
26201 if (pfn >= pgt_buf_top)
26202 panic("alloc_low_page: ran out of memory");
26203
26204 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
26205 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
26206 clear_page(adr);
26207 *phys = pfn * PAGE_SIZE;
26208 return adr;
26209 @@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
26210
26211 phys = __pa(virt);
26212 left = phys & (PAGE_SIZE - 1);
26213 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
26214 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
26215 adr = (void *)(((unsigned long)adr) | left);
26216
26217 return adr;
26218 @@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
26219 unmap_low_page(pmd);
26220
26221 spin_lock(&init_mm.page_table_lock);
26222 - pud_populate(&init_mm, pud, __va(pmd_phys));
26223 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
26224 spin_unlock(&init_mm.page_table_lock);
26225 }
26226 __flush_tlb_all();
26227 @@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
26228 unmap_low_page(pud);
26229
26230 spin_lock(&init_mm.page_table_lock);
26231 - pgd_populate(&init_mm, pgd, __va(pud_phys));
26232 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
26233 spin_unlock(&init_mm.page_table_lock);
26234 pgd_changed = true;
26235 }
26236 @@ -691,6 +705,12 @@ void __init mem_init(void)
26237
26238 pci_iommu_alloc();
26239
26240 +#ifdef CONFIG_PAX_PER_CPU_PGD
26241 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26242 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26243 + KERNEL_PGD_PTRS);
26244 +#endif
26245 +
26246 /* clear_bss() already clear the empty_zero_page */
26247
26248 reservedpages = 0;
26249 @@ -851,8 +871,8 @@ int kern_addr_valid(unsigned long addr)
26250 static struct vm_area_struct gate_vma = {
26251 .vm_start = VSYSCALL_START,
26252 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
26253 - .vm_page_prot = PAGE_READONLY_EXEC,
26254 - .vm_flags = VM_READ | VM_EXEC
26255 + .vm_page_prot = PAGE_READONLY,
26256 + .vm_flags = VM_READ
26257 };
26258
26259 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26260 @@ -886,7 +906,7 @@ int in_gate_area_no_mm(unsigned long addr)
26261
26262 const char *arch_vma_name(struct vm_area_struct *vma)
26263 {
26264 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26265 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26266 return "[vdso]";
26267 if (vma == &gate_vma)
26268 return "[vsyscall]";
26269 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
26270 index 7b179b4..6bd1777 100644
26271 --- a/arch/x86/mm/iomap_32.c
26272 +++ b/arch/x86/mm/iomap_32.c
26273 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
26274 type = kmap_atomic_idx_push();
26275 idx = type + KM_TYPE_NR * smp_processor_id();
26276 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26277 +
26278 + pax_open_kernel();
26279 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
26280 + pax_close_kernel();
26281 +
26282 arch_flush_lazy_mmu_mode();
26283
26284 return (void *)vaddr;
26285 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
26286 index 78fe3f1..2f9433c 100644
26287 --- a/arch/x86/mm/ioremap.c
26288 +++ b/arch/x86/mm/ioremap.c
26289 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
26290 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
26291 int is_ram = page_is_ram(pfn);
26292
26293 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
26294 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
26295 return NULL;
26296 WARN_ON_ONCE(is_ram);
26297 }
26298 @@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
26299 *
26300 * Caller must ensure there is only one unmapping for the same pointer.
26301 */
26302 -void iounmap(volatile void __iomem *addr)
26303 +void iounmap(const volatile void __iomem *addr)
26304 {
26305 struct vm_struct *p, *o;
26306
26307 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
26308
26309 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
26310 if (page_is_ram(start >> PAGE_SHIFT))
26311 +#ifdef CONFIG_HIGHMEM
26312 + if ((start >> PAGE_SHIFT) < max_low_pfn)
26313 +#endif
26314 return __va(phys);
26315
26316 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
26317 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
26318 early_param("early_ioremap_debug", early_ioremap_debug_setup);
26319
26320 static __initdata int after_paging_init;
26321 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
26322 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
26323
26324 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
26325 {
26326 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
26327 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
26328
26329 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
26330 - memset(bm_pte, 0, sizeof(bm_pte));
26331 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
26332 + pmd_populate_user(&init_mm, pmd, bm_pte);
26333
26334 /*
26335 * The boot-ioremap range spans multiple pmds, for which
26336 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
26337 index d87dd6d..bf3fa66 100644
26338 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
26339 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
26340 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
26341 * memory (e.g. tracked pages)? For now, we need this to avoid
26342 * invoking kmemcheck for PnP BIOS calls.
26343 */
26344 - if (regs->flags & X86_VM_MASK)
26345 + if (v8086_mode(regs))
26346 return false;
26347 - if (regs->cs != __KERNEL_CS)
26348 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
26349 return false;
26350
26351 pte = kmemcheck_pte_lookup(address);
26352 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
26353 index 845df68..1d8d29f 100644
26354 --- a/arch/x86/mm/mmap.c
26355 +++ b/arch/x86/mm/mmap.c
26356 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
26357 * Leave an at least ~128 MB hole with possible stack randomization.
26358 */
26359 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
26360 -#define MAX_GAP (TASK_SIZE/6*5)
26361 +#define MAX_GAP (pax_task_size/6*5)
26362
26363 static int mmap_is_legacy(void)
26364 {
26365 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
26366 return rnd << PAGE_SHIFT;
26367 }
26368
26369 -static unsigned long mmap_base(void)
26370 +static unsigned long mmap_base(struct mm_struct *mm)
26371 {
26372 unsigned long gap = rlimit(RLIMIT_STACK);
26373 + unsigned long pax_task_size = TASK_SIZE;
26374 +
26375 +#ifdef CONFIG_PAX_SEGMEXEC
26376 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
26377 + pax_task_size = SEGMEXEC_TASK_SIZE;
26378 +#endif
26379
26380 if (gap < MIN_GAP)
26381 gap = MIN_GAP;
26382 else if (gap > MAX_GAP)
26383 gap = MAX_GAP;
26384
26385 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
26386 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
26387 }
26388
26389 /*
26390 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
26391 * does, but not when emulating X86_32
26392 */
26393 -static unsigned long mmap_legacy_base(void)
26394 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
26395 {
26396 - if (mmap_is_ia32())
26397 + if (mmap_is_ia32()) {
26398 +
26399 +#ifdef CONFIG_PAX_SEGMEXEC
26400 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
26401 + return SEGMEXEC_TASK_UNMAPPED_BASE;
26402 + else
26403 +#endif
26404 +
26405 return TASK_UNMAPPED_BASE;
26406 - else
26407 + } else
26408 return TASK_UNMAPPED_BASE + mmap_rnd();
26409 }
26410
26411 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
26412 void arch_pick_mmap_layout(struct mm_struct *mm)
26413 {
26414 if (mmap_is_legacy()) {
26415 - mm->mmap_base = mmap_legacy_base();
26416 + mm->mmap_base = mmap_legacy_base(mm);
26417 +
26418 +#ifdef CONFIG_PAX_RANDMMAP
26419 + if (mm->pax_flags & MF_PAX_RANDMMAP)
26420 + mm->mmap_base += mm->delta_mmap;
26421 +#endif
26422 +
26423 mm->get_unmapped_area = arch_get_unmapped_area;
26424 mm->unmap_area = arch_unmap_area;
26425 } else {
26426 - mm->mmap_base = mmap_base();
26427 + mm->mmap_base = mmap_base(mm);
26428 +
26429 +#ifdef CONFIG_PAX_RANDMMAP
26430 + if (mm->pax_flags & MF_PAX_RANDMMAP)
26431 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
26432 +#endif
26433 +
26434 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
26435 mm->unmap_area = arch_unmap_area_topdown;
26436 }
26437 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
26438 index dc0b727..f612039 100644
26439 --- a/arch/x86/mm/mmio-mod.c
26440 +++ b/arch/x86/mm/mmio-mod.c
26441 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
26442 break;
26443 default:
26444 {
26445 - unsigned char *ip = (unsigned char *)instptr;
26446 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
26447 my_trace->opcode = MMIO_UNKNOWN_OP;
26448 my_trace->width = 0;
26449 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
26450 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
26451 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26452 void __iomem *addr)
26453 {
26454 - static atomic_t next_id;
26455 + static atomic_unchecked_t next_id;
26456 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
26457 /* These are page-unaligned. */
26458 struct mmiotrace_map map = {
26459 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26460 .private = trace
26461 },
26462 .phys = offset,
26463 - .id = atomic_inc_return(&next_id)
26464 + .id = atomic_inc_return_unchecked(&next_id)
26465 };
26466 map.map_id = trace->id;
26467
26468 @@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
26469 ioremap_trace_core(offset, size, addr);
26470 }
26471
26472 -static void iounmap_trace_core(volatile void __iomem *addr)
26473 +static void iounmap_trace_core(const volatile void __iomem *addr)
26474 {
26475 struct mmiotrace_map map = {
26476 .phys = 0,
26477 @@ -328,7 +328,7 @@ not_enabled:
26478 }
26479 }
26480
26481 -void mmiotrace_iounmap(volatile void __iomem *addr)
26482 +void mmiotrace_iounmap(const volatile void __iomem *addr)
26483 {
26484 might_sleep();
26485 if (is_enabled()) /* recheck and proper locking in *_core() */
26486 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
26487 index b008656..773eac2 100644
26488 --- a/arch/x86/mm/pageattr-test.c
26489 +++ b/arch/x86/mm/pageattr-test.c
26490 @@ -36,7 +36,7 @@ enum {
26491
26492 static int pte_testbit(pte_t pte)
26493 {
26494 - return pte_flags(pte) & _PAGE_UNUSED1;
26495 + return pte_flags(pte) & _PAGE_CPA_TEST;
26496 }
26497
26498 struct split_state {
26499 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
26500 index a718e0d..77419bc 100644
26501 --- a/arch/x86/mm/pageattr.c
26502 +++ b/arch/x86/mm/pageattr.c
26503 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26504 */
26505 #ifdef CONFIG_PCI_BIOS
26506 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
26507 - pgprot_val(forbidden) |= _PAGE_NX;
26508 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26509 #endif
26510
26511 /*
26512 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26513 * Does not cover __inittext since that is gone later on. On
26514 * 64bit we do not enforce !NX on the low mapping
26515 */
26516 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
26517 - pgprot_val(forbidden) |= _PAGE_NX;
26518 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
26519 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26520
26521 +#ifdef CONFIG_DEBUG_RODATA
26522 /*
26523 * The .rodata section needs to be read-only. Using the pfn
26524 * catches all aliases.
26525 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26526 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
26527 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
26528 pgprot_val(forbidden) |= _PAGE_RW;
26529 +#endif
26530
26531 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
26532 /*
26533 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26534 }
26535 #endif
26536
26537 +#ifdef CONFIG_PAX_KERNEXEC
26538 + if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
26539 + pgprot_val(forbidden) |= _PAGE_RW;
26540 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26541 + }
26542 +#endif
26543 +
26544 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
26545
26546 return prot;
26547 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
26548 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
26549 {
26550 /* change init_mm */
26551 + pax_open_kernel();
26552 set_pte_atomic(kpte, pte);
26553 +
26554 #ifdef CONFIG_X86_32
26555 if (!SHARED_KERNEL_PMD) {
26556 +
26557 +#ifdef CONFIG_PAX_PER_CPU_PGD
26558 + unsigned long cpu;
26559 +#else
26560 struct page *page;
26561 +#endif
26562
26563 +#ifdef CONFIG_PAX_PER_CPU_PGD
26564 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26565 + pgd_t *pgd = get_cpu_pgd(cpu);
26566 +#else
26567 list_for_each_entry(page, &pgd_list, lru) {
26568 - pgd_t *pgd;
26569 + pgd_t *pgd = (pgd_t *)page_address(page);
26570 +#endif
26571 +
26572 pud_t *pud;
26573 pmd_t *pmd;
26574
26575 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
26576 + pgd += pgd_index(address);
26577 pud = pud_offset(pgd, address);
26578 pmd = pmd_offset(pud, address);
26579 set_pte_atomic((pte_t *)pmd, pte);
26580 }
26581 }
26582 #endif
26583 + pax_close_kernel();
26584 }
26585
26586 static int
26587 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
26588 index 0eb572e..92f5c1e 100644
26589 --- a/arch/x86/mm/pat.c
26590 +++ b/arch/x86/mm/pat.c
26591 @@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
26592
26593 if (!entry) {
26594 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
26595 - current->comm, current->pid, start, end - 1);
26596 + current->comm, task_pid_nr(current), start, end - 1);
26597 return -EINVAL;
26598 }
26599
26600 @@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26601
26602 while (cursor < to) {
26603 if (!devmem_is_allowed(pfn)) {
26604 - printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
26605 - current->comm, from, to - 1);
26606 + printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
26607 + current->comm, from, to - 1, cursor);
26608 return 0;
26609 }
26610 cursor += PAGE_SIZE;
26611 @@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
26612 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
26613 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
26614 "for [mem %#010Lx-%#010Lx]\n",
26615 - current->comm, current->pid,
26616 + current->comm, task_pid_nr(current),
26617 cattr_name(flags),
26618 base, (unsigned long long)(base + size-1));
26619 return -EINVAL;
26620 @@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
26621 flags = lookup_memtype(paddr);
26622 if (want_flags != flags) {
26623 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
26624 - current->comm, current->pid,
26625 + current->comm, task_pid_nr(current),
26626 cattr_name(want_flags),
26627 (unsigned long long)paddr,
26628 (unsigned long long)(paddr + size - 1),
26629 @@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
26630 free_memtype(paddr, paddr + size);
26631 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
26632 " for [mem %#010Lx-%#010Lx], got %s\n",
26633 - current->comm, current->pid,
26634 + current->comm, task_pid_nr(current),
26635 cattr_name(want_flags),
26636 (unsigned long long)paddr,
26637 (unsigned long long)(paddr + size - 1),
26638 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
26639 index 9f0614d..92ae64a 100644
26640 --- a/arch/x86/mm/pf_in.c
26641 +++ b/arch/x86/mm/pf_in.c
26642 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
26643 int i;
26644 enum reason_type rv = OTHERS;
26645
26646 - p = (unsigned char *)ins_addr;
26647 + p = (unsigned char *)ktla_ktva(ins_addr);
26648 p += skip_prefix(p, &prf);
26649 p += get_opcode(p, &opcode);
26650
26651 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
26652 struct prefix_bits prf;
26653 int i;
26654
26655 - p = (unsigned char *)ins_addr;
26656 + p = (unsigned char *)ktla_ktva(ins_addr);
26657 p += skip_prefix(p, &prf);
26658 p += get_opcode(p, &opcode);
26659
26660 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
26661 struct prefix_bits prf;
26662 int i;
26663
26664 - p = (unsigned char *)ins_addr;
26665 + p = (unsigned char *)ktla_ktva(ins_addr);
26666 p += skip_prefix(p, &prf);
26667 p += get_opcode(p, &opcode);
26668
26669 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
26670 struct prefix_bits prf;
26671 int i;
26672
26673 - p = (unsigned char *)ins_addr;
26674 + p = (unsigned char *)ktla_ktva(ins_addr);
26675 p += skip_prefix(p, &prf);
26676 p += get_opcode(p, &opcode);
26677 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
26678 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
26679 struct prefix_bits prf;
26680 int i;
26681
26682 - p = (unsigned char *)ins_addr;
26683 + p = (unsigned char *)ktla_ktva(ins_addr);
26684 p += skip_prefix(p, &prf);
26685 p += get_opcode(p, &opcode);
26686 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
26687 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
26688 index 8573b83..4f3ed7e 100644
26689 --- a/arch/x86/mm/pgtable.c
26690 +++ b/arch/x86/mm/pgtable.c
26691 @@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
26692 list_del(&page->lru);
26693 }
26694
26695 -#define UNSHARED_PTRS_PER_PGD \
26696 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26697 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26698 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
26699
26700 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
26701 +{
26702 + unsigned int count = USER_PGD_PTRS;
26703
26704 + while (count--)
26705 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
26706 +}
26707 +#endif
26708 +
26709 +#ifdef CONFIG_PAX_PER_CPU_PGD
26710 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
26711 +{
26712 + unsigned int count = USER_PGD_PTRS;
26713 +
26714 + while (count--) {
26715 + pgd_t pgd;
26716 +
26717 +#ifdef CONFIG_X86_64
26718 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
26719 +#else
26720 + pgd = *src++;
26721 +#endif
26722 +
26723 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26724 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
26725 +#endif
26726 +
26727 + *dst++ = pgd;
26728 + }
26729 +
26730 +}
26731 +#endif
26732 +
26733 +#ifdef CONFIG_X86_64
26734 +#define pxd_t pud_t
26735 +#define pyd_t pgd_t
26736 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
26737 +#define pxd_free(mm, pud) pud_free((mm), (pud))
26738 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
26739 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
26740 +#define PYD_SIZE PGDIR_SIZE
26741 +#else
26742 +#define pxd_t pmd_t
26743 +#define pyd_t pud_t
26744 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
26745 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
26746 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
26747 +#define pyd_offset(mm, address) pud_offset((mm), (address))
26748 +#define PYD_SIZE PUD_SIZE
26749 +#endif
26750 +
26751 +#ifdef CONFIG_PAX_PER_CPU_PGD
26752 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
26753 +static inline void pgd_dtor(pgd_t *pgd) {}
26754 +#else
26755 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
26756 {
26757 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
26758 @@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
26759 pgd_list_del(pgd);
26760 spin_unlock(&pgd_lock);
26761 }
26762 +#endif
26763
26764 /*
26765 * List of all pgd's needed for non-PAE so it can invalidate entries
26766 @@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
26767 * -- wli
26768 */
26769
26770 -#ifdef CONFIG_X86_PAE
26771 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26772 /*
26773 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26774 * updating the top-level pagetable entries to guarantee the
26775 @@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
26776 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26777 * and initialize the kernel pmds here.
26778 */
26779 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26780 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26781
26782 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26783 {
26784 @@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26785 */
26786 flush_tlb_mm(mm);
26787 }
26788 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26789 +#define PREALLOCATED_PXDS USER_PGD_PTRS
26790 #else /* !CONFIG_X86_PAE */
26791
26792 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26793 -#define PREALLOCATED_PMDS 0
26794 +#define PREALLOCATED_PXDS 0
26795
26796 #endif /* CONFIG_X86_PAE */
26797
26798 -static void free_pmds(pmd_t *pmds[])
26799 +static void free_pxds(pxd_t *pxds[])
26800 {
26801 int i;
26802
26803 - for(i = 0; i < PREALLOCATED_PMDS; i++)
26804 - if (pmds[i])
26805 - free_page((unsigned long)pmds[i]);
26806 + for(i = 0; i < PREALLOCATED_PXDS; i++)
26807 + if (pxds[i])
26808 + free_page((unsigned long)pxds[i]);
26809 }
26810
26811 -static int preallocate_pmds(pmd_t *pmds[])
26812 +static int preallocate_pxds(pxd_t *pxds[])
26813 {
26814 int i;
26815 bool failed = false;
26816
26817 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26818 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26819 - if (pmd == NULL)
26820 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26821 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26822 + if (pxd == NULL)
26823 failed = true;
26824 - pmds[i] = pmd;
26825 + pxds[i] = pxd;
26826 }
26827
26828 if (failed) {
26829 - free_pmds(pmds);
26830 + free_pxds(pxds);
26831 return -ENOMEM;
26832 }
26833
26834 @@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
26835 * preallocate which never got a corresponding vma will need to be
26836 * freed manually.
26837 */
26838 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
26839 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
26840 {
26841 int i;
26842
26843 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26844 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26845 pgd_t pgd = pgdp[i];
26846
26847 if (pgd_val(pgd) != 0) {
26848 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
26849 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
26850
26851 - pgdp[i] = native_make_pgd(0);
26852 + set_pgd(pgdp + i, native_make_pgd(0));
26853
26854 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
26855 - pmd_free(mm, pmd);
26856 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
26857 + pxd_free(mm, pxd);
26858 }
26859 }
26860 }
26861
26862 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
26863 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
26864 {
26865 - pud_t *pud;
26866 + pyd_t *pyd;
26867 unsigned long addr;
26868 int i;
26869
26870 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
26871 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
26872 return;
26873
26874 - pud = pud_offset(pgd, 0);
26875 +#ifdef CONFIG_X86_64
26876 + pyd = pyd_offset(mm, 0L);
26877 +#else
26878 + pyd = pyd_offset(pgd, 0L);
26879 +#endif
26880
26881 - for (addr = i = 0; i < PREALLOCATED_PMDS;
26882 - i++, pud++, addr += PUD_SIZE) {
26883 - pmd_t *pmd = pmds[i];
26884 + for (addr = i = 0; i < PREALLOCATED_PXDS;
26885 + i++, pyd++, addr += PYD_SIZE) {
26886 + pxd_t *pxd = pxds[i];
26887
26888 if (i >= KERNEL_PGD_BOUNDARY)
26889 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26890 - sizeof(pmd_t) * PTRS_PER_PMD);
26891 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26892 + sizeof(pxd_t) * PTRS_PER_PMD);
26893
26894 - pud_populate(mm, pud, pmd);
26895 + pyd_populate(mm, pyd, pxd);
26896 }
26897 }
26898
26899 pgd_t *pgd_alloc(struct mm_struct *mm)
26900 {
26901 pgd_t *pgd;
26902 - pmd_t *pmds[PREALLOCATED_PMDS];
26903 + pxd_t *pxds[PREALLOCATED_PXDS];
26904
26905 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
26906
26907 @@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26908
26909 mm->pgd = pgd;
26910
26911 - if (preallocate_pmds(pmds) != 0)
26912 + if (preallocate_pxds(pxds) != 0)
26913 goto out_free_pgd;
26914
26915 if (paravirt_pgd_alloc(mm) != 0)
26916 - goto out_free_pmds;
26917 + goto out_free_pxds;
26918
26919 /*
26920 * Make sure that pre-populating the pmds is atomic with
26921 @@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26922 spin_lock(&pgd_lock);
26923
26924 pgd_ctor(mm, pgd);
26925 - pgd_prepopulate_pmd(mm, pgd, pmds);
26926 + pgd_prepopulate_pxd(mm, pgd, pxds);
26927
26928 spin_unlock(&pgd_lock);
26929
26930 return pgd;
26931
26932 -out_free_pmds:
26933 - free_pmds(pmds);
26934 +out_free_pxds:
26935 + free_pxds(pxds);
26936 out_free_pgd:
26937 free_page((unsigned long)pgd);
26938 out:
26939 @@ -295,7 +356,7 @@ out:
26940
26941 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26942 {
26943 - pgd_mop_up_pmds(mm, pgd);
26944 + pgd_mop_up_pxds(mm, pgd);
26945 pgd_dtor(pgd);
26946 paravirt_pgd_free(mm, pgd);
26947 free_page((unsigned long)pgd);
26948 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26949 index a69bcb8..19068ab 100644
26950 --- a/arch/x86/mm/pgtable_32.c
26951 +++ b/arch/x86/mm/pgtable_32.c
26952 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26953 return;
26954 }
26955 pte = pte_offset_kernel(pmd, vaddr);
26956 +
26957 + pax_open_kernel();
26958 if (pte_val(pteval))
26959 set_pte_at(&init_mm, vaddr, pte, pteval);
26960 else
26961 pte_clear(&init_mm, vaddr, pte);
26962 + pax_close_kernel();
26963
26964 /*
26965 * It's enough to flush this one mapping.
26966 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26967 index 410531d..0f16030 100644
26968 --- a/arch/x86/mm/setup_nx.c
26969 +++ b/arch/x86/mm/setup_nx.c
26970 @@ -5,8 +5,10 @@
26971 #include <asm/pgtable.h>
26972 #include <asm/proto.h>
26973
26974 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26975 static int disable_nx __cpuinitdata;
26976
26977 +#ifndef CONFIG_PAX_PAGEEXEC
26978 /*
26979 * noexec = on|off
26980 *
26981 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
26982 return 0;
26983 }
26984 early_param("noexec", noexec_setup);
26985 +#endif
26986 +
26987 +#endif
26988
26989 void __cpuinit x86_configure_nx(void)
26990 {
26991 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26992 if (cpu_has_nx && !disable_nx)
26993 __supported_pte_mask |= _PAGE_NX;
26994 else
26995 +#endif
26996 __supported_pte_mask &= ~_PAGE_NX;
26997 }
26998
26999 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
27000 index 60f926c..a710970 100644
27001 --- a/arch/x86/mm/tlb.c
27002 +++ b/arch/x86/mm/tlb.c
27003 @@ -48,7 +48,11 @@ void leave_mm(int cpu)
27004 BUG();
27005 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
27006 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
27007 +
27008 +#ifndef CONFIG_PAX_PER_CPU_PGD
27009 load_cr3(swapper_pg_dir);
27010 +#endif
27011 +
27012 }
27013 }
27014 EXPORT_SYMBOL_GPL(leave_mm);
27015 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
27016 index 877b9a1..a8ecf42 100644
27017 --- a/arch/x86/net/bpf_jit.S
27018 +++ b/arch/x86/net/bpf_jit.S
27019 @@ -9,6 +9,7 @@
27020 */
27021 #include <linux/linkage.h>
27022 #include <asm/dwarf2.h>
27023 +#include <asm/alternative-asm.h>
27024
27025 /*
27026 * Calling convention :
27027 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
27028 jle bpf_slow_path_word
27029 mov (SKBDATA,%rsi),%eax
27030 bswap %eax /* ntohl() */
27031 + pax_force_retaddr
27032 ret
27033
27034 sk_load_half:
27035 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
27036 jle bpf_slow_path_half
27037 movzwl (SKBDATA,%rsi),%eax
27038 rol $8,%ax # ntohs()
27039 + pax_force_retaddr
27040 ret
27041
27042 sk_load_byte:
27043 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
27044 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
27045 jle bpf_slow_path_byte
27046 movzbl (SKBDATA,%rsi),%eax
27047 + pax_force_retaddr
27048 ret
27049
27050 /**
27051 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
27052 movzbl (SKBDATA,%rsi),%ebx
27053 and $15,%bl
27054 shl $2,%bl
27055 + pax_force_retaddr
27056 ret
27057
27058 /* rsi contains offset and can be scratched */
27059 @@ -109,6 +114,7 @@ bpf_slow_path_word:
27060 js bpf_error
27061 mov -12(%rbp),%eax
27062 bswap %eax
27063 + pax_force_retaddr
27064 ret
27065
27066 bpf_slow_path_half:
27067 @@ -117,12 +123,14 @@ bpf_slow_path_half:
27068 mov -12(%rbp),%ax
27069 rol $8,%ax
27070 movzwl %ax,%eax
27071 + pax_force_retaddr
27072 ret
27073
27074 bpf_slow_path_byte:
27075 bpf_slow_path_common(1)
27076 js bpf_error
27077 movzbl -12(%rbp),%eax
27078 + pax_force_retaddr
27079 ret
27080
27081 bpf_slow_path_byte_msh:
27082 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
27083 and $15,%al
27084 shl $2,%al
27085 xchg %eax,%ebx
27086 + pax_force_retaddr
27087 ret
27088
27089 #define sk_negative_common(SIZE) \
27090 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
27091 sk_negative_common(4)
27092 mov (%rax), %eax
27093 bswap %eax
27094 + pax_force_retaddr
27095 ret
27096
27097 bpf_slow_path_half_neg:
27098 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
27099 mov (%rax),%ax
27100 rol $8,%ax
27101 movzwl %ax,%eax
27102 + pax_force_retaddr
27103 ret
27104
27105 bpf_slow_path_byte_neg:
27106 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
27107 .globl sk_load_byte_negative_offset
27108 sk_negative_common(1)
27109 movzbl (%rax), %eax
27110 + pax_force_retaddr
27111 ret
27112
27113 bpf_slow_path_byte_msh_neg:
27114 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
27115 and $15,%al
27116 shl $2,%al
27117 xchg %eax,%ebx
27118 + pax_force_retaddr
27119 ret
27120
27121 bpf_error:
27122 @@ -197,4 +210,5 @@ bpf_error:
27123 xor %eax,%eax
27124 mov -8(%rbp),%rbx
27125 leaveq
27126 + pax_force_retaddr
27127 ret
27128 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
27129 index 520d2bd..b895ef4 100644
27130 --- a/arch/x86/net/bpf_jit_comp.c
27131 +++ b/arch/x86/net/bpf_jit_comp.c
27132 @@ -11,6 +11,7 @@
27133 #include <asm/cacheflush.h>
27134 #include <linux/netdevice.h>
27135 #include <linux/filter.h>
27136 +#include <linux/random.h>
27137
27138 /*
27139 * Conventions :
27140 @@ -48,13 +49,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
27141 return ptr + len;
27142 }
27143
27144 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27145 +#define MAX_INSTR_CODE_SIZE 96
27146 +#else
27147 +#define MAX_INSTR_CODE_SIZE 64
27148 +#endif
27149 +
27150 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
27151
27152 #define EMIT1(b1) EMIT(b1, 1)
27153 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
27154 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
27155 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
27156 +
27157 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27158 +/* original constant will appear in ecx */
27159 +#define DILUTE_CONST_SEQUENCE(_off, _key) \
27160 +do { \
27161 + /* mov ecx, randkey */ \
27162 + EMIT1(0xb9); \
27163 + EMIT(_key, 4); \
27164 + /* xor ecx, randkey ^ off */ \
27165 + EMIT2(0x81, 0xf1); \
27166 + EMIT((_key) ^ (_off), 4); \
27167 +} while (0)
27168 +
27169 +#define EMIT1_off32(b1, _off) \
27170 +do { \
27171 + switch (b1) { \
27172 + case 0x05: /* add eax, imm32 */ \
27173 + case 0x2d: /* sub eax, imm32 */ \
27174 + case 0x25: /* and eax, imm32 */ \
27175 + case 0x0d: /* or eax, imm32 */ \
27176 + case 0xb8: /* mov eax, imm32 */ \
27177 + case 0x3d: /* cmp eax, imm32 */ \
27178 + case 0xa9: /* test eax, imm32 */ \
27179 + DILUTE_CONST_SEQUENCE(_off, randkey); \
27180 + EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
27181 + break; \
27182 + case 0xbb: /* mov ebx, imm32 */ \
27183 + DILUTE_CONST_SEQUENCE(_off, randkey); \
27184 + /* mov ebx, ecx */ \
27185 + EMIT2(0x89, 0xcb); \
27186 + break; \
27187 + case 0xbe: /* mov esi, imm32 */ \
27188 + DILUTE_CONST_SEQUENCE(_off, randkey); \
27189 + /* mov esi, ecx */ \
27190 + EMIT2(0x89, 0xce); \
27191 + break; \
27192 + case 0xe9: /* jmp rel imm32 */ \
27193 + EMIT1(b1); \
27194 + EMIT(_off, 4); \
27195 + /* prevent fall-through, we're not called if off = 0 */ \
27196 + EMIT(0xcccccccc, 4); \
27197 + EMIT(0xcccccccc, 4); \
27198 + break; \
27199 + default: \
27200 + EMIT1(b1); \
27201 + EMIT(_off, 4); \
27202 + } \
27203 +} while (0)
27204 +
27205 +#define EMIT2_off32(b1, b2, _off) \
27206 +do { \
27207 + if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
27208 + EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
27209 + EMIT(randkey, 4); \
27210 + EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
27211 + EMIT((_off) - randkey, 4); \
27212 + } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
27213 + DILUTE_CONST_SEQUENCE(_off, randkey); \
27214 + /* imul eax, ecx */ \
27215 + EMIT3(0x0f, 0xaf, 0xc1); \
27216 + } else { \
27217 + EMIT2(b1, b2); \
27218 + EMIT(_off, 4); \
27219 + } \
27220 +} while (0)
27221 +#else
27222 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
27223 +#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
27224 +#endif
27225
27226 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
27227 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
27228 @@ -89,6 +164,24 @@ do { \
27229 #define X86_JBE 0x76
27230 #define X86_JA 0x77
27231
27232 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27233 +#define APPEND_FLOW_VERIFY() \
27234 +do { \
27235 + /* mov ecx, randkey */ \
27236 + EMIT1(0xb9); \
27237 + EMIT(randkey, 4); \
27238 + /* cmp ecx, randkey */ \
27239 + EMIT2(0x81, 0xf9); \
27240 + EMIT(randkey, 4); \
27241 + /* jz after 8 int 3s */ \
27242 + EMIT2(0x74, 0x08); \
27243 + EMIT(0xcccccccc, 4); \
27244 + EMIT(0xcccccccc, 4); \
27245 +} while (0)
27246 +#else
27247 +#define APPEND_FLOW_VERIFY() do { } while (0)
27248 +#endif
27249 +
27250 #define EMIT_COND_JMP(op, offset) \
27251 do { \
27252 if (is_near(offset)) \
27253 @@ -96,6 +189,7 @@ do { \
27254 else { \
27255 EMIT2(0x0f, op + 0x10); \
27256 EMIT(offset, 4); /* jxx .+off32 */ \
27257 + APPEND_FLOW_VERIFY(); \
27258 } \
27259 } while (0)
27260
27261 @@ -120,12 +214,17 @@ static inline void bpf_flush_icache(void *start, void *end)
27262 set_fs(old_fs);
27263 }
27264
27265 +struct bpf_jit_work {
27266 + struct work_struct work;
27267 + void *image;
27268 +};
27269 +
27270 #define CHOOSE_LOAD_FUNC(K, func) \
27271 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
27272
27273 void bpf_jit_compile(struct sk_filter *fp)
27274 {
27275 - u8 temp[64];
27276 + u8 temp[MAX_INSTR_CODE_SIZE];
27277 u8 *prog;
27278 unsigned int proglen, oldproglen = 0;
27279 int ilen, i;
27280 @@ -138,6 +237,9 @@ void bpf_jit_compile(struct sk_filter *fp)
27281 unsigned int *addrs;
27282 const struct sock_filter *filter = fp->insns;
27283 int flen = fp->len;
27284 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27285 + unsigned int randkey;
27286 +#endif
27287
27288 if (!bpf_jit_enable)
27289 return;
27290 @@ -146,11 +248,19 @@ void bpf_jit_compile(struct sk_filter *fp)
27291 if (addrs == NULL)
27292 return;
27293
27294 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
27295 + if (!fp->work)
27296 + goto out;
27297 +
27298 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27299 + randkey = get_random_int();
27300 +#endif
27301 +
27302 /* Before first pass, make a rough estimation of addrs[]
27303 - * each bpf instruction is translated to less than 64 bytes
27304 + * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
27305 */
27306 for (proglen = 0, i = 0; i < flen; i++) {
27307 - proglen += 64;
27308 + proglen += MAX_INSTR_CODE_SIZE;
27309 addrs[i] = proglen;
27310 }
27311 cleanup_addr = proglen; /* epilogue address */
27312 @@ -258,10 +368,8 @@ void bpf_jit_compile(struct sk_filter *fp)
27313 case BPF_S_ALU_MUL_K: /* A *= K */
27314 if (is_imm8(K))
27315 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
27316 - else {
27317 - EMIT2(0x69, 0xc0); /* imul imm32,%eax */
27318 - EMIT(K, 4);
27319 - }
27320 + else
27321 + EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
27322 break;
27323 case BPF_S_ALU_DIV_X: /* A /= X; */
27324 seen |= SEEN_XREG;
27325 @@ -301,13 +409,23 @@ void bpf_jit_compile(struct sk_filter *fp)
27326 break;
27327 case BPF_S_ALU_MOD_K: /* A %= K; */
27328 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
27329 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27330 + DILUTE_CONST_SEQUENCE(K, randkey);
27331 +#else
27332 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
27333 +#endif
27334 EMIT2(0xf7, 0xf1); /* div %ecx */
27335 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
27336 break;
27337 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
27338 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27339 + DILUTE_CONST_SEQUENCE(K, randkey);
27340 + // imul rax, rcx
27341 + EMIT4(0x48, 0x0f, 0xaf, 0xc1);
27342 +#else
27343 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
27344 EMIT(K, 4);
27345 +#endif
27346 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
27347 break;
27348 case BPF_S_ALU_AND_X:
27349 @@ -543,8 +661,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
27350 if (is_imm8(K)) {
27351 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
27352 } else {
27353 - EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
27354 - EMIT(K, 4);
27355 + EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
27356 }
27357 } else {
27358 EMIT2(0x89,0xde); /* mov %ebx,%esi */
27359 @@ -627,17 +744,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27360 break;
27361 default:
27362 /* hmm, too complex filter, give up with jit compiler */
27363 - goto out;
27364 + goto error;
27365 }
27366 ilen = prog - temp;
27367 if (image) {
27368 if (unlikely(proglen + ilen > oldproglen)) {
27369 pr_err("bpb_jit_compile fatal error\n");
27370 - kfree(addrs);
27371 - module_free(NULL, image);
27372 - return;
27373 + module_free_exec(NULL, image);
27374 + goto error;
27375 }
27376 + pax_open_kernel();
27377 memcpy(image + proglen, temp, ilen);
27378 + pax_close_kernel();
27379 }
27380 proglen += ilen;
27381 addrs[i] = proglen;
27382 @@ -658,11 +776,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27383 break;
27384 }
27385 if (proglen == oldproglen) {
27386 - image = module_alloc(max_t(unsigned int,
27387 - proglen,
27388 - sizeof(struct work_struct)));
27389 + image = module_alloc_exec(proglen);
27390 if (!image)
27391 - goto out;
27392 + goto error;
27393 }
27394 oldproglen = proglen;
27395 }
27396 @@ -678,7 +794,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27397 bpf_flush_icache(image, image + proglen);
27398
27399 fp->bpf_func = (void *)image;
27400 - }
27401 + } else
27402 +error:
27403 + kfree(fp->work);
27404 +
27405 out:
27406 kfree(addrs);
27407 return;
27408 @@ -686,18 +805,20 @@ out:
27409
27410 static void jit_free_defer(struct work_struct *arg)
27411 {
27412 - module_free(NULL, arg);
27413 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
27414 + kfree(arg);
27415 }
27416
27417 /* run from softirq, we must use a work_struct to call
27418 - * module_free() from process context
27419 + * module_free_exec() from process context
27420 */
27421 void bpf_jit_free(struct sk_filter *fp)
27422 {
27423 if (fp->bpf_func != sk_run_filter) {
27424 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
27425 + struct work_struct *work = &fp->work->work;
27426
27427 INIT_WORK(work, jit_free_defer);
27428 + fp->work->image = fp->bpf_func;
27429 schedule_work(work);
27430 }
27431 }
27432 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
27433 index d6aa6e8..266395a 100644
27434 --- a/arch/x86/oprofile/backtrace.c
27435 +++ b/arch/x86/oprofile/backtrace.c
27436 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
27437 struct stack_frame_ia32 *fp;
27438 unsigned long bytes;
27439
27440 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
27441 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
27442 if (bytes != sizeof(bufhead))
27443 return NULL;
27444
27445 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
27446 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
27447
27448 oprofile_add_trace(bufhead[0].return_address);
27449
27450 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
27451 struct stack_frame bufhead[2];
27452 unsigned long bytes;
27453
27454 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
27455 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
27456 if (bytes != sizeof(bufhead))
27457 return NULL;
27458
27459 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
27460 {
27461 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
27462
27463 - if (!user_mode_vm(regs)) {
27464 + if (!user_mode(regs)) {
27465 unsigned long stack = kernel_stack_pointer(regs);
27466 if (depth)
27467 dump_trace(NULL, regs, (unsigned long *)stack, 0,
27468 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
27469 index e14a2ff..3fd6b58 100644
27470 --- a/arch/x86/pci/mrst.c
27471 +++ b/arch/x86/pci/mrst.c
27472 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
27473 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
27474 pci_mmcfg_late_init();
27475 pcibios_enable_irq = mrst_pci_irq_enable;
27476 - pci_root_ops = pci_mrst_ops;
27477 + pax_open_kernel();
27478 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
27479 + pax_close_kernel();
27480 pci_soc_mode = 1;
27481 /* Continue with standard init */
27482 return 1;
27483 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
27484 index da8fe05..7ee6704 100644
27485 --- a/arch/x86/pci/pcbios.c
27486 +++ b/arch/x86/pci/pcbios.c
27487 @@ -79,50 +79,93 @@ union bios32 {
27488 static struct {
27489 unsigned long address;
27490 unsigned short segment;
27491 -} bios32_indirect = { 0, __KERNEL_CS };
27492 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
27493
27494 /*
27495 * Returns the entry point for the given service, NULL on error
27496 */
27497
27498 -static unsigned long bios32_service(unsigned long service)
27499 +static unsigned long __devinit bios32_service(unsigned long service)
27500 {
27501 unsigned char return_code; /* %al */
27502 unsigned long address; /* %ebx */
27503 unsigned long length; /* %ecx */
27504 unsigned long entry; /* %edx */
27505 unsigned long flags;
27506 + struct desc_struct d, *gdt;
27507
27508 local_irq_save(flags);
27509 - __asm__("lcall *(%%edi); cld"
27510 +
27511 + gdt = get_cpu_gdt_table(smp_processor_id());
27512 +
27513 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
27514 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27515 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
27516 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27517 +
27518 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
27519 : "=a" (return_code),
27520 "=b" (address),
27521 "=c" (length),
27522 "=d" (entry)
27523 : "0" (service),
27524 "1" (0),
27525 - "D" (&bios32_indirect));
27526 + "D" (&bios32_indirect),
27527 + "r"(__PCIBIOS_DS)
27528 + : "memory");
27529 +
27530 + pax_open_kernel();
27531 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
27532 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
27533 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
27534 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
27535 + pax_close_kernel();
27536 +
27537 local_irq_restore(flags);
27538
27539 switch (return_code) {
27540 - case 0:
27541 - return address + entry;
27542 - case 0x80: /* Not present */
27543 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27544 - return 0;
27545 - default: /* Shouldn't happen */
27546 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27547 - service, return_code);
27548 + case 0: {
27549 + int cpu;
27550 + unsigned char flags;
27551 +
27552 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
27553 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
27554 + printk(KERN_WARNING "bios32_service: not valid\n");
27555 return 0;
27556 + }
27557 + address = address + PAGE_OFFSET;
27558 + length += 16UL; /* some BIOSs underreport this... */
27559 + flags = 4;
27560 + if (length >= 64*1024*1024) {
27561 + length >>= PAGE_SHIFT;
27562 + flags |= 8;
27563 + }
27564 +
27565 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27566 + gdt = get_cpu_gdt_table(cpu);
27567 + pack_descriptor(&d, address, length, 0x9b, flags);
27568 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27569 + pack_descriptor(&d, address, length, 0x93, flags);
27570 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27571 + }
27572 + return entry;
27573 + }
27574 + case 0x80: /* Not present */
27575 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27576 + return 0;
27577 + default: /* Shouldn't happen */
27578 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27579 + service, return_code);
27580 + return 0;
27581 }
27582 }
27583
27584 static struct {
27585 unsigned long address;
27586 unsigned short segment;
27587 -} pci_indirect = { 0, __KERNEL_CS };
27588 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
27589
27590 -static int pci_bios_present;
27591 +static int pci_bios_present __read_only;
27592
27593 static int __devinit check_pcibios(void)
27594 {
27595 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
27596 unsigned long flags, pcibios_entry;
27597
27598 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
27599 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
27600 + pci_indirect.address = pcibios_entry;
27601
27602 local_irq_save(flags);
27603 - __asm__(
27604 - "lcall *(%%edi); cld\n\t"
27605 + __asm__("movw %w6, %%ds\n\t"
27606 + "lcall *%%ss:(%%edi); cld\n\t"
27607 + "push %%ss\n\t"
27608 + "pop %%ds\n\t"
27609 "jc 1f\n\t"
27610 "xor %%ah, %%ah\n"
27611 "1:"
27612 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
27613 "=b" (ebx),
27614 "=c" (ecx)
27615 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
27616 - "D" (&pci_indirect)
27617 + "D" (&pci_indirect),
27618 + "r" (__PCIBIOS_DS)
27619 : "memory");
27620 local_irq_restore(flags);
27621
27622 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27623
27624 switch (len) {
27625 case 1:
27626 - __asm__("lcall *(%%esi); cld\n\t"
27627 + __asm__("movw %w6, %%ds\n\t"
27628 + "lcall *%%ss:(%%esi); cld\n\t"
27629 + "push %%ss\n\t"
27630 + "pop %%ds\n\t"
27631 "jc 1f\n\t"
27632 "xor %%ah, %%ah\n"
27633 "1:"
27634 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27635 : "1" (PCIBIOS_READ_CONFIG_BYTE),
27636 "b" (bx),
27637 "D" ((long)reg),
27638 - "S" (&pci_indirect));
27639 + "S" (&pci_indirect),
27640 + "r" (__PCIBIOS_DS));
27641 /*
27642 * Zero-extend the result beyond 8 bits, do not trust the
27643 * BIOS having done it:
27644 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27645 *value &= 0xff;
27646 break;
27647 case 2:
27648 - __asm__("lcall *(%%esi); cld\n\t"
27649 + __asm__("movw %w6, %%ds\n\t"
27650 + "lcall *%%ss:(%%esi); cld\n\t"
27651 + "push %%ss\n\t"
27652 + "pop %%ds\n\t"
27653 "jc 1f\n\t"
27654 "xor %%ah, %%ah\n"
27655 "1:"
27656 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27657 : "1" (PCIBIOS_READ_CONFIG_WORD),
27658 "b" (bx),
27659 "D" ((long)reg),
27660 - "S" (&pci_indirect));
27661 + "S" (&pci_indirect),
27662 + "r" (__PCIBIOS_DS));
27663 /*
27664 * Zero-extend the result beyond 16 bits, do not trust the
27665 * BIOS having done it:
27666 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27667 *value &= 0xffff;
27668 break;
27669 case 4:
27670 - __asm__("lcall *(%%esi); cld\n\t"
27671 + __asm__("movw %w6, %%ds\n\t"
27672 + "lcall *%%ss:(%%esi); cld\n\t"
27673 + "push %%ss\n\t"
27674 + "pop %%ds\n\t"
27675 "jc 1f\n\t"
27676 "xor %%ah, %%ah\n"
27677 "1:"
27678 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27679 : "1" (PCIBIOS_READ_CONFIG_DWORD),
27680 "b" (bx),
27681 "D" ((long)reg),
27682 - "S" (&pci_indirect));
27683 + "S" (&pci_indirect),
27684 + "r" (__PCIBIOS_DS));
27685 break;
27686 }
27687
27688 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27689
27690 switch (len) {
27691 case 1:
27692 - __asm__("lcall *(%%esi); cld\n\t"
27693 + __asm__("movw %w6, %%ds\n\t"
27694 + "lcall *%%ss:(%%esi); cld\n\t"
27695 + "push %%ss\n\t"
27696 + "pop %%ds\n\t"
27697 "jc 1f\n\t"
27698 "xor %%ah, %%ah\n"
27699 "1:"
27700 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27701 "c" (value),
27702 "b" (bx),
27703 "D" ((long)reg),
27704 - "S" (&pci_indirect));
27705 + "S" (&pci_indirect),
27706 + "r" (__PCIBIOS_DS));
27707 break;
27708 case 2:
27709 - __asm__("lcall *(%%esi); cld\n\t"
27710 + __asm__("movw %w6, %%ds\n\t"
27711 + "lcall *%%ss:(%%esi); cld\n\t"
27712 + "push %%ss\n\t"
27713 + "pop %%ds\n\t"
27714 "jc 1f\n\t"
27715 "xor %%ah, %%ah\n"
27716 "1:"
27717 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27718 "c" (value),
27719 "b" (bx),
27720 "D" ((long)reg),
27721 - "S" (&pci_indirect));
27722 + "S" (&pci_indirect),
27723 + "r" (__PCIBIOS_DS));
27724 break;
27725 case 4:
27726 - __asm__("lcall *(%%esi); cld\n\t"
27727 + __asm__("movw %w6, %%ds\n\t"
27728 + "lcall *%%ss:(%%esi); cld\n\t"
27729 + "push %%ss\n\t"
27730 + "pop %%ds\n\t"
27731 "jc 1f\n\t"
27732 "xor %%ah, %%ah\n"
27733 "1:"
27734 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27735 "c" (value),
27736 "b" (bx),
27737 "D" ((long)reg),
27738 - "S" (&pci_indirect));
27739 + "S" (&pci_indirect),
27740 + "r" (__PCIBIOS_DS));
27741 break;
27742 }
27743
27744 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
27745
27746 DBG("PCI: Fetching IRQ routing table... ");
27747 __asm__("push %%es\n\t"
27748 + "movw %w8, %%ds\n\t"
27749 "push %%ds\n\t"
27750 "pop %%es\n\t"
27751 - "lcall *(%%esi); cld\n\t"
27752 + "lcall *%%ss:(%%esi); cld\n\t"
27753 "pop %%es\n\t"
27754 + "push %%ss\n\t"
27755 + "pop %%ds\n"
27756 "jc 1f\n\t"
27757 "xor %%ah, %%ah\n"
27758 "1:"
27759 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
27760 "1" (0),
27761 "D" ((long) &opt),
27762 "S" (&pci_indirect),
27763 - "m" (opt)
27764 + "m" (opt),
27765 + "r" (__PCIBIOS_DS)
27766 : "memory");
27767 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
27768 if (ret & 0xff00)
27769 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
27770 {
27771 int ret;
27772
27773 - __asm__("lcall *(%%esi); cld\n\t"
27774 + __asm__("movw %w5, %%ds\n\t"
27775 + "lcall *%%ss:(%%esi); cld\n\t"
27776 + "push %%ss\n\t"
27777 + "pop %%ds\n"
27778 "jc 1f\n\t"
27779 "xor %%ah, %%ah\n"
27780 "1:"
27781 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
27782 : "0" (PCIBIOS_SET_PCI_HW_INT),
27783 "b" ((dev->bus->number << 8) | dev->devfn),
27784 "c" ((irq << 8) | (pin + 10)),
27785 - "S" (&pci_indirect));
27786 + "S" (&pci_indirect),
27787 + "r" (__PCIBIOS_DS));
27788 return !(ret & 0xff00);
27789 }
27790 EXPORT_SYMBOL(pcibios_set_irq_routing);
27791 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
27792 index 40e4469..1ab536e 100644
27793 --- a/arch/x86/platform/efi/efi_32.c
27794 +++ b/arch/x86/platform/efi/efi_32.c
27795 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
27796 {
27797 struct desc_ptr gdt_descr;
27798
27799 +#ifdef CONFIG_PAX_KERNEXEC
27800 + struct desc_struct d;
27801 +#endif
27802 +
27803 local_irq_save(efi_rt_eflags);
27804
27805 load_cr3(initial_page_table);
27806 __flush_tlb_all();
27807
27808 +#ifdef CONFIG_PAX_KERNEXEC
27809 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
27810 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
27811 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
27812 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
27813 +#endif
27814 +
27815 gdt_descr.address = __pa(get_cpu_gdt_table(0));
27816 gdt_descr.size = GDT_SIZE - 1;
27817 load_gdt(&gdt_descr);
27818 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
27819 {
27820 struct desc_ptr gdt_descr;
27821
27822 +#ifdef CONFIG_PAX_KERNEXEC
27823 + struct desc_struct d;
27824 +
27825 + memset(&d, 0, sizeof d);
27826 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
27827 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
27828 +#endif
27829 +
27830 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
27831 gdt_descr.size = GDT_SIZE - 1;
27832 load_gdt(&gdt_descr);
27833 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
27834 index fbe66e6..eae5e38 100644
27835 --- a/arch/x86/platform/efi/efi_stub_32.S
27836 +++ b/arch/x86/platform/efi/efi_stub_32.S
27837 @@ -6,7 +6,9 @@
27838 */
27839
27840 #include <linux/linkage.h>
27841 +#include <linux/init.h>
27842 #include <asm/page_types.h>
27843 +#include <asm/segment.h>
27844
27845 /*
27846 * efi_call_phys(void *, ...) is a function with variable parameters.
27847 @@ -20,7 +22,7 @@
27848 * service functions will comply with gcc calling convention, too.
27849 */
27850
27851 -.text
27852 +__INIT
27853 ENTRY(efi_call_phys)
27854 /*
27855 * 0. The function can only be called in Linux kernel. So CS has been
27856 @@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
27857 * The mapping of lower virtual memory has been created in prelog and
27858 * epilog.
27859 */
27860 - movl $1f, %edx
27861 - subl $__PAGE_OFFSET, %edx
27862 - jmp *%edx
27863 +#ifdef CONFIG_PAX_KERNEXEC
27864 + movl $(__KERNEXEC_EFI_DS), %edx
27865 + mov %edx, %ds
27866 + mov %edx, %es
27867 + mov %edx, %ss
27868 + addl $2f,(1f)
27869 + ljmp *(1f)
27870 +
27871 +__INITDATA
27872 +1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
27873 +.previous
27874 +
27875 +2:
27876 + subl $2b,(1b)
27877 +#else
27878 + jmp 1f-__PAGE_OFFSET
27879 1:
27880 +#endif
27881
27882 /*
27883 * 2. Now on the top of stack is the return
27884 @@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
27885 * parameter 2, ..., param n. To make things easy, we save the return
27886 * address of efi_call_phys in a global variable.
27887 */
27888 - popl %edx
27889 - movl %edx, saved_return_addr
27890 - /* get the function pointer into ECX*/
27891 - popl %ecx
27892 - movl %ecx, efi_rt_function_ptr
27893 - movl $2f, %edx
27894 - subl $__PAGE_OFFSET, %edx
27895 - pushl %edx
27896 + popl (saved_return_addr)
27897 + popl (efi_rt_function_ptr)
27898
27899 /*
27900 * 3. Clear PG bit in %CR0.
27901 @@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
27902 /*
27903 * 5. Call the physical function.
27904 */
27905 - jmp *%ecx
27906 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
27907
27908 -2:
27909 /*
27910 * 6. After EFI runtime service returns, control will return to
27911 * following instruction. We'd better readjust stack pointer first.
27912 @@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
27913 movl %cr0, %edx
27914 orl $0x80000000, %edx
27915 movl %edx, %cr0
27916 - jmp 1f
27917 -1:
27918 +
27919 /*
27920 * 8. Now restore the virtual mode from flat mode by
27921 * adding EIP with PAGE_OFFSET.
27922 */
27923 - movl $1f, %edx
27924 - jmp *%edx
27925 +#ifdef CONFIG_PAX_KERNEXEC
27926 + movl $(__KERNEL_DS), %edx
27927 + mov %edx, %ds
27928 + mov %edx, %es
27929 + mov %edx, %ss
27930 + ljmp $(__KERNEL_CS),$1f
27931 +#else
27932 + jmp 1f+__PAGE_OFFSET
27933 +#endif
27934 1:
27935
27936 /*
27937 * 9. Balance the stack. And because EAX contain the return value,
27938 * we'd better not clobber it.
27939 */
27940 - leal efi_rt_function_ptr, %edx
27941 - movl (%edx), %ecx
27942 - pushl %ecx
27943 + pushl (efi_rt_function_ptr)
27944
27945 /*
27946 - * 10. Push the saved return address onto the stack and return.
27947 + * 10. Return to the saved return address.
27948 */
27949 - leal saved_return_addr, %edx
27950 - movl (%edx), %ecx
27951 - pushl %ecx
27952 - ret
27953 + jmpl *(saved_return_addr)
27954 ENDPROC(efi_call_phys)
27955 .previous
27956
27957 -.data
27958 +__INITDATA
27959 saved_return_addr:
27960 .long 0
27961 efi_rt_function_ptr:
27962 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
27963 index 4c07cca..2c8427d 100644
27964 --- a/arch/x86/platform/efi/efi_stub_64.S
27965 +++ b/arch/x86/platform/efi/efi_stub_64.S
27966 @@ -7,6 +7,7 @@
27967 */
27968
27969 #include <linux/linkage.h>
27970 +#include <asm/alternative-asm.h>
27971
27972 #define SAVE_XMM \
27973 mov %rsp, %rax; \
27974 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
27975 call *%rdi
27976 addq $32, %rsp
27977 RESTORE_XMM
27978 + pax_force_retaddr 0, 1
27979 ret
27980 ENDPROC(efi_call0)
27981
27982 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
27983 call *%rdi
27984 addq $32, %rsp
27985 RESTORE_XMM
27986 + pax_force_retaddr 0, 1
27987 ret
27988 ENDPROC(efi_call1)
27989
27990 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
27991 call *%rdi
27992 addq $32, %rsp
27993 RESTORE_XMM
27994 + pax_force_retaddr 0, 1
27995 ret
27996 ENDPROC(efi_call2)
27997
27998 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
27999 call *%rdi
28000 addq $32, %rsp
28001 RESTORE_XMM
28002 + pax_force_retaddr 0, 1
28003 ret
28004 ENDPROC(efi_call3)
28005
28006 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
28007 call *%rdi
28008 addq $32, %rsp
28009 RESTORE_XMM
28010 + pax_force_retaddr 0, 1
28011 ret
28012 ENDPROC(efi_call4)
28013
28014 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
28015 call *%rdi
28016 addq $48, %rsp
28017 RESTORE_XMM
28018 + pax_force_retaddr 0, 1
28019 ret
28020 ENDPROC(efi_call5)
28021
28022 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
28023 call *%rdi
28024 addq $48, %rsp
28025 RESTORE_XMM
28026 + pax_force_retaddr 0, 1
28027 ret
28028 ENDPROC(efi_call6)
28029 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
28030 index fd41a92..9c33628 100644
28031 --- a/arch/x86/platform/mrst/mrst.c
28032 +++ b/arch/x86/platform/mrst/mrst.c
28033 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
28034 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
28035 int sfi_mrtc_num;
28036
28037 -static void mrst_power_off(void)
28038 +static __noreturn void mrst_power_off(void)
28039 {
28040 + BUG();
28041 }
28042
28043 -static void mrst_reboot(void)
28044 +static __noreturn void mrst_reboot(void)
28045 {
28046 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
28047 + BUG();
28048 }
28049
28050 /* parse all the mtimer info to a static mtimer array */
28051 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
28052 index 218cdb1..c1178eb 100644
28053 --- a/arch/x86/power/cpu.c
28054 +++ b/arch/x86/power/cpu.c
28055 @@ -132,7 +132,7 @@ static void do_fpu_end(void)
28056 static void fix_processor_context(void)
28057 {
28058 int cpu = smp_processor_id();
28059 - struct tss_struct *t = &per_cpu(init_tss, cpu);
28060 + struct tss_struct *t = init_tss + cpu;
28061
28062 set_tss_desc(cpu, t); /*
28063 * This just modifies memory; should not be
28064 @@ -142,8 +142,6 @@ static void fix_processor_context(void)
28065 */
28066
28067 #ifdef CONFIG_X86_64
28068 - get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
28069 -
28070 syscall_init(); /* This sets MSR_*STAR and related */
28071 #endif
28072 load_TR_desc(); /* This does ltr */
28073 diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
28074 index cbca565..bae7133 100644
28075 --- a/arch/x86/realmode/init.c
28076 +++ b/arch/x86/realmode/init.c
28077 @@ -62,7 +62,13 @@ void __init setup_real_mode(void)
28078 __va(real_mode_header->trampoline_header);
28079
28080 #ifdef CONFIG_X86_32
28081 - trampoline_header->start = __pa(startup_32_smp);
28082 + trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
28083 +
28084 +#ifdef CONFIG_PAX_KERNEXEC
28085 + trampoline_header->start -= LOAD_PHYSICAL_ADDR;
28086 +#endif
28087 +
28088 + trampoline_header->boot_cs = __BOOT_CS;
28089 trampoline_header->gdt_limit = __BOOT_DS + 7;
28090 trampoline_header->gdt_base = __pa(boot_gdt);
28091 #else
28092 diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
28093 index 8869287..d577672 100644
28094 --- a/arch/x86/realmode/rm/Makefile
28095 +++ b/arch/x86/realmode/rm/Makefile
28096 @@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
28097 $(call cc-option, -fno-unit-at-a-time)) \
28098 $(call cc-option, -fno-stack-protector) \
28099 $(call cc-option, -mpreferred-stack-boundary=2)
28100 +ifdef CONSTIFY_PLUGIN
28101 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
28102 +endif
28103 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
28104 GCOV_PROFILE := n
28105 diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
28106 index a28221d..93c40f1 100644
28107 --- a/arch/x86/realmode/rm/header.S
28108 +++ b/arch/x86/realmode/rm/header.S
28109 @@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
28110 #endif
28111 /* APM/BIOS reboot */
28112 .long pa_machine_real_restart_asm
28113 -#ifdef CONFIG_X86_64
28114 +#ifdef CONFIG_X86_32
28115 + .long __KERNEL_CS
28116 +#else
28117 .long __KERNEL32_CS
28118 #endif
28119 END(real_mode_header)
28120 diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
28121 index c1b2791..f9e31c7 100644
28122 --- a/arch/x86/realmode/rm/trampoline_32.S
28123 +++ b/arch/x86/realmode/rm/trampoline_32.S
28124 @@ -25,6 +25,12 @@
28125 #include <asm/page_types.h>
28126 #include "realmode.h"
28127
28128 +#ifdef CONFIG_PAX_KERNEXEC
28129 +#define ta(X) (X)
28130 +#else
28131 +#define ta(X) (pa_ ## X)
28132 +#endif
28133 +
28134 .text
28135 .code16
28136
28137 @@ -39,8 +45,6 @@ ENTRY(trampoline_start)
28138
28139 cli # We should be safe anyway
28140
28141 - movl tr_start, %eax # where we need to go
28142 -
28143 movl $0xA5A5A5A5, trampoline_status
28144 # write marker for master knows we're running
28145
28146 @@ -56,7 +60,7 @@ ENTRY(trampoline_start)
28147 movw $1, %dx # protected mode (PE) bit
28148 lmsw %dx # into protected mode
28149
28150 - ljmpl $__BOOT_CS, $pa_startup_32
28151 + ljmpl *(trampoline_header)
28152
28153 .section ".text32","ax"
28154 .code32
28155 @@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
28156 .balign 8
28157 GLOBAL(trampoline_header)
28158 tr_start: .space 4
28159 - tr_gdt_pad: .space 2
28160 + tr_boot_cs: .space 2
28161 tr_gdt: .space 6
28162 END(trampoline_header)
28163
28164 diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
28165 index bb360dc..3e5945f 100644
28166 --- a/arch/x86/realmode/rm/trampoline_64.S
28167 +++ b/arch/x86/realmode/rm/trampoline_64.S
28168 @@ -107,7 +107,7 @@ ENTRY(startup_32)
28169 wrmsr
28170
28171 # Enable paging and in turn activate Long Mode
28172 - movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
28173 + movl $(X86_CR0_PG | X86_CR0_PE), %eax
28174 movl %eax, %cr0
28175
28176 /*
28177 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
28178 index 5a1847d..deccb30 100644
28179 --- a/arch/x86/tools/relocs.c
28180 +++ b/arch/x86/tools/relocs.c
28181 @@ -12,10 +12,13 @@
28182 #include <regex.h>
28183 #include <tools/le_byteshift.h>
28184
28185 +#include "../../../include/generated/autoconf.h"
28186 +
28187 static void die(char *fmt, ...);
28188
28189 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
28190 static Elf32_Ehdr ehdr;
28191 +static Elf32_Phdr *phdr;
28192 static unsigned long reloc_count, reloc_idx;
28193 static unsigned long *relocs;
28194 static unsigned long reloc16_count, reloc16_idx;
28195 @@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
28196 }
28197 }
28198
28199 +static void read_phdrs(FILE *fp)
28200 +{
28201 + unsigned int i;
28202 +
28203 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
28204 + if (!phdr) {
28205 + die("Unable to allocate %d program headers\n",
28206 + ehdr.e_phnum);
28207 + }
28208 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
28209 + die("Seek to %d failed: %s\n",
28210 + ehdr.e_phoff, strerror(errno));
28211 + }
28212 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
28213 + die("Cannot read ELF program headers: %s\n",
28214 + strerror(errno));
28215 + }
28216 + for(i = 0; i < ehdr.e_phnum; i++) {
28217 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
28218 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
28219 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
28220 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
28221 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
28222 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
28223 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
28224 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
28225 + }
28226 +
28227 +}
28228 +
28229 static void read_shdrs(FILE *fp)
28230 {
28231 - int i;
28232 + unsigned int i;
28233 Elf32_Shdr shdr;
28234
28235 secs = calloc(ehdr.e_shnum, sizeof(struct section));
28236 @@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
28237
28238 static void read_strtabs(FILE *fp)
28239 {
28240 - int i;
28241 + unsigned int i;
28242 for (i = 0; i < ehdr.e_shnum; i++) {
28243 struct section *sec = &secs[i];
28244 if (sec->shdr.sh_type != SHT_STRTAB) {
28245 @@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
28246
28247 static void read_symtabs(FILE *fp)
28248 {
28249 - int i,j;
28250 + unsigned int i,j;
28251 for (i = 0; i < ehdr.e_shnum; i++) {
28252 struct section *sec = &secs[i];
28253 if (sec->shdr.sh_type != SHT_SYMTAB) {
28254 @@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
28255 }
28256
28257
28258 -static void read_relocs(FILE *fp)
28259 +static void read_relocs(FILE *fp, int use_real_mode)
28260 {
28261 - int i,j;
28262 + unsigned int i,j;
28263 + uint32_t base;
28264 +
28265 for (i = 0; i < ehdr.e_shnum; i++) {
28266 struct section *sec = &secs[i];
28267 if (sec->shdr.sh_type != SHT_REL) {
28268 @@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
28269 die("Cannot read symbol table: %s\n",
28270 strerror(errno));
28271 }
28272 + base = 0;
28273 +
28274 +#ifdef CONFIG_X86_32
28275 + for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
28276 + if (phdr[j].p_type != PT_LOAD )
28277 + continue;
28278 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
28279 + continue;
28280 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
28281 + break;
28282 + }
28283 +#endif
28284 +
28285 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
28286 Elf32_Rel *rel = &sec->reltab[j];
28287 - rel->r_offset = elf32_to_cpu(rel->r_offset);
28288 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
28289 rel->r_info = elf32_to_cpu(rel->r_info);
28290 }
28291 }
28292 @@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
28293
28294 static void print_absolute_symbols(void)
28295 {
28296 - int i;
28297 + unsigned int i;
28298 printf("Absolute symbols\n");
28299 printf(" Num: Value Size Type Bind Visibility Name\n");
28300 for (i = 0; i < ehdr.e_shnum; i++) {
28301 struct section *sec = &secs[i];
28302 char *sym_strtab;
28303 - int j;
28304 + unsigned int j;
28305
28306 if (sec->shdr.sh_type != SHT_SYMTAB) {
28307 continue;
28308 @@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
28309
28310 static void print_absolute_relocs(void)
28311 {
28312 - int i, printed = 0;
28313 + unsigned int i, printed = 0;
28314
28315 for (i = 0; i < ehdr.e_shnum; i++) {
28316 struct section *sec = &secs[i];
28317 struct section *sec_applies, *sec_symtab;
28318 char *sym_strtab;
28319 Elf32_Sym *sh_symtab;
28320 - int j;
28321 + unsigned int j;
28322 if (sec->shdr.sh_type != SHT_REL) {
28323 continue;
28324 }
28325 @@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
28326 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
28327 int use_real_mode)
28328 {
28329 - int i;
28330 + unsigned int i;
28331 /* Walk through the relocations */
28332 for (i = 0; i < ehdr.e_shnum; i++) {
28333 char *sym_strtab;
28334 Elf32_Sym *sh_symtab;
28335 struct section *sec_applies, *sec_symtab;
28336 - int j;
28337 + unsigned int j;
28338 struct section *sec = &secs[i];
28339
28340 if (sec->shdr.sh_type != SHT_REL) {
28341 @@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
28342 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
28343 r_type = ELF32_R_TYPE(rel->r_info);
28344
28345 + if (!use_real_mode) {
28346 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
28347 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
28348 + continue;
28349 +
28350 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
28351 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
28352 + if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
28353 + continue;
28354 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
28355 + continue;
28356 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
28357 + continue;
28358 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
28359 + continue;
28360 +#endif
28361 + }
28362 +
28363 shn_abs = sym->st_shndx == SHN_ABS;
28364
28365 switch (r_type) {
28366 @@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
28367
28368 static void emit_relocs(int as_text, int use_real_mode)
28369 {
28370 - int i;
28371 + unsigned int i;
28372 /* Count how many relocations I have and allocate space for them. */
28373 reloc_count = 0;
28374 walk_relocs(count_reloc, use_real_mode);
28375 @@ -808,10 +874,11 @@ int main(int argc, char **argv)
28376 fname, strerror(errno));
28377 }
28378 read_ehdr(fp);
28379 + read_phdrs(fp);
28380 read_shdrs(fp);
28381 read_strtabs(fp);
28382 read_symtabs(fp);
28383 - read_relocs(fp);
28384 + read_relocs(fp, use_real_mode);
28385 if (show_absolute_syms) {
28386 print_absolute_symbols();
28387 return 0;
28388 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
28389 index fd14be1..e3c79c0 100644
28390 --- a/arch/x86/vdso/Makefile
28391 +++ b/arch/x86/vdso/Makefile
28392 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
28393 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
28394 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
28395
28396 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28397 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28398 GCOV_PROFILE := n
28399
28400 #
28401 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
28402 index 0faad64..39ef157 100644
28403 --- a/arch/x86/vdso/vdso32-setup.c
28404 +++ b/arch/x86/vdso/vdso32-setup.c
28405 @@ -25,6 +25,7 @@
28406 #include <asm/tlbflush.h>
28407 #include <asm/vdso.h>
28408 #include <asm/proto.h>
28409 +#include <asm/mman.h>
28410
28411 enum {
28412 VDSO_DISABLED = 0,
28413 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
28414 void enable_sep_cpu(void)
28415 {
28416 int cpu = get_cpu();
28417 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
28418 + struct tss_struct *tss = init_tss + cpu;
28419
28420 if (!boot_cpu_has(X86_FEATURE_SEP)) {
28421 put_cpu();
28422 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
28423 gate_vma.vm_start = FIXADDR_USER_START;
28424 gate_vma.vm_end = FIXADDR_USER_END;
28425 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
28426 - gate_vma.vm_page_prot = __P101;
28427 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
28428
28429 return 0;
28430 }
28431 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28432 if (compat)
28433 addr = VDSO_HIGH_BASE;
28434 else {
28435 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
28436 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
28437 if (IS_ERR_VALUE(addr)) {
28438 ret = addr;
28439 goto up_fail;
28440 }
28441 }
28442
28443 - current->mm->context.vdso = (void *)addr;
28444 + current->mm->context.vdso = addr;
28445
28446 if (compat_uses_vma || !compat) {
28447 /*
28448 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28449 }
28450
28451 current_thread_info()->sysenter_return =
28452 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28453 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28454
28455 up_fail:
28456 if (ret)
28457 - current->mm->context.vdso = NULL;
28458 + current->mm->context.vdso = 0;
28459
28460 up_write(&mm->mmap_sem);
28461
28462 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
28463
28464 const char *arch_vma_name(struct vm_area_struct *vma)
28465 {
28466 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28467 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28468 return "[vdso]";
28469 +
28470 +#ifdef CONFIG_PAX_SEGMEXEC
28471 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
28472 + return "[vdso]";
28473 +#endif
28474 +
28475 return NULL;
28476 }
28477
28478 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28479 * Check to see if the corresponding task was created in compat vdso
28480 * mode.
28481 */
28482 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
28483 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
28484 return &gate_vma;
28485 return NULL;
28486 }
28487 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
28488 index 00aaf04..4a26505 100644
28489 --- a/arch/x86/vdso/vma.c
28490 +++ b/arch/x86/vdso/vma.c
28491 @@ -16,8 +16,6 @@
28492 #include <asm/vdso.h>
28493 #include <asm/page.h>
28494
28495 -unsigned int __read_mostly vdso_enabled = 1;
28496 -
28497 extern char vdso_start[], vdso_end[];
28498 extern unsigned short vdso_sync_cpuid;
28499
28500 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
28501 * unaligned here as a result of stack start randomization.
28502 */
28503 addr = PAGE_ALIGN(addr);
28504 - addr = align_addr(addr, NULL, ALIGN_VDSO);
28505
28506 return addr;
28507 }
28508 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
28509 unsigned size)
28510 {
28511 struct mm_struct *mm = current->mm;
28512 - unsigned long addr;
28513 + unsigned long addr = 0;
28514 int ret;
28515
28516 - if (!vdso_enabled)
28517 - return 0;
28518 -
28519 down_write(&mm->mmap_sem);
28520 +
28521 +#ifdef CONFIG_PAX_RANDMMAP
28522 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28523 +#endif
28524 +
28525 addr = vdso_addr(mm->start_stack, size);
28526 + addr = align_addr(addr, NULL, ALIGN_VDSO);
28527 addr = get_unmapped_area(NULL, addr, size, 0, 0);
28528 if (IS_ERR_VALUE(addr)) {
28529 ret = addr;
28530 goto up_fail;
28531 }
28532
28533 - current->mm->context.vdso = (void *)addr;
28534 + mm->context.vdso = addr;
28535
28536 ret = install_special_mapping(mm, addr, size,
28537 VM_READ|VM_EXEC|
28538 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
28539 pages);
28540 - if (ret) {
28541 - current->mm->context.vdso = NULL;
28542 - goto up_fail;
28543 - }
28544 + if (ret)
28545 + mm->context.vdso = 0;
28546
28547 up_fail:
28548 up_write(&mm->mmap_sem);
28549 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28550 vdsox32_size);
28551 }
28552 #endif
28553 -
28554 -static __init int vdso_setup(char *s)
28555 -{
28556 - vdso_enabled = simple_strtoul(s, NULL, 0);
28557 - return 0;
28558 -}
28559 -__setup("vdso=", vdso_setup);
28560 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
28561 index 586d838..38cb3ff 100644
28562 --- a/arch/x86/xen/enlighten.c
28563 +++ b/arch/x86/xen/enlighten.c
28564 @@ -99,8 +99,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
28565
28566 struct shared_info xen_dummy_shared_info;
28567
28568 -void *xen_initial_gdt;
28569 -
28570 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
28571 __read_mostly int xen_have_vector_callback;
28572 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
28573 @@ -918,21 +916,21 @@ static u32 xen_safe_apic_wait_icr_idle(void)
28574
28575 static void set_xen_basic_apic_ops(void)
28576 {
28577 - apic->read = xen_apic_read;
28578 - apic->write = xen_apic_write;
28579 - apic->icr_read = xen_apic_icr_read;
28580 - apic->icr_write = xen_apic_icr_write;
28581 - apic->wait_icr_idle = xen_apic_wait_icr_idle;
28582 - apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
28583 - apic->set_apic_id = xen_set_apic_id;
28584 - apic->get_apic_id = xen_get_apic_id;
28585 + *(void **)&apic->read = xen_apic_read;
28586 + *(void **)&apic->write = xen_apic_write;
28587 + *(void **)&apic->icr_read = xen_apic_icr_read;
28588 + *(void **)&apic->icr_write = xen_apic_icr_write;
28589 + *(void **)&apic->wait_icr_idle = xen_apic_wait_icr_idle;
28590 + *(void **)&apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
28591 + *(void **)&apic->set_apic_id = xen_set_apic_id;
28592 + *(void **)&apic->get_apic_id = xen_get_apic_id;
28593
28594 #ifdef CONFIG_SMP
28595 - apic->send_IPI_allbutself = xen_send_IPI_allbutself;
28596 - apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
28597 - apic->send_IPI_mask = xen_send_IPI_mask;
28598 - apic->send_IPI_all = xen_send_IPI_all;
28599 - apic->send_IPI_self = xen_send_IPI_self;
28600 + *(void **)&apic->send_IPI_allbutself = xen_send_IPI_allbutself;
28601 + *(void **)&apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
28602 + *(void **)&apic->send_IPI_mask = xen_send_IPI_mask;
28603 + *(void **)&apic->send_IPI_all = xen_send_IPI_all;
28604 + *(void **)&apic->send_IPI_self = xen_send_IPI_self;
28605 #endif
28606 }
28607
28608 @@ -1222,30 +1220,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
28609 #endif
28610 };
28611
28612 -static void xen_reboot(int reason)
28613 +static __noreturn void xen_reboot(int reason)
28614 {
28615 struct sched_shutdown r = { .reason = reason };
28616
28617 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
28618 - BUG();
28619 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
28620 + BUG();
28621 }
28622
28623 -static void xen_restart(char *msg)
28624 +static __noreturn void xen_restart(char *msg)
28625 {
28626 xen_reboot(SHUTDOWN_reboot);
28627 }
28628
28629 -static void xen_emergency_restart(void)
28630 +static __noreturn void xen_emergency_restart(void)
28631 {
28632 xen_reboot(SHUTDOWN_reboot);
28633 }
28634
28635 -static void xen_machine_halt(void)
28636 +static __noreturn void xen_machine_halt(void)
28637 {
28638 xen_reboot(SHUTDOWN_poweroff);
28639 }
28640
28641 -static void xen_machine_power_off(void)
28642 +static __noreturn void xen_machine_power_off(void)
28643 {
28644 if (pm_power_off)
28645 pm_power_off();
28646 @@ -1347,7 +1345,17 @@ asmlinkage void __init xen_start_kernel(void)
28647 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
28648
28649 /* Work out if we support NX */
28650 - x86_configure_nx();
28651 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28652 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
28653 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
28654 + unsigned l, h;
28655 +
28656 + __supported_pte_mask |= _PAGE_NX;
28657 + rdmsr(MSR_EFER, l, h);
28658 + l |= EFER_NX;
28659 + wrmsr(MSR_EFER, l, h);
28660 + }
28661 +#endif
28662
28663 xen_setup_features();
28664
28665 @@ -1378,13 +1386,6 @@ asmlinkage void __init xen_start_kernel(void)
28666
28667 machine_ops = xen_machine_ops;
28668
28669 - /*
28670 - * The only reliable way to retain the initial address of the
28671 - * percpu gdt_page is to remember it here, so we can go and
28672 - * mark it RW later, when the initial percpu area is freed.
28673 - */
28674 - xen_initial_gdt = &per_cpu(gdt_page, 0);
28675 -
28676 xen_smp_init();
28677
28678 #ifdef CONFIG_ACPI_NUMA
28679 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
28680 index dcf5f2d..d804c25 100644
28681 --- a/arch/x86/xen/mmu.c
28682 +++ b/arch/x86/xen/mmu.c
28683 @@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
28684 /* L3_k[510] -> level2_kernel_pgt
28685 * L3_i[511] -> level2_fixmap_pgt */
28686 convert_pfn_mfn(level3_kernel_pgt);
28687 + convert_pfn_mfn(level3_vmalloc_start_pgt);
28688 + convert_pfn_mfn(level3_vmalloc_end_pgt);
28689 + convert_pfn_mfn(level3_vmemmap_pgt);
28690
28691 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
28692 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
28693 @@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
28694 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
28695 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
28696 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
28697 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
28698 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
28699 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
28700 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
28701 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
28702 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
28703 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
28704 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
28705
28706 @@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
28707 pv_mmu_ops.set_pud = xen_set_pud;
28708 #if PAGETABLE_LEVELS == 4
28709 pv_mmu_ops.set_pgd = xen_set_pgd;
28710 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
28711 #endif
28712
28713 /* This will work as long as patching hasn't happened yet
28714 @@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
28715 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
28716 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
28717 .set_pgd = xen_set_pgd_hyper,
28718 + .set_pgd_batched = xen_set_pgd_hyper,
28719
28720 .alloc_pud = xen_alloc_pmd_init,
28721 .release_pud = xen_release_pmd_init,
28722 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
28723 index 353c50f..5b7cb95 100644
28724 --- a/arch/x86/xen/smp.c
28725 +++ b/arch/x86/xen/smp.c
28726 @@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
28727 {
28728 BUG_ON(smp_processor_id() != 0);
28729 native_smp_prepare_boot_cpu();
28730 -
28731 - /* We've switched to the "real" per-cpu gdt, so make sure the
28732 - old memory can be recycled */
28733 - make_lowmem_page_readwrite(xen_initial_gdt);
28734 -
28735 xen_filter_cpu_maps();
28736 xen_setup_vcpu_info_placement();
28737 }
28738 @@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
28739 gdt = get_cpu_gdt_table(cpu);
28740
28741 ctxt->flags = VGCF_IN_KERNEL;
28742 - ctxt->user_regs.ds = __USER_DS;
28743 - ctxt->user_regs.es = __USER_DS;
28744 + ctxt->user_regs.ds = __KERNEL_DS;
28745 + ctxt->user_regs.es = __KERNEL_DS;
28746 ctxt->user_regs.ss = __KERNEL_DS;
28747 #ifdef CONFIG_X86_32
28748 ctxt->user_regs.fs = __KERNEL_PERCPU;
28749 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
28750 + savesegment(gs, ctxt->user_regs.gs);
28751 #else
28752 ctxt->gs_base_kernel = per_cpu_offset(cpu);
28753 #endif
28754 @@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
28755 int rc;
28756
28757 per_cpu(current_task, cpu) = idle;
28758 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
28759 #ifdef CONFIG_X86_32
28760 irq_ctx_init(cpu);
28761 #else
28762 clear_tsk_thread_flag(idle, TIF_FORK);
28763 - per_cpu(kernel_stack, cpu) =
28764 - (unsigned long)task_stack_page(idle) -
28765 - KERNEL_STACK_OFFSET + THREAD_SIZE;
28766 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
28767 #endif
28768 xen_setup_runstate_info(cpu);
28769 xen_setup_timer(cpu);
28770 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
28771 index f9643fc..602e8af 100644
28772 --- a/arch/x86/xen/xen-asm_32.S
28773 +++ b/arch/x86/xen/xen-asm_32.S
28774 @@ -84,14 +84,14 @@ ENTRY(xen_iret)
28775 ESP_OFFSET=4 # bytes pushed onto stack
28776
28777 /*
28778 - * Store vcpu_info pointer for easy access. Do it this way to
28779 - * avoid having to reload %fs
28780 + * Store vcpu_info pointer for easy access.
28781 */
28782 #ifdef CONFIG_SMP
28783 - GET_THREAD_INFO(%eax)
28784 - movl TI_cpu(%eax), %eax
28785 - movl __per_cpu_offset(,%eax,4), %eax
28786 - mov xen_vcpu(%eax), %eax
28787 + push %fs
28788 + mov $(__KERNEL_PERCPU), %eax
28789 + mov %eax, %fs
28790 + mov PER_CPU_VAR(xen_vcpu), %eax
28791 + pop %fs
28792 #else
28793 movl xen_vcpu, %eax
28794 #endif
28795 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
28796 index 7faed58..ba4427c 100644
28797 --- a/arch/x86/xen/xen-head.S
28798 +++ b/arch/x86/xen/xen-head.S
28799 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
28800 #ifdef CONFIG_X86_32
28801 mov %esi,xen_start_info
28802 mov $init_thread_union+THREAD_SIZE,%esp
28803 +#ifdef CONFIG_SMP
28804 + movl $cpu_gdt_table,%edi
28805 + movl $__per_cpu_load,%eax
28806 + movw %ax,__KERNEL_PERCPU + 2(%edi)
28807 + rorl $16,%eax
28808 + movb %al,__KERNEL_PERCPU + 4(%edi)
28809 + movb %ah,__KERNEL_PERCPU + 7(%edi)
28810 + movl $__per_cpu_end - 1,%eax
28811 + subl $__per_cpu_start,%eax
28812 + movw %ax,__KERNEL_PERCPU + 0(%edi)
28813 +#endif
28814 #else
28815 mov %rsi,xen_start_info
28816 mov $init_thread_union+THREAD_SIZE,%rsp
28817 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
28818 index a95b417..b6dbd0b 100644
28819 --- a/arch/x86/xen/xen-ops.h
28820 +++ b/arch/x86/xen/xen-ops.h
28821 @@ -10,8 +10,6 @@
28822 extern const char xen_hypervisor_callback[];
28823 extern const char xen_failsafe_callback[];
28824
28825 -extern void *xen_initial_gdt;
28826 -
28827 struct trap_info;
28828 void xen_copy_trap_info(struct trap_info *traps);
28829
28830 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
28831 index 525bd3d..ef888b1 100644
28832 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
28833 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
28834 @@ -119,9 +119,9 @@
28835 ----------------------------------------------------------------------*/
28836
28837 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
28838 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
28839 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
28840 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
28841 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28842
28843 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
28844 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
28845 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
28846 index 2f33760..835e50a 100644
28847 --- a/arch/xtensa/variants/fsf/include/variant/core.h
28848 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
28849 @@ -11,6 +11,7 @@
28850 #ifndef _XTENSA_CORE_H
28851 #define _XTENSA_CORE_H
28852
28853 +#include <linux/const.h>
28854
28855 /****************************************************************************
28856 Parameters Useful for Any Code, USER or PRIVILEGED
28857 @@ -112,9 +113,9 @@
28858 ----------------------------------------------------------------------*/
28859
28860 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28861 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28862 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28863 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28864 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28865
28866 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
28867 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
28868 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
28869 index af00795..2bb8105 100644
28870 --- a/arch/xtensa/variants/s6000/include/variant/core.h
28871 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
28872 @@ -11,6 +11,7 @@
28873 #ifndef _XTENSA_CORE_CONFIGURATION_H
28874 #define _XTENSA_CORE_CONFIGURATION_H
28875
28876 +#include <linux/const.h>
28877
28878 /****************************************************************************
28879 Parameters Useful for Any Code, USER or PRIVILEGED
28880 @@ -118,9 +119,9 @@
28881 ----------------------------------------------------------------------*/
28882
28883 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28884 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28885 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28886 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28887 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28888
28889 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
28890 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
28891 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
28892 index 58916af..9cb880b 100644
28893 --- a/block/blk-iopoll.c
28894 +++ b/block/blk-iopoll.c
28895 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
28896 }
28897 EXPORT_SYMBOL(blk_iopoll_complete);
28898
28899 -static void blk_iopoll_softirq(struct softirq_action *h)
28900 +static void blk_iopoll_softirq(void)
28901 {
28902 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
28903 int rearm = 0, budget = blk_iopoll_budget;
28904 diff --git a/block/blk-map.c b/block/blk-map.c
28905 index 623e1cd..ca1e109 100644
28906 --- a/block/blk-map.c
28907 +++ b/block/blk-map.c
28908 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
28909 if (!len || !kbuf)
28910 return -EINVAL;
28911
28912 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
28913 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
28914 if (do_copy)
28915 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
28916 else
28917 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
28918 index 467c8de..4bddc6d 100644
28919 --- a/block/blk-softirq.c
28920 +++ b/block/blk-softirq.c
28921 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
28922 * Softirq action handler - move entries to local list and loop over them
28923 * while passing them to the queue registered handler.
28924 */
28925 -static void blk_done_softirq(struct softirq_action *h)
28926 +static void blk_done_softirq(void)
28927 {
28928 struct list_head *cpu_list, local_list;
28929
28930 diff --git a/block/bsg.c b/block/bsg.c
28931 index ff64ae3..593560c 100644
28932 --- a/block/bsg.c
28933 +++ b/block/bsg.c
28934 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
28935 struct sg_io_v4 *hdr, struct bsg_device *bd,
28936 fmode_t has_write_perm)
28937 {
28938 + unsigned char tmpcmd[sizeof(rq->__cmd)];
28939 + unsigned char *cmdptr;
28940 +
28941 if (hdr->request_len > BLK_MAX_CDB) {
28942 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
28943 if (!rq->cmd)
28944 return -ENOMEM;
28945 - }
28946 + cmdptr = rq->cmd;
28947 + } else
28948 + cmdptr = tmpcmd;
28949
28950 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
28951 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
28952 hdr->request_len))
28953 return -EFAULT;
28954
28955 + if (cmdptr != rq->cmd)
28956 + memcpy(rq->cmd, cmdptr, hdr->request_len);
28957 +
28958 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
28959 if (blk_verify_command(rq->cmd, has_write_perm))
28960 return -EPERM;
28961 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
28962 index 7c668c8..db3521c 100644
28963 --- a/block/compat_ioctl.c
28964 +++ b/block/compat_ioctl.c
28965 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
28966 err |= __get_user(f->spec1, &uf->spec1);
28967 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
28968 err |= __get_user(name, &uf->name);
28969 - f->name = compat_ptr(name);
28970 + f->name = (void __force_kernel *)compat_ptr(name);
28971 if (err) {
28972 err = -EFAULT;
28973 goto out;
28974 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
28975 index 6296b40..417c00f 100644
28976 --- a/block/partitions/efi.c
28977 +++ b/block/partitions/efi.c
28978 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
28979 if (!gpt)
28980 return NULL;
28981
28982 + if (!le32_to_cpu(gpt->num_partition_entries))
28983 + return NULL;
28984 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
28985 + if (!pte)
28986 + return NULL;
28987 +
28988 count = le32_to_cpu(gpt->num_partition_entries) *
28989 le32_to_cpu(gpt->sizeof_partition_entry);
28990 - if (!count)
28991 - return NULL;
28992 - pte = kzalloc(count, GFP_KERNEL);
28993 - if (!pte)
28994 - return NULL;
28995 -
28996 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
28997 (u8 *) pte,
28998 count) < count) {
28999 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
29000 index 9a87daa..fb17486 100644
29001 --- a/block/scsi_ioctl.c
29002 +++ b/block/scsi_ioctl.c
29003 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
29004 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
29005 struct sg_io_hdr *hdr, fmode_t mode)
29006 {
29007 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
29008 + unsigned char tmpcmd[sizeof(rq->__cmd)];
29009 + unsigned char *cmdptr;
29010 +
29011 + if (rq->cmd != rq->__cmd)
29012 + cmdptr = rq->cmd;
29013 + else
29014 + cmdptr = tmpcmd;
29015 +
29016 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
29017 return -EFAULT;
29018 +
29019 + if (cmdptr != rq->cmd)
29020 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
29021 +
29022 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
29023 return -EPERM;
29024
29025 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
29026 int err;
29027 unsigned int in_len, out_len, bytes, opcode, cmdlen;
29028 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
29029 + unsigned char tmpcmd[sizeof(rq->__cmd)];
29030 + unsigned char *cmdptr;
29031
29032 if (!sic)
29033 return -EINVAL;
29034 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
29035 */
29036 err = -EFAULT;
29037 rq->cmd_len = cmdlen;
29038 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
29039 +
29040 + if (rq->cmd != rq->__cmd)
29041 + cmdptr = rq->cmd;
29042 + else
29043 + cmdptr = tmpcmd;
29044 +
29045 + if (copy_from_user(cmdptr, sic->data, cmdlen))
29046 goto error;
29047
29048 + if (rq->cmd != cmdptr)
29049 + memcpy(rq->cmd, cmdptr, cmdlen);
29050 +
29051 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
29052 goto error;
29053
29054 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
29055 index 7bdd61b..afec999 100644
29056 --- a/crypto/cryptd.c
29057 +++ b/crypto/cryptd.c
29058 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
29059
29060 struct cryptd_blkcipher_request_ctx {
29061 crypto_completion_t complete;
29062 -};
29063 +} __no_const;
29064
29065 struct cryptd_hash_ctx {
29066 struct crypto_shash *child;
29067 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
29068
29069 struct cryptd_aead_request_ctx {
29070 crypto_completion_t complete;
29071 -};
29072 +} __no_const;
29073
29074 static void cryptd_queue_worker(struct work_struct *work);
29075
29076 diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
29077 index b2c99dc..2a10085 100644
29078 --- a/crypto/pcrypt.c
29079 +++ b/crypto/pcrypt.c
29080 @@ -52,7 +52,7 @@ struct padata_pcrypt {
29081 struct pcrypt_cpumask {
29082 cpumask_var_t mask;
29083 } *cb_cpumask;
29084 - struct notifier_block nblock;
29085 + notifier_block_no_const nblock;
29086 };
29087
29088 static struct padata_pcrypt pencrypt;
29089 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
29090 index e6defd8..c26a225 100644
29091 --- a/drivers/acpi/apei/cper.c
29092 +++ b/drivers/acpi/apei/cper.c
29093 @@ -38,12 +38,12 @@
29094 */
29095 u64 cper_next_record_id(void)
29096 {
29097 - static atomic64_t seq;
29098 + static atomic64_unchecked_t seq;
29099
29100 - if (!atomic64_read(&seq))
29101 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
29102 + if (!atomic64_read_unchecked(&seq))
29103 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
29104
29105 - return atomic64_inc_return(&seq);
29106 + return atomic64_inc_return_unchecked(&seq);
29107 }
29108 EXPORT_SYMBOL_GPL(cper_next_record_id);
29109
29110 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
29111 index 7efaeaa..53372fb 100644
29112 --- a/drivers/acpi/battery.c
29113 +++ b/drivers/acpi/battery.c
29114 @@ -115,7 +115,7 @@ struct acpi_battery {
29115 struct mutex sysfs_lock;
29116 struct power_supply bat;
29117 struct acpi_device *device;
29118 - struct notifier_block pm_nb;
29119 + notifier_block_no_const pm_nb;
29120 unsigned long update_time;
29121 int rate_now;
29122 int capacity_now;
29123 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
29124 index 7586544..636a2f0 100644
29125 --- a/drivers/acpi/ec_sys.c
29126 +++ b/drivers/acpi/ec_sys.c
29127 @@ -12,6 +12,7 @@
29128 #include <linux/acpi.h>
29129 #include <linux/debugfs.h>
29130 #include <linux/module.h>
29131 +#include <linux/uaccess.h>
29132 #include "internal.h"
29133
29134 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
29135 @@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
29136 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
29137 */
29138 unsigned int size = EC_SPACE_SIZE;
29139 - u8 *data = (u8 *) buf;
29140 + u8 data;
29141 loff_t init_off = *off;
29142 int err = 0;
29143
29144 @@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
29145 size = count;
29146
29147 while (size) {
29148 - err = ec_read(*off, &data[*off - init_off]);
29149 + err = ec_read(*off, &data);
29150 if (err)
29151 return err;
29152 + if (put_user(data, &buf[*off - init_off]))
29153 + return -EFAULT;
29154 *off += 1;
29155 size--;
29156 }
29157 @@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
29158
29159 unsigned int size = count;
29160 loff_t init_off = *off;
29161 - u8 *data = (u8 *) buf;
29162 int err = 0;
29163
29164 if (*off >= EC_SPACE_SIZE)
29165 @@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
29166 }
29167
29168 while (size) {
29169 - u8 byte_write = data[*off - init_off];
29170 + u8 byte_write;
29171 + if (get_user(byte_write, &buf[*off - init_off]))
29172 + return -EFAULT;
29173 err = ec_write(*off, byte_write);
29174 if (err)
29175 return err;
29176 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
29177 index 27adb09..1ed412d 100644
29178 --- a/drivers/acpi/proc.c
29179 +++ b/drivers/acpi/proc.c
29180 @@ -360,19 +360,13 @@ acpi_system_write_wakeup_device(struct file *file,
29181 size_t count, loff_t * ppos)
29182 {
29183 struct list_head *node, *next;
29184 - char strbuf[5];
29185 - char str[5] = "";
29186 - unsigned int len = count;
29187 + char strbuf[5] = {0};
29188
29189 - if (len > 4)
29190 - len = 4;
29191 - if (len < 0)
29192 + if (count > 4)
29193 + count = 4;
29194 + if (copy_from_user(strbuf, buffer, count))
29195 return -EFAULT;
29196 -
29197 - if (copy_from_user(strbuf, buffer, len))
29198 - return -EFAULT;
29199 - strbuf[len] = '\0';
29200 - sscanf(strbuf, "%s", str);
29201 + strbuf[count] = '\0';
29202
29203 mutex_lock(&acpi_device_lock);
29204 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
29205 @@ -381,7 +375,7 @@ acpi_system_write_wakeup_device(struct file *file,
29206 if (!dev->wakeup.flags.valid)
29207 continue;
29208
29209 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
29210 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
29211 if (device_can_wakeup(&dev->dev)) {
29212 bool enable = !device_may_wakeup(&dev->dev);
29213 device_set_wakeup_enable(&dev->dev, enable);
29214 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
29215 index bd4e5dc..0497b66 100644
29216 --- a/drivers/acpi/processor_driver.c
29217 +++ b/drivers/acpi/processor_driver.c
29218 @@ -552,7 +552,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
29219 return 0;
29220 #endif
29221
29222 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
29223 + BUG_ON(pr->id >= nr_cpu_ids);
29224
29225 /*
29226 * Buggy BIOS check
29227 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
29228 index ac9a69c..6b2a391 100644
29229 --- a/drivers/acpi/video.c
29230 +++ b/drivers/acpi/video.c
29231 @@ -157,7 +157,7 @@ struct acpi_video_bus {
29232 struct mutex device_list_lock; /* protects video_device_list */
29233 struct input_dev *input;
29234 char phys[32]; /* for input device */
29235 - struct notifier_block pm_nb;
29236 + notifier_block_no_const pm_nb;
29237 };
29238
29239 struct acpi_video_device_flags {
29240 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
29241 index f46fbd3..b8341f3 100644
29242 --- a/drivers/ata/libata-core.c
29243 +++ b/drivers/ata/libata-core.c
29244 @@ -4774,7 +4774,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
29245 struct ata_port *ap;
29246 unsigned int tag;
29247
29248 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29249 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29250 ap = qc->ap;
29251
29252 qc->flags = 0;
29253 @@ -4790,7 +4790,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
29254 struct ata_port *ap;
29255 struct ata_link *link;
29256
29257 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29258 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29259 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
29260 ap = qc->ap;
29261 link = qc->dev->link;
29262 @@ -5886,6 +5886,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29263 return;
29264
29265 spin_lock(&lock);
29266 + pax_open_kernel();
29267
29268 for (cur = ops->inherits; cur; cur = cur->inherits) {
29269 void **inherit = (void **)cur;
29270 @@ -5899,8 +5900,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29271 if (IS_ERR(*pp))
29272 *pp = NULL;
29273
29274 - ops->inherits = NULL;
29275 + *(struct ata_port_operations **)&ops->inherits = NULL;
29276
29277 + pax_close_kernel();
29278 spin_unlock(&lock);
29279 }
29280
29281 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
29282 index 371fd2c..0836c78 100644
29283 --- a/drivers/ata/pata_arasan_cf.c
29284 +++ b/drivers/ata/pata_arasan_cf.c
29285 @@ -861,7 +861,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
29286 /* Handle platform specific quirks */
29287 if (pdata->quirk) {
29288 if (pdata->quirk & CF_BROKEN_PIO) {
29289 - ap->ops->set_piomode = NULL;
29290 + pax_open_kernel();
29291 + *(void **)&ap->ops->set_piomode = NULL;
29292 + pax_close_kernel();
29293 ap->pio_mask = 0;
29294 }
29295 if (pdata->quirk & CF_BROKEN_MWDMA)
29296 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29297 index f9b983a..887b9d8 100644
29298 --- a/drivers/atm/adummy.c
29299 +++ b/drivers/atm/adummy.c
29300 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29301 vcc->pop(vcc, skb);
29302 else
29303 dev_kfree_skb_any(skb);
29304 - atomic_inc(&vcc->stats->tx);
29305 + atomic_inc_unchecked(&vcc->stats->tx);
29306
29307 return 0;
29308 }
29309 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29310 index ff7bb8a..568fc0b 100644
29311 --- a/drivers/atm/ambassador.c
29312 +++ b/drivers/atm/ambassador.c
29313 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29314 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29315
29316 // VC layer stats
29317 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29318 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29319
29320 // free the descriptor
29321 kfree (tx_descr);
29322 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29323 dump_skb ("<<<", vc, skb);
29324
29325 // VC layer stats
29326 - atomic_inc(&atm_vcc->stats->rx);
29327 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29328 __net_timestamp(skb);
29329 // end of our responsibility
29330 atm_vcc->push (atm_vcc, skb);
29331 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29332 } else {
29333 PRINTK (KERN_INFO, "dropped over-size frame");
29334 // should we count this?
29335 - atomic_inc(&atm_vcc->stats->rx_drop);
29336 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29337 }
29338
29339 } else {
29340 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29341 }
29342
29343 if (check_area (skb->data, skb->len)) {
29344 - atomic_inc(&atm_vcc->stats->tx_err);
29345 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29346 return -ENOMEM; // ?
29347 }
29348
29349 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29350 index b22d71c..d6e1049 100644
29351 --- a/drivers/atm/atmtcp.c
29352 +++ b/drivers/atm/atmtcp.c
29353 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29354 if (vcc->pop) vcc->pop(vcc,skb);
29355 else dev_kfree_skb(skb);
29356 if (dev_data) return 0;
29357 - atomic_inc(&vcc->stats->tx_err);
29358 + atomic_inc_unchecked(&vcc->stats->tx_err);
29359 return -ENOLINK;
29360 }
29361 size = skb->len+sizeof(struct atmtcp_hdr);
29362 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29363 if (!new_skb) {
29364 if (vcc->pop) vcc->pop(vcc,skb);
29365 else dev_kfree_skb(skb);
29366 - atomic_inc(&vcc->stats->tx_err);
29367 + atomic_inc_unchecked(&vcc->stats->tx_err);
29368 return -ENOBUFS;
29369 }
29370 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29371 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29372 if (vcc->pop) vcc->pop(vcc,skb);
29373 else dev_kfree_skb(skb);
29374 out_vcc->push(out_vcc,new_skb);
29375 - atomic_inc(&vcc->stats->tx);
29376 - atomic_inc(&out_vcc->stats->rx);
29377 + atomic_inc_unchecked(&vcc->stats->tx);
29378 + atomic_inc_unchecked(&out_vcc->stats->rx);
29379 return 0;
29380 }
29381
29382 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29383 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29384 read_unlock(&vcc_sklist_lock);
29385 if (!out_vcc) {
29386 - atomic_inc(&vcc->stats->tx_err);
29387 + atomic_inc_unchecked(&vcc->stats->tx_err);
29388 goto done;
29389 }
29390 skb_pull(skb,sizeof(struct atmtcp_hdr));
29391 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29392 __net_timestamp(new_skb);
29393 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29394 out_vcc->push(out_vcc,new_skb);
29395 - atomic_inc(&vcc->stats->tx);
29396 - atomic_inc(&out_vcc->stats->rx);
29397 + atomic_inc_unchecked(&vcc->stats->tx);
29398 + atomic_inc_unchecked(&out_vcc->stats->rx);
29399 done:
29400 if (vcc->pop) vcc->pop(vcc,skb);
29401 else dev_kfree_skb(skb);
29402 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29403 index 81e44f7..498ea36 100644
29404 --- a/drivers/atm/eni.c
29405 +++ b/drivers/atm/eni.c
29406 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29407 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29408 vcc->dev->number);
29409 length = 0;
29410 - atomic_inc(&vcc->stats->rx_err);
29411 + atomic_inc_unchecked(&vcc->stats->rx_err);
29412 }
29413 else {
29414 length = ATM_CELL_SIZE-1; /* no HEC */
29415 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29416 size);
29417 }
29418 eff = length = 0;
29419 - atomic_inc(&vcc->stats->rx_err);
29420 + atomic_inc_unchecked(&vcc->stats->rx_err);
29421 }
29422 else {
29423 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29424 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29425 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29426 vcc->dev->number,vcc->vci,length,size << 2,descr);
29427 length = eff = 0;
29428 - atomic_inc(&vcc->stats->rx_err);
29429 + atomic_inc_unchecked(&vcc->stats->rx_err);
29430 }
29431 }
29432 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29433 @@ -767,7 +767,7 @@ rx_dequeued++;
29434 vcc->push(vcc,skb);
29435 pushed++;
29436 }
29437 - atomic_inc(&vcc->stats->rx);
29438 + atomic_inc_unchecked(&vcc->stats->rx);
29439 }
29440 wake_up(&eni_dev->rx_wait);
29441 }
29442 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29443 PCI_DMA_TODEVICE);
29444 if (vcc->pop) vcc->pop(vcc,skb);
29445 else dev_kfree_skb_irq(skb);
29446 - atomic_inc(&vcc->stats->tx);
29447 + atomic_inc_unchecked(&vcc->stats->tx);
29448 wake_up(&eni_dev->tx_wait);
29449 dma_complete++;
29450 }
29451 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29452 index 86fed1b..6dc4721 100644
29453 --- a/drivers/atm/firestream.c
29454 +++ b/drivers/atm/firestream.c
29455 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29456 }
29457 }
29458
29459 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29460 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29461
29462 fs_dprintk (FS_DEBUG_TXMEM, "i");
29463 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29464 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29465 #endif
29466 skb_put (skb, qe->p1 & 0xffff);
29467 ATM_SKB(skb)->vcc = atm_vcc;
29468 - atomic_inc(&atm_vcc->stats->rx);
29469 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29470 __net_timestamp(skb);
29471 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29472 atm_vcc->push (atm_vcc, skb);
29473 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29474 kfree (pe);
29475 }
29476 if (atm_vcc)
29477 - atomic_inc(&atm_vcc->stats->rx_drop);
29478 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29479 break;
29480 case 0x1f: /* Reassembly abort: no buffers. */
29481 /* Silently increment error counter. */
29482 if (atm_vcc)
29483 - atomic_inc(&atm_vcc->stats->rx_drop);
29484 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29485 break;
29486 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29487 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29488 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29489 index 361f5ae..7fc552d 100644
29490 --- a/drivers/atm/fore200e.c
29491 +++ b/drivers/atm/fore200e.c
29492 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29493 #endif
29494 /* check error condition */
29495 if (*entry->status & STATUS_ERROR)
29496 - atomic_inc(&vcc->stats->tx_err);
29497 + atomic_inc_unchecked(&vcc->stats->tx_err);
29498 else
29499 - atomic_inc(&vcc->stats->tx);
29500 + atomic_inc_unchecked(&vcc->stats->tx);
29501 }
29502 }
29503
29504 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29505 if (skb == NULL) {
29506 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29507
29508 - atomic_inc(&vcc->stats->rx_drop);
29509 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29510 return -ENOMEM;
29511 }
29512
29513 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29514
29515 dev_kfree_skb_any(skb);
29516
29517 - atomic_inc(&vcc->stats->rx_drop);
29518 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29519 return -ENOMEM;
29520 }
29521
29522 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29523
29524 vcc->push(vcc, skb);
29525 - atomic_inc(&vcc->stats->rx);
29526 + atomic_inc_unchecked(&vcc->stats->rx);
29527
29528 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29529
29530 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29531 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29532 fore200e->atm_dev->number,
29533 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29534 - atomic_inc(&vcc->stats->rx_err);
29535 + atomic_inc_unchecked(&vcc->stats->rx_err);
29536 }
29537 }
29538
29539 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29540 goto retry_here;
29541 }
29542
29543 - atomic_inc(&vcc->stats->tx_err);
29544 + atomic_inc_unchecked(&vcc->stats->tx_err);
29545
29546 fore200e->tx_sat++;
29547 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29548 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29549 index b182c2f..1c6fa8a 100644
29550 --- a/drivers/atm/he.c
29551 +++ b/drivers/atm/he.c
29552 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29553
29554 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29555 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29556 - atomic_inc(&vcc->stats->rx_drop);
29557 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29558 goto return_host_buffers;
29559 }
29560
29561 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29562 RBRQ_LEN_ERR(he_dev->rbrq_head)
29563 ? "LEN_ERR" : "",
29564 vcc->vpi, vcc->vci);
29565 - atomic_inc(&vcc->stats->rx_err);
29566 + atomic_inc_unchecked(&vcc->stats->rx_err);
29567 goto return_host_buffers;
29568 }
29569
29570 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29571 vcc->push(vcc, skb);
29572 spin_lock(&he_dev->global_lock);
29573
29574 - atomic_inc(&vcc->stats->rx);
29575 + atomic_inc_unchecked(&vcc->stats->rx);
29576
29577 return_host_buffers:
29578 ++pdus_assembled;
29579 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29580 tpd->vcc->pop(tpd->vcc, tpd->skb);
29581 else
29582 dev_kfree_skb_any(tpd->skb);
29583 - atomic_inc(&tpd->vcc->stats->tx_err);
29584 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29585 }
29586 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29587 return;
29588 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29589 vcc->pop(vcc, skb);
29590 else
29591 dev_kfree_skb_any(skb);
29592 - atomic_inc(&vcc->stats->tx_err);
29593 + atomic_inc_unchecked(&vcc->stats->tx_err);
29594 return -EINVAL;
29595 }
29596
29597 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29598 vcc->pop(vcc, skb);
29599 else
29600 dev_kfree_skb_any(skb);
29601 - atomic_inc(&vcc->stats->tx_err);
29602 + atomic_inc_unchecked(&vcc->stats->tx_err);
29603 return -EINVAL;
29604 }
29605 #endif
29606 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29607 vcc->pop(vcc, skb);
29608 else
29609 dev_kfree_skb_any(skb);
29610 - atomic_inc(&vcc->stats->tx_err);
29611 + atomic_inc_unchecked(&vcc->stats->tx_err);
29612 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29613 return -ENOMEM;
29614 }
29615 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29616 vcc->pop(vcc, skb);
29617 else
29618 dev_kfree_skb_any(skb);
29619 - atomic_inc(&vcc->stats->tx_err);
29620 + atomic_inc_unchecked(&vcc->stats->tx_err);
29621 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29622 return -ENOMEM;
29623 }
29624 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29625 __enqueue_tpd(he_dev, tpd, cid);
29626 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29627
29628 - atomic_inc(&vcc->stats->tx);
29629 + atomic_inc_unchecked(&vcc->stats->tx);
29630
29631 return 0;
29632 }
29633 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29634 index 7d01c2a..4e3ac01 100644
29635 --- a/drivers/atm/horizon.c
29636 +++ b/drivers/atm/horizon.c
29637 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29638 {
29639 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29640 // VC layer stats
29641 - atomic_inc(&vcc->stats->rx);
29642 + atomic_inc_unchecked(&vcc->stats->rx);
29643 __net_timestamp(skb);
29644 // end of our responsibility
29645 vcc->push (vcc, skb);
29646 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29647 dev->tx_iovec = NULL;
29648
29649 // VC layer stats
29650 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29651 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29652
29653 // free the skb
29654 hrz_kfree_skb (skb);
29655 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29656 index 8974bd2..b856f85 100644
29657 --- a/drivers/atm/idt77252.c
29658 +++ b/drivers/atm/idt77252.c
29659 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29660 else
29661 dev_kfree_skb(skb);
29662
29663 - atomic_inc(&vcc->stats->tx);
29664 + atomic_inc_unchecked(&vcc->stats->tx);
29665 }
29666
29667 atomic_dec(&scq->used);
29668 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29669 if ((sb = dev_alloc_skb(64)) == NULL) {
29670 printk("%s: Can't allocate buffers for aal0.\n",
29671 card->name);
29672 - atomic_add(i, &vcc->stats->rx_drop);
29673 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
29674 break;
29675 }
29676 if (!atm_charge(vcc, sb->truesize)) {
29677 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29678 card->name);
29679 - atomic_add(i - 1, &vcc->stats->rx_drop);
29680 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29681 dev_kfree_skb(sb);
29682 break;
29683 }
29684 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29685 ATM_SKB(sb)->vcc = vcc;
29686 __net_timestamp(sb);
29687 vcc->push(vcc, sb);
29688 - atomic_inc(&vcc->stats->rx);
29689 + atomic_inc_unchecked(&vcc->stats->rx);
29690
29691 cell += ATM_CELL_PAYLOAD;
29692 }
29693 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29694 "(CDC: %08x)\n",
29695 card->name, len, rpp->len, readl(SAR_REG_CDC));
29696 recycle_rx_pool_skb(card, rpp);
29697 - atomic_inc(&vcc->stats->rx_err);
29698 + atomic_inc_unchecked(&vcc->stats->rx_err);
29699 return;
29700 }
29701 if (stat & SAR_RSQE_CRC) {
29702 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29703 recycle_rx_pool_skb(card, rpp);
29704 - atomic_inc(&vcc->stats->rx_err);
29705 + atomic_inc_unchecked(&vcc->stats->rx_err);
29706 return;
29707 }
29708 if (skb_queue_len(&rpp->queue) > 1) {
29709 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29710 RXPRINTK("%s: Can't alloc RX skb.\n",
29711 card->name);
29712 recycle_rx_pool_skb(card, rpp);
29713 - atomic_inc(&vcc->stats->rx_err);
29714 + atomic_inc_unchecked(&vcc->stats->rx_err);
29715 return;
29716 }
29717 if (!atm_charge(vcc, skb->truesize)) {
29718 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29719 __net_timestamp(skb);
29720
29721 vcc->push(vcc, skb);
29722 - atomic_inc(&vcc->stats->rx);
29723 + atomic_inc_unchecked(&vcc->stats->rx);
29724
29725 return;
29726 }
29727 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29728 __net_timestamp(skb);
29729
29730 vcc->push(vcc, skb);
29731 - atomic_inc(&vcc->stats->rx);
29732 + atomic_inc_unchecked(&vcc->stats->rx);
29733
29734 if (skb->truesize > SAR_FB_SIZE_3)
29735 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29736 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29737 if (vcc->qos.aal != ATM_AAL0) {
29738 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29739 card->name, vpi, vci);
29740 - atomic_inc(&vcc->stats->rx_drop);
29741 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29742 goto drop;
29743 }
29744
29745 if ((sb = dev_alloc_skb(64)) == NULL) {
29746 printk("%s: Can't allocate buffers for AAL0.\n",
29747 card->name);
29748 - atomic_inc(&vcc->stats->rx_err);
29749 + atomic_inc_unchecked(&vcc->stats->rx_err);
29750 goto drop;
29751 }
29752
29753 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29754 ATM_SKB(sb)->vcc = vcc;
29755 __net_timestamp(sb);
29756 vcc->push(vcc, sb);
29757 - atomic_inc(&vcc->stats->rx);
29758 + atomic_inc_unchecked(&vcc->stats->rx);
29759
29760 drop:
29761 skb_pull(queue, 64);
29762 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29763
29764 if (vc == NULL) {
29765 printk("%s: NULL connection in send().\n", card->name);
29766 - atomic_inc(&vcc->stats->tx_err);
29767 + atomic_inc_unchecked(&vcc->stats->tx_err);
29768 dev_kfree_skb(skb);
29769 return -EINVAL;
29770 }
29771 if (!test_bit(VCF_TX, &vc->flags)) {
29772 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29773 - atomic_inc(&vcc->stats->tx_err);
29774 + atomic_inc_unchecked(&vcc->stats->tx_err);
29775 dev_kfree_skb(skb);
29776 return -EINVAL;
29777 }
29778 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29779 break;
29780 default:
29781 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29782 - atomic_inc(&vcc->stats->tx_err);
29783 + atomic_inc_unchecked(&vcc->stats->tx_err);
29784 dev_kfree_skb(skb);
29785 return -EINVAL;
29786 }
29787
29788 if (skb_shinfo(skb)->nr_frags != 0) {
29789 printk("%s: No scatter-gather yet.\n", card->name);
29790 - atomic_inc(&vcc->stats->tx_err);
29791 + atomic_inc_unchecked(&vcc->stats->tx_err);
29792 dev_kfree_skb(skb);
29793 return -EINVAL;
29794 }
29795 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29796
29797 err = queue_skb(card, vc, skb, oam);
29798 if (err) {
29799 - atomic_inc(&vcc->stats->tx_err);
29800 + atomic_inc_unchecked(&vcc->stats->tx_err);
29801 dev_kfree_skb(skb);
29802 return err;
29803 }
29804 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
29805 skb = dev_alloc_skb(64);
29806 if (!skb) {
29807 printk("%s: Out of memory in send_oam().\n", card->name);
29808 - atomic_inc(&vcc->stats->tx_err);
29809 + atomic_inc_unchecked(&vcc->stats->tx_err);
29810 return -ENOMEM;
29811 }
29812 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
29813 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
29814 index 96cce6d..62c3ec5 100644
29815 --- a/drivers/atm/iphase.c
29816 +++ b/drivers/atm/iphase.c
29817 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
29818 status = (u_short) (buf_desc_ptr->desc_mode);
29819 if (status & (RX_CER | RX_PTE | RX_OFL))
29820 {
29821 - atomic_inc(&vcc->stats->rx_err);
29822 + atomic_inc_unchecked(&vcc->stats->rx_err);
29823 IF_ERR(printk("IA: bad packet, dropping it");)
29824 if (status & RX_CER) {
29825 IF_ERR(printk(" cause: packet CRC error\n");)
29826 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
29827 len = dma_addr - buf_addr;
29828 if (len > iadev->rx_buf_sz) {
29829 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
29830 - atomic_inc(&vcc->stats->rx_err);
29831 + atomic_inc_unchecked(&vcc->stats->rx_err);
29832 goto out_free_desc;
29833 }
29834
29835 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29836 ia_vcc = INPH_IA_VCC(vcc);
29837 if (ia_vcc == NULL)
29838 {
29839 - atomic_inc(&vcc->stats->rx_err);
29840 + atomic_inc_unchecked(&vcc->stats->rx_err);
29841 atm_return(vcc, skb->truesize);
29842 dev_kfree_skb_any(skb);
29843 goto INCR_DLE;
29844 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29845 if ((length > iadev->rx_buf_sz) || (length >
29846 (skb->len - sizeof(struct cpcs_trailer))))
29847 {
29848 - atomic_inc(&vcc->stats->rx_err);
29849 + atomic_inc_unchecked(&vcc->stats->rx_err);
29850 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
29851 length, skb->len);)
29852 atm_return(vcc, skb->truesize);
29853 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29854
29855 IF_RX(printk("rx_dle_intr: skb push");)
29856 vcc->push(vcc,skb);
29857 - atomic_inc(&vcc->stats->rx);
29858 + atomic_inc_unchecked(&vcc->stats->rx);
29859 iadev->rx_pkt_cnt++;
29860 }
29861 INCR_DLE:
29862 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
29863 {
29864 struct k_sonet_stats *stats;
29865 stats = &PRIV(_ia_dev[board])->sonet_stats;
29866 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
29867 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
29868 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
29869 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
29870 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
29871 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
29872 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
29873 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
29874 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
29875 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
29876 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
29877 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
29878 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
29879 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
29880 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
29881 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
29882 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
29883 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
29884 }
29885 ia_cmds.status = 0;
29886 break;
29887 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29888 if ((desc == 0) || (desc > iadev->num_tx_desc))
29889 {
29890 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
29891 - atomic_inc(&vcc->stats->tx);
29892 + atomic_inc_unchecked(&vcc->stats->tx);
29893 if (vcc->pop)
29894 vcc->pop(vcc, skb);
29895 else
29896 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29897 ATM_DESC(skb) = vcc->vci;
29898 skb_queue_tail(&iadev->tx_dma_q, skb);
29899
29900 - atomic_inc(&vcc->stats->tx);
29901 + atomic_inc_unchecked(&vcc->stats->tx);
29902 iadev->tx_pkt_cnt++;
29903 /* Increment transaction counter */
29904 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
29905
29906 #if 0
29907 /* add flow control logic */
29908 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
29909 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
29910 if (iavcc->vc_desc_cnt > 10) {
29911 vcc->tx_quota = vcc->tx_quota * 3 / 4;
29912 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
29913 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
29914 index 68c7588..7036683 100644
29915 --- a/drivers/atm/lanai.c
29916 +++ b/drivers/atm/lanai.c
29917 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
29918 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
29919 lanai_endtx(lanai, lvcc);
29920 lanai_free_skb(lvcc->tx.atmvcc, skb);
29921 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
29922 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
29923 }
29924
29925 /* Try to fill the buffer - don't call unless there is backlog */
29926 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
29927 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
29928 __net_timestamp(skb);
29929 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
29930 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
29931 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
29932 out:
29933 lvcc->rx.buf.ptr = end;
29934 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
29935 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29936 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
29937 "vcc %d\n", lanai->number, (unsigned int) s, vci);
29938 lanai->stats.service_rxnotaal5++;
29939 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29940 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29941 return 0;
29942 }
29943 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
29944 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29945 int bytes;
29946 read_unlock(&vcc_sklist_lock);
29947 DPRINTK("got trashed rx pdu on vci %d\n", vci);
29948 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29949 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29950 lvcc->stats.x.aal5.service_trash++;
29951 bytes = (SERVICE_GET_END(s) * 16) -
29952 (((unsigned long) lvcc->rx.buf.ptr) -
29953 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29954 }
29955 if (s & SERVICE_STREAM) {
29956 read_unlock(&vcc_sklist_lock);
29957 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29958 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29959 lvcc->stats.x.aal5.service_stream++;
29960 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
29961 "PDU on VCI %d!\n", lanai->number, vci);
29962 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29963 return 0;
29964 }
29965 DPRINTK("got rx crc error on vci %d\n", vci);
29966 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29967 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29968 lvcc->stats.x.aal5.service_rxcrc++;
29969 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
29970 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
29971 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
29972 index 1c70c45..300718d 100644
29973 --- a/drivers/atm/nicstar.c
29974 +++ b/drivers/atm/nicstar.c
29975 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29976 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
29977 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
29978 card->index);
29979 - atomic_inc(&vcc->stats->tx_err);
29980 + atomic_inc_unchecked(&vcc->stats->tx_err);
29981 dev_kfree_skb_any(skb);
29982 return -EINVAL;
29983 }
29984 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29985 if (!vc->tx) {
29986 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
29987 card->index);
29988 - atomic_inc(&vcc->stats->tx_err);
29989 + atomic_inc_unchecked(&vcc->stats->tx_err);
29990 dev_kfree_skb_any(skb);
29991 return -EINVAL;
29992 }
29993 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29994 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
29995 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
29996 card->index);
29997 - atomic_inc(&vcc->stats->tx_err);
29998 + atomic_inc_unchecked(&vcc->stats->tx_err);
29999 dev_kfree_skb_any(skb);
30000 return -EINVAL;
30001 }
30002
30003 if (skb_shinfo(skb)->nr_frags != 0) {
30004 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30005 - atomic_inc(&vcc->stats->tx_err);
30006 + atomic_inc_unchecked(&vcc->stats->tx_err);
30007 dev_kfree_skb_any(skb);
30008 return -EINVAL;
30009 }
30010 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30011 }
30012
30013 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
30014 - atomic_inc(&vcc->stats->tx_err);
30015 + atomic_inc_unchecked(&vcc->stats->tx_err);
30016 dev_kfree_skb_any(skb);
30017 return -EIO;
30018 }
30019 - atomic_inc(&vcc->stats->tx);
30020 + atomic_inc_unchecked(&vcc->stats->tx);
30021
30022 return 0;
30023 }
30024 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30025 printk
30026 ("nicstar%d: Can't allocate buffers for aal0.\n",
30027 card->index);
30028 - atomic_add(i, &vcc->stats->rx_drop);
30029 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
30030 break;
30031 }
30032 if (!atm_charge(vcc, sb->truesize)) {
30033 RXPRINTK
30034 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
30035 card->index);
30036 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
30037 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
30038 dev_kfree_skb_any(sb);
30039 break;
30040 }
30041 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30042 ATM_SKB(sb)->vcc = vcc;
30043 __net_timestamp(sb);
30044 vcc->push(vcc, sb);
30045 - atomic_inc(&vcc->stats->rx);
30046 + atomic_inc_unchecked(&vcc->stats->rx);
30047 cell += ATM_CELL_PAYLOAD;
30048 }
30049
30050 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30051 if (iovb == NULL) {
30052 printk("nicstar%d: Out of iovec buffers.\n",
30053 card->index);
30054 - atomic_inc(&vcc->stats->rx_drop);
30055 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30056 recycle_rx_buf(card, skb);
30057 return;
30058 }
30059 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30060 small or large buffer itself. */
30061 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
30062 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30063 - atomic_inc(&vcc->stats->rx_err);
30064 + atomic_inc_unchecked(&vcc->stats->rx_err);
30065 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30066 NS_MAX_IOVECS);
30067 NS_PRV_IOVCNT(iovb) = 0;
30068 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30069 ("nicstar%d: Expected a small buffer, and this is not one.\n",
30070 card->index);
30071 which_list(card, skb);
30072 - atomic_inc(&vcc->stats->rx_err);
30073 + atomic_inc_unchecked(&vcc->stats->rx_err);
30074 recycle_rx_buf(card, skb);
30075 vc->rx_iov = NULL;
30076 recycle_iov_buf(card, iovb);
30077 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30078 ("nicstar%d: Expected a large buffer, and this is not one.\n",
30079 card->index);
30080 which_list(card, skb);
30081 - atomic_inc(&vcc->stats->rx_err);
30082 + atomic_inc_unchecked(&vcc->stats->rx_err);
30083 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30084 NS_PRV_IOVCNT(iovb));
30085 vc->rx_iov = NULL;
30086 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30087 printk(" - PDU size mismatch.\n");
30088 else
30089 printk(".\n");
30090 - atomic_inc(&vcc->stats->rx_err);
30091 + atomic_inc_unchecked(&vcc->stats->rx_err);
30092 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30093 NS_PRV_IOVCNT(iovb));
30094 vc->rx_iov = NULL;
30095 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30096 /* skb points to a small buffer */
30097 if (!atm_charge(vcc, skb->truesize)) {
30098 push_rxbufs(card, skb);
30099 - atomic_inc(&vcc->stats->rx_drop);
30100 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30101 } else {
30102 skb_put(skb, len);
30103 dequeue_sm_buf(card, skb);
30104 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30105 ATM_SKB(skb)->vcc = vcc;
30106 __net_timestamp(skb);
30107 vcc->push(vcc, skb);
30108 - atomic_inc(&vcc->stats->rx);
30109 + atomic_inc_unchecked(&vcc->stats->rx);
30110 }
30111 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
30112 struct sk_buff *sb;
30113 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30114 if (len <= NS_SMBUFSIZE) {
30115 if (!atm_charge(vcc, sb->truesize)) {
30116 push_rxbufs(card, sb);
30117 - atomic_inc(&vcc->stats->rx_drop);
30118 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30119 } else {
30120 skb_put(sb, len);
30121 dequeue_sm_buf(card, sb);
30122 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30123 ATM_SKB(sb)->vcc = vcc;
30124 __net_timestamp(sb);
30125 vcc->push(vcc, sb);
30126 - atomic_inc(&vcc->stats->rx);
30127 + atomic_inc_unchecked(&vcc->stats->rx);
30128 }
30129
30130 push_rxbufs(card, skb);
30131 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30132
30133 if (!atm_charge(vcc, skb->truesize)) {
30134 push_rxbufs(card, skb);
30135 - atomic_inc(&vcc->stats->rx_drop);
30136 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30137 } else {
30138 dequeue_lg_buf(card, skb);
30139 #ifdef NS_USE_DESTRUCTORS
30140 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30141 ATM_SKB(skb)->vcc = vcc;
30142 __net_timestamp(skb);
30143 vcc->push(vcc, skb);
30144 - atomic_inc(&vcc->stats->rx);
30145 + atomic_inc_unchecked(&vcc->stats->rx);
30146 }
30147
30148 push_rxbufs(card, sb);
30149 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30150 printk
30151 ("nicstar%d: Out of huge buffers.\n",
30152 card->index);
30153 - atomic_inc(&vcc->stats->rx_drop);
30154 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30155 recycle_iovec_rx_bufs(card,
30156 (struct iovec *)
30157 iovb->data,
30158 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30159 card->hbpool.count++;
30160 } else
30161 dev_kfree_skb_any(hb);
30162 - atomic_inc(&vcc->stats->rx_drop);
30163 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30164 } else {
30165 /* Copy the small buffer to the huge buffer */
30166 sb = (struct sk_buff *)iov->iov_base;
30167 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30168 #endif /* NS_USE_DESTRUCTORS */
30169 __net_timestamp(hb);
30170 vcc->push(vcc, hb);
30171 - atomic_inc(&vcc->stats->rx);
30172 + atomic_inc_unchecked(&vcc->stats->rx);
30173 }
30174 }
30175
30176 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30177 index 9851093..adb2b1e 100644
30178 --- a/drivers/atm/solos-pci.c
30179 +++ b/drivers/atm/solos-pci.c
30180 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
30181 }
30182 atm_charge(vcc, skb->truesize);
30183 vcc->push(vcc, skb);
30184 - atomic_inc(&vcc->stats->rx);
30185 + atomic_inc_unchecked(&vcc->stats->rx);
30186 break;
30187
30188 case PKT_STATUS:
30189 @@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30190 vcc = SKB_CB(oldskb)->vcc;
30191
30192 if (vcc) {
30193 - atomic_inc(&vcc->stats->tx);
30194 + atomic_inc_unchecked(&vcc->stats->tx);
30195 solos_pop(vcc, oldskb);
30196 } else
30197 dev_kfree_skb_irq(oldskb);
30198 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30199 index 0215934..ce9f5b1 100644
30200 --- a/drivers/atm/suni.c
30201 +++ b/drivers/atm/suni.c
30202 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30203
30204
30205 #define ADD_LIMITED(s,v) \
30206 - atomic_add((v),&stats->s); \
30207 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30208 + atomic_add_unchecked((v),&stats->s); \
30209 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30210
30211
30212 static void suni_hz(unsigned long from_timer)
30213 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30214 index 5120a96..e2572bd 100644
30215 --- a/drivers/atm/uPD98402.c
30216 +++ b/drivers/atm/uPD98402.c
30217 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30218 struct sonet_stats tmp;
30219 int error = 0;
30220
30221 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30222 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30223 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30224 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30225 if (zero && !error) {
30226 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30227
30228
30229 #define ADD_LIMITED(s,v) \
30230 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30231 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30232 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30233 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30234 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30235 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30236
30237
30238 static void stat_event(struct atm_dev *dev)
30239 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
30240 if (reason & uPD98402_INT_PFM) stat_event(dev);
30241 if (reason & uPD98402_INT_PCO) {
30242 (void) GET(PCOCR); /* clear interrupt cause */
30243 - atomic_add(GET(HECCT),
30244 + atomic_add_unchecked(GET(HECCT),
30245 &PRIV(dev)->sonet_stats.uncorr_hcs);
30246 }
30247 if ((reason & uPD98402_INT_RFO) &&
30248 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
30249 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30250 uPD98402_INT_LOS),PIMR); /* enable them */
30251 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30252 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30253 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30254 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30255 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30256 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30257 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30258 return 0;
30259 }
30260
30261 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30262 index abe4e20..83c4727 100644
30263 --- a/drivers/atm/zatm.c
30264 +++ b/drivers/atm/zatm.c
30265 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30266 }
30267 if (!size) {
30268 dev_kfree_skb_irq(skb);
30269 - if (vcc) atomic_inc(&vcc->stats->rx_err);
30270 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30271 continue;
30272 }
30273 if (!atm_charge(vcc,skb->truesize)) {
30274 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30275 skb->len = size;
30276 ATM_SKB(skb)->vcc = vcc;
30277 vcc->push(vcc,skb);
30278 - atomic_inc(&vcc->stats->rx);
30279 + atomic_inc_unchecked(&vcc->stats->rx);
30280 }
30281 zout(pos & 0xffff,MTA(mbx));
30282 #if 0 /* probably a stupid idea */
30283 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30284 skb_queue_head(&zatm_vcc->backlog,skb);
30285 break;
30286 }
30287 - atomic_inc(&vcc->stats->tx);
30288 + atomic_inc_unchecked(&vcc->stats->tx);
30289 wake_up(&zatm_vcc->tx_wait);
30290 }
30291
30292 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
30293 index 147d1a4..d0fd4b0 100644
30294 --- a/drivers/base/devtmpfs.c
30295 +++ b/drivers/base/devtmpfs.c
30296 @@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
30297 if (!thread)
30298 return 0;
30299
30300 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
30301 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
30302 if (err)
30303 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
30304 else
30305 diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
30306 index 8945f4e..4b47cf4 100644
30307 --- a/drivers/base/firmware_class.c
30308 +++ b/drivers/base/firmware_class.c
30309 @@ -118,7 +118,7 @@ struct firmware_cache {
30310
30311 struct delayed_work work;
30312
30313 - struct notifier_block pm_notify;
30314 + notifier_block_no_const pm_notify;
30315 #endif
30316 };
30317
30318 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
30319 index e6ee5e8..98ad7fc 100644
30320 --- a/drivers/base/power/wakeup.c
30321 +++ b/drivers/base/power/wakeup.c
30322 @@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
30323 * They need to be modified together atomically, so it's better to use one
30324 * atomic variable to hold them both.
30325 */
30326 -static atomic_t combined_event_count = ATOMIC_INIT(0);
30327 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
30328
30329 #define IN_PROGRESS_BITS (sizeof(int) * 4)
30330 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
30331
30332 static void split_counters(unsigned int *cnt, unsigned int *inpr)
30333 {
30334 - unsigned int comb = atomic_read(&combined_event_count);
30335 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
30336
30337 *cnt = (comb >> IN_PROGRESS_BITS);
30338 *inpr = comb & MAX_IN_PROGRESS;
30339 @@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
30340 ws->start_prevent_time = ws->last_time;
30341
30342 /* Increment the counter of events in progress. */
30343 - cec = atomic_inc_return(&combined_event_count);
30344 + cec = atomic_inc_return_unchecked(&combined_event_count);
30345
30346 trace_wakeup_source_activate(ws->name, cec);
30347 }
30348 @@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
30349 * Increment the counter of registered wakeup events and decrement the
30350 * couter of wakeup events in progress simultaneously.
30351 */
30352 - cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
30353 + cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
30354 trace_wakeup_source_deactivate(ws->name, cec);
30355
30356 split_counters(&cnt, &inpr);
30357 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30358 index ca83f96..69d4ea9 100644
30359 --- a/drivers/block/cciss.c
30360 +++ b/drivers/block/cciss.c
30361 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30362 int err;
30363 u32 cp;
30364
30365 + memset(&arg64, 0, sizeof(arg64));
30366 +
30367 err = 0;
30368 err |=
30369 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30370 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
30371 while (!list_empty(&h->reqQ)) {
30372 c = list_entry(h->reqQ.next, CommandList_struct, list);
30373 /* can't do anything if fifo is full */
30374 - if ((h->access.fifo_full(h))) {
30375 + if ((h->access->fifo_full(h))) {
30376 dev_warn(&h->pdev->dev, "fifo full\n");
30377 break;
30378 }
30379 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
30380 h->Qdepth--;
30381
30382 /* Tell the controller execute command */
30383 - h->access.submit_command(h, c);
30384 + h->access->submit_command(h, c);
30385
30386 /* Put job onto the completed Q */
30387 addQ(&h->cmpQ, c);
30388 @@ -3443,17 +3445,17 @@ startio:
30389
30390 static inline unsigned long get_next_completion(ctlr_info_t *h)
30391 {
30392 - return h->access.command_completed(h);
30393 + return h->access->command_completed(h);
30394 }
30395
30396 static inline int interrupt_pending(ctlr_info_t *h)
30397 {
30398 - return h->access.intr_pending(h);
30399 + return h->access->intr_pending(h);
30400 }
30401
30402 static inline long interrupt_not_for_us(ctlr_info_t *h)
30403 {
30404 - return ((h->access.intr_pending(h) == 0) ||
30405 + return ((h->access->intr_pending(h) == 0) ||
30406 (h->interrupts_enabled == 0));
30407 }
30408
30409 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
30410 u32 a;
30411
30412 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30413 - return h->access.command_completed(h);
30414 + return h->access->command_completed(h);
30415
30416 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30417 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30418 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
30419 trans_support & CFGTBL_Trans_use_short_tags);
30420
30421 /* Change the access methods to the performant access methods */
30422 - h->access = SA5_performant_access;
30423 + h->access = &SA5_performant_access;
30424 h->transMethod = CFGTBL_Trans_Performant;
30425
30426 return;
30427 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
30428 if (prod_index < 0)
30429 return -ENODEV;
30430 h->product_name = products[prod_index].product_name;
30431 - h->access = *(products[prod_index].access);
30432 + h->access = products[prod_index].access;
30433
30434 if (cciss_board_disabled(h)) {
30435 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
30436 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
30437 }
30438
30439 /* make sure the board interrupts are off */
30440 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30441 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30442 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
30443 if (rc)
30444 goto clean2;
30445 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
30446 * fake ones to scoop up any residual completions.
30447 */
30448 spin_lock_irqsave(&h->lock, flags);
30449 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30450 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30451 spin_unlock_irqrestore(&h->lock, flags);
30452 free_irq(h->intr[h->intr_mode], h);
30453 rc = cciss_request_irq(h, cciss_msix_discard_completions,
30454 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
30455 dev_info(&h->pdev->dev, "Board READY.\n");
30456 dev_info(&h->pdev->dev,
30457 "Waiting for stale completions to drain.\n");
30458 - h->access.set_intr_mask(h, CCISS_INTR_ON);
30459 + h->access->set_intr_mask(h, CCISS_INTR_ON);
30460 msleep(10000);
30461 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30462 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30463
30464 rc = controller_reset_failed(h->cfgtable);
30465 if (rc)
30466 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
30467 cciss_scsi_setup(h);
30468
30469 /* Turn the interrupts on so we can service requests */
30470 - h->access.set_intr_mask(h, CCISS_INTR_ON);
30471 + h->access->set_intr_mask(h, CCISS_INTR_ON);
30472
30473 /* Get the firmware version */
30474 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30475 @@ -5210,7 +5212,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
30476 kfree(flush_buf);
30477 if (return_code != IO_OK)
30478 dev_warn(&h->pdev->dev, "Error flushing cache\n");
30479 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30480 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30481 free_irq(h->intr[h->intr_mode], h);
30482 }
30483
30484 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30485 index 7fda30e..eb5dfe0 100644
30486 --- a/drivers/block/cciss.h
30487 +++ b/drivers/block/cciss.h
30488 @@ -101,7 +101,7 @@ struct ctlr_info
30489 /* information about each logical volume */
30490 drive_info_struct *drv[CISS_MAX_LUN];
30491
30492 - struct access_method access;
30493 + struct access_method *access;
30494
30495 /* queue and queue Info */
30496 struct list_head reqQ;
30497 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30498 index 9125bbe..eede5c8 100644
30499 --- a/drivers/block/cpqarray.c
30500 +++ b/drivers/block/cpqarray.c
30501 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30502 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30503 goto Enomem4;
30504 }
30505 - hba[i]->access.set_intr_mask(hba[i], 0);
30506 + hba[i]->access->set_intr_mask(hba[i], 0);
30507 if (request_irq(hba[i]->intr, do_ida_intr,
30508 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30509 {
30510 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30511 add_timer(&hba[i]->timer);
30512
30513 /* Enable IRQ now that spinlock and rate limit timer are set up */
30514 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30515 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30516
30517 for(j=0; j<NWD; j++) {
30518 struct gendisk *disk = ida_gendisk[i][j];
30519 @@ -694,7 +694,7 @@ DBGINFO(
30520 for(i=0; i<NR_PRODUCTS; i++) {
30521 if (board_id == products[i].board_id) {
30522 c->product_name = products[i].product_name;
30523 - c->access = *(products[i].access);
30524 + c->access = products[i].access;
30525 break;
30526 }
30527 }
30528 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
30529 hba[ctlr]->intr = intr;
30530 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30531 hba[ctlr]->product_name = products[j].product_name;
30532 - hba[ctlr]->access = *(products[j].access);
30533 + hba[ctlr]->access = products[j].access;
30534 hba[ctlr]->ctlr = ctlr;
30535 hba[ctlr]->board_id = board_id;
30536 hba[ctlr]->pci_dev = NULL; /* not PCI */
30537 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
30538
30539 while((c = h->reqQ) != NULL) {
30540 /* Can't do anything if we're busy */
30541 - if (h->access.fifo_full(h) == 0)
30542 + if (h->access->fifo_full(h) == 0)
30543 return;
30544
30545 /* Get the first entry from the request Q */
30546 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
30547 h->Qdepth--;
30548
30549 /* Tell the controller to do our bidding */
30550 - h->access.submit_command(h, c);
30551 + h->access->submit_command(h, c);
30552
30553 /* Get onto the completion Q */
30554 addQ(&h->cmpQ, c);
30555 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30556 unsigned long flags;
30557 __u32 a,a1;
30558
30559 - istat = h->access.intr_pending(h);
30560 + istat = h->access->intr_pending(h);
30561 /* Is this interrupt for us? */
30562 if (istat == 0)
30563 return IRQ_NONE;
30564 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30565 */
30566 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30567 if (istat & FIFO_NOT_EMPTY) {
30568 - while((a = h->access.command_completed(h))) {
30569 + while((a = h->access->command_completed(h))) {
30570 a1 = a; a &= ~3;
30571 if ((c = h->cmpQ) == NULL)
30572 {
30573 @@ -1449,11 +1449,11 @@ static int sendcmd(
30574 /*
30575 * Disable interrupt
30576 */
30577 - info_p->access.set_intr_mask(info_p, 0);
30578 + info_p->access->set_intr_mask(info_p, 0);
30579 /* Make sure there is room in the command FIFO */
30580 /* Actually it should be completely empty at this time. */
30581 for (i = 200000; i > 0; i--) {
30582 - temp = info_p->access.fifo_full(info_p);
30583 + temp = info_p->access->fifo_full(info_p);
30584 if (temp != 0) {
30585 break;
30586 }
30587 @@ -1466,7 +1466,7 @@ DBG(
30588 /*
30589 * Send the cmd
30590 */
30591 - info_p->access.submit_command(info_p, c);
30592 + info_p->access->submit_command(info_p, c);
30593 complete = pollcomplete(ctlr);
30594
30595 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30596 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30597 * we check the new geometry. Then turn interrupts back on when
30598 * we're done.
30599 */
30600 - host->access.set_intr_mask(host, 0);
30601 + host->access->set_intr_mask(host, 0);
30602 getgeometry(ctlr);
30603 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30604 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30605
30606 for(i=0; i<NWD; i++) {
30607 struct gendisk *disk = ida_gendisk[ctlr][i];
30608 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
30609 /* Wait (up to 2 seconds) for a command to complete */
30610
30611 for (i = 200000; i > 0; i--) {
30612 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30613 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30614 if (done == 0) {
30615 udelay(10); /* a short fixed delay */
30616 } else
30617 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30618 index be73e9d..7fbf140 100644
30619 --- a/drivers/block/cpqarray.h
30620 +++ b/drivers/block/cpqarray.h
30621 @@ -99,7 +99,7 @@ struct ctlr_info {
30622 drv_info_t drv[NWD];
30623 struct proc_dir_entry *proc;
30624
30625 - struct access_method access;
30626 + struct access_method *access;
30627
30628 cmdlist_t *reqQ;
30629 cmdlist_t *cmpQ;
30630 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
30631 index b953cc7..e3dc580 100644
30632 --- a/drivers/block/drbd/drbd_int.h
30633 +++ b/drivers/block/drbd/drbd_int.h
30634 @@ -735,7 +735,7 @@ struct drbd_request;
30635 struct drbd_epoch {
30636 struct list_head list;
30637 unsigned int barrier_nr;
30638 - atomic_t epoch_size; /* increased on every request added. */
30639 + atomic_unchecked_t epoch_size; /* increased on every request added. */
30640 atomic_t active; /* increased on every req. added, and dec on every finished. */
30641 unsigned long flags;
30642 };
30643 @@ -1116,7 +1116,7 @@ struct drbd_conf {
30644 void *int_dig_in;
30645 void *int_dig_vv;
30646 wait_queue_head_t seq_wait;
30647 - atomic_t packet_seq;
30648 + atomic_unchecked_t packet_seq;
30649 unsigned int peer_seq;
30650 spinlock_t peer_seq_lock;
30651 unsigned int minor;
30652 @@ -1658,30 +1658,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
30653
30654 static inline void drbd_tcp_cork(struct socket *sock)
30655 {
30656 - int __user val = 1;
30657 + int val = 1;
30658 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
30659 - (char __user *)&val, sizeof(val));
30660 + (char __force_user *)&val, sizeof(val));
30661 }
30662
30663 static inline void drbd_tcp_uncork(struct socket *sock)
30664 {
30665 - int __user val = 0;
30666 + int val = 0;
30667 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
30668 - (char __user *)&val, sizeof(val));
30669 + (char __force_user *)&val, sizeof(val));
30670 }
30671
30672 static inline void drbd_tcp_nodelay(struct socket *sock)
30673 {
30674 - int __user val = 1;
30675 + int val = 1;
30676 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
30677 - (char __user *)&val, sizeof(val));
30678 + (char __force_user *)&val, sizeof(val));
30679 }
30680
30681 static inline void drbd_tcp_quickack(struct socket *sock)
30682 {
30683 - int __user val = 2;
30684 + int val = 2;
30685 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
30686 - (char __user *)&val, sizeof(val));
30687 + (char __force_user *)&val, sizeof(val));
30688 }
30689
30690 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
30691 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
30692 index f55683a..2101b96 100644
30693 --- a/drivers/block/drbd/drbd_main.c
30694 +++ b/drivers/block/drbd/drbd_main.c
30695 @@ -2556,7 +2556,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
30696 p.sector = sector;
30697 p.block_id = block_id;
30698 p.blksize = blksize;
30699 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
30700 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
30701
30702 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
30703 return false;
30704 @@ -2854,7 +2854,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
30705
30706 p.sector = cpu_to_be64(req->sector);
30707 p.block_id = (unsigned long)req;
30708 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
30709 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
30710
30711 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
30712
30713 @@ -3139,7 +3139,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
30714 atomic_set(&mdev->unacked_cnt, 0);
30715 atomic_set(&mdev->local_cnt, 0);
30716 atomic_set(&mdev->net_cnt, 0);
30717 - atomic_set(&mdev->packet_seq, 0);
30718 + atomic_set_unchecked(&mdev->packet_seq, 0);
30719 atomic_set(&mdev->pp_in_use, 0);
30720 atomic_set(&mdev->pp_in_use_by_net, 0);
30721 atomic_set(&mdev->rs_sect_in, 0);
30722 @@ -3221,8 +3221,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
30723 mdev->receiver.t_state);
30724
30725 /* no need to lock it, I'm the only thread alive */
30726 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
30727 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
30728 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
30729 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
30730 mdev->al_writ_cnt =
30731 mdev->bm_writ_cnt =
30732 mdev->read_cnt =
30733 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
30734 index edb490a..ecd69da 100644
30735 --- a/drivers/block/drbd/drbd_nl.c
30736 +++ b/drivers/block/drbd/drbd_nl.c
30737 @@ -2407,7 +2407,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
30738 module_put(THIS_MODULE);
30739 }
30740
30741 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
30742 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
30743
30744 static unsigned short *
30745 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
30746 @@ -2478,7 +2478,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
30747 cn_reply->id.idx = CN_IDX_DRBD;
30748 cn_reply->id.val = CN_VAL_DRBD;
30749
30750 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
30751 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
30752 cn_reply->ack = 0; /* not used here. */
30753 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
30754 (int)((char *)tl - (char *)reply->tag_list);
30755 @@ -2510,7 +2510,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
30756 cn_reply->id.idx = CN_IDX_DRBD;
30757 cn_reply->id.val = CN_VAL_DRBD;
30758
30759 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
30760 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
30761 cn_reply->ack = 0; /* not used here. */
30762 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
30763 (int)((char *)tl - (char *)reply->tag_list);
30764 @@ -2588,7 +2588,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
30765 cn_reply->id.idx = CN_IDX_DRBD;
30766 cn_reply->id.val = CN_VAL_DRBD;
30767
30768 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
30769 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
30770 cn_reply->ack = 0; // not used here.
30771 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
30772 (int)((char*)tl - (char*)reply->tag_list);
30773 @@ -2627,7 +2627,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
30774 cn_reply->id.idx = CN_IDX_DRBD;
30775 cn_reply->id.val = CN_VAL_DRBD;
30776
30777 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
30778 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
30779 cn_reply->ack = 0; /* not used here. */
30780 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
30781 (int)((char *)tl - (char *)reply->tag_list);
30782 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
30783 index c74ca2d..860c819 100644
30784 --- a/drivers/block/drbd/drbd_receiver.c
30785 +++ b/drivers/block/drbd/drbd_receiver.c
30786 @@ -898,7 +898,7 @@ retry:
30787 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
30788 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
30789
30790 - atomic_set(&mdev->packet_seq, 0);
30791 + atomic_set_unchecked(&mdev->packet_seq, 0);
30792 mdev->peer_seq = 0;
30793
30794 if (drbd_send_protocol(mdev) == -1)
30795 @@ -999,7 +999,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
30796 do {
30797 next_epoch = NULL;
30798
30799 - epoch_size = atomic_read(&epoch->epoch_size);
30800 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
30801
30802 switch (ev & ~EV_CLEANUP) {
30803 case EV_PUT:
30804 @@ -1035,7 +1035,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
30805 rv = FE_DESTROYED;
30806 } else {
30807 epoch->flags = 0;
30808 - atomic_set(&epoch->epoch_size, 0);
30809 + atomic_set_unchecked(&epoch->epoch_size, 0);
30810 /* atomic_set(&epoch->active, 0); is already zero */
30811 if (rv == FE_STILL_LIVE)
30812 rv = FE_RECYCLED;
30813 @@ -1210,14 +1210,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
30814 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
30815 drbd_flush(mdev);
30816
30817 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
30818 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
30819 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
30820 if (epoch)
30821 break;
30822 }
30823
30824 epoch = mdev->current_epoch;
30825 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
30826 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
30827
30828 D_ASSERT(atomic_read(&epoch->active) == 0);
30829 D_ASSERT(epoch->flags == 0);
30830 @@ -1229,11 +1229,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
30831 }
30832
30833 epoch->flags = 0;
30834 - atomic_set(&epoch->epoch_size, 0);
30835 + atomic_set_unchecked(&epoch->epoch_size, 0);
30836 atomic_set(&epoch->active, 0);
30837
30838 spin_lock(&mdev->epoch_lock);
30839 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
30840 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
30841 list_add(&epoch->list, &mdev->current_epoch->list);
30842 mdev->current_epoch = epoch;
30843 mdev->epochs++;
30844 @@ -1702,7 +1702,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
30845 spin_unlock(&mdev->peer_seq_lock);
30846
30847 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
30848 - atomic_inc(&mdev->current_epoch->epoch_size);
30849 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
30850 return drbd_drain_block(mdev, data_size);
30851 }
30852
30853 @@ -1732,7 +1732,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
30854
30855 spin_lock(&mdev->epoch_lock);
30856 e->epoch = mdev->current_epoch;
30857 - atomic_inc(&e->epoch->epoch_size);
30858 + atomic_inc_unchecked(&e->epoch->epoch_size);
30859 atomic_inc(&e->epoch->active);
30860 spin_unlock(&mdev->epoch_lock);
30861
30862 @@ -3954,7 +3954,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
30863 D_ASSERT(list_empty(&mdev->done_ee));
30864
30865 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
30866 - atomic_set(&mdev->current_epoch->epoch_size, 0);
30867 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
30868 D_ASSERT(list_empty(&mdev->current_epoch->list));
30869 }
30870
30871 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30872 index 54046e5..7759c55 100644
30873 --- a/drivers/block/loop.c
30874 +++ b/drivers/block/loop.c
30875 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
30876 mm_segment_t old_fs = get_fs();
30877
30878 set_fs(get_ds());
30879 - bw = file->f_op->write(file, buf, len, &pos);
30880 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30881 set_fs(old_fs);
30882 if (likely(bw == len))
30883 return 0;
30884 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
30885 index d620b44..587561e 100644
30886 --- a/drivers/cdrom/cdrom.c
30887 +++ b/drivers/cdrom/cdrom.c
30888 @@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
30889 ENSURE(reset, CDC_RESET);
30890 ENSURE(generic_packet, CDC_GENERIC_PACKET);
30891 cdi->mc_flags = 0;
30892 - cdo->n_minors = 0;
30893 cdi->options = CDO_USE_FFLAGS;
30894
30895 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
30896 @@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
30897 else
30898 cdi->cdda_method = CDDA_OLD;
30899
30900 - if (!cdo->generic_packet)
30901 - cdo->generic_packet = cdrom_dummy_generic_packet;
30902 + if (!cdo->generic_packet) {
30903 + pax_open_kernel();
30904 + *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
30905 + pax_close_kernel();
30906 + }
30907
30908 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
30909 mutex_lock(&cdrom_mutex);
30910 @@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
30911 if (cdi->exit)
30912 cdi->exit(cdi);
30913
30914 - cdi->ops->n_minors--;
30915 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
30916 }
30917
30918 diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
30919 index 75d485a..2809958 100644
30920 --- a/drivers/cdrom/gdrom.c
30921 +++ b/drivers/cdrom/gdrom.c
30922 @@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
30923 .audio_ioctl = gdrom_audio_ioctl,
30924 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
30925 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
30926 - .n_minors = 1,
30927 };
30928
30929 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
30930 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30931 index 72bedad..8181ce1 100644
30932 --- a/drivers/char/Kconfig
30933 +++ b/drivers/char/Kconfig
30934 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
30935
30936 config DEVKMEM
30937 bool "/dev/kmem virtual device support"
30938 - default y
30939 + default n
30940 + depends on !GRKERNSEC_KMEM
30941 help
30942 Say Y here if you want to support the /dev/kmem device. The
30943 /dev/kmem device is rarely used, but can be used for certain
30944 @@ -581,6 +582,7 @@ config DEVPORT
30945 bool
30946 depends on !M68K
30947 depends on ISA || PCI
30948 + depends on !GRKERNSEC_KMEM
30949 default y
30950
30951 source "drivers/s390/char/Kconfig"
30952 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30953 index 2e04433..22afc64 100644
30954 --- a/drivers/char/agp/frontend.c
30955 +++ b/drivers/char/agp/frontend.c
30956 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30957 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30958 return -EFAULT;
30959
30960 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30961 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30962 return -EFAULT;
30963
30964 client = agp_find_client_by_pid(reserve.pid);
30965 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
30966 index 21cb980..f15107c 100644
30967 --- a/drivers/char/genrtc.c
30968 +++ b/drivers/char/genrtc.c
30969 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
30970 switch (cmd) {
30971
30972 case RTC_PLL_GET:
30973 + memset(&pll, 0, sizeof(pll));
30974 if (get_rtc_pll(&pll))
30975 return -EINVAL;
30976 else
30977 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
30978 index dfd7876..c0b0885 100644
30979 --- a/drivers/char/hpet.c
30980 +++ b/drivers/char/hpet.c
30981 @@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
30982 }
30983
30984 static int
30985 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
30986 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
30987 struct hpet_info *info)
30988 {
30989 struct hpet_timer __iomem *timer;
30990 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
30991 index a0c84bb..9edcf60 100644
30992 --- a/drivers/char/ipmi/ipmi_msghandler.c
30993 +++ b/drivers/char/ipmi/ipmi_msghandler.c
30994 @@ -420,7 +420,7 @@ struct ipmi_smi {
30995 struct proc_dir_entry *proc_dir;
30996 char proc_dir_name[10];
30997
30998 - atomic_t stats[IPMI_NUM_STATS];
30999 + atomic_unchecked_t stats[IPMI_NUM_STATS];
31000
31001 /*
31002 * run_to_completion duplicate of smb_info, smi_info
31003 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31004
31005
31006 #define ipmi_inc_stat(intf, stat) \
31007 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31008 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31009 #define ipmi_get_stat(intf, stat) \
31010 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31011 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31012
31013 static int is_lan_addr(struct ipmi_addr *addr)
31014 {
31015 @@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31016 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31017 init_waitqueue_head(&intf->waitq);
31018 for (i = 0; i < IPMI_NUM_STATS; i++)
31019 - atomic_set(&intf->stats[i], 0);
31020 + atomic_set_unchecked(&intf->stats[i], 0);
31021
31022 intf->proc_dir = NULL;
31023
31024 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31025 index 32a6c7e..f6966a9 100644
31026 --- a/drivers/char/ipmi/ipmi_si_intf.c
31027 +++ b/drivers/char/ipmi/ipmi_si_intf.c
31028 @@ -275,7 +275,7 @@ struct smi_info {
31029 unsigned char slave_addr;
31030
31031 /* Counters and things for the proc filesystem. */
31032 - atomic_t stats[SI_NUM_STATS];
31033 + atomic_unchecked_t stats[SI_NUM_STATS];
31034
31035 struct task_struct *thread;
31036
31037 @@ -284,9 +284,9 @@ struct smi_info {
31038 };
31039
31040 #define smi_inc_stat(smi, stat) \
31041 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31042 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31043 #define smi_get_stat(smi, stat) \
31044 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31045 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31046
31047 #define SI_MAX_PARMS 4
31048
31049 @@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
31050 atomic_set(&new_smi->req_events, 0);
31051 new_smi->run_to_completion = 0;
31052 for (i = 0; i < SI_NUM_STATS; i++)
31053 - atomic_set(&new_smi->stats[i], 0);
31054 + atomic_set_unchecked(&new_smi->stats[i], 0);
31055
31056 new_smi->interrupt_disabled = 1;
31057 atomic_set(&new_smi->stop_operation, 0);
31058 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31059 index 0537903..121c699 100644
31060 --- a/drivers/char/mem.c
31061 +++ b/drivers/char/mem.c
31062 @@ -18,6 +18,7 @@
31063 #include <linux/raw.h>
31064 #include <linux/tty.h>
31065 #include <linux/capability.h>
31066 +#include <linux/security.h>
31067 #include <linux/ptrace.h>
31068 #include <linux/device.h>
31069 #include <linux/highmem.h>
31070 @@ -37,6 +38,10 @@
31071
31072 #define DEVPORT_MINOR 4
31073
31074 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31075 +extern const struct file_operations grsec_fops;
31076 +#endif
31077 +
31078 static inline unsigned long size_inside_page(unsigned long start,
31079 unsigned long size)
31080 {
31081 @@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31082
31083 while (cursor < to) {
31084 if (!devmem_is_allowed(pfn)) {
31085 +#ifdef CONFIG_GRKERNSEC_KMEM
31086 + gr_handle_mem_readwrite(from, to);
31087 +#else
31088 printk(KERN_INFO
31089 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31090 current->comm, from, to);
31091 +#endif
31092 return 0;
31093 }
31094 cursor += PAGE_SIZE;
31095 @@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31096 }
31097 return 1;
31098 }
31099 +#elif defined(CONFIG_GRKERNSEC_KMEM)
31100 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31101 +{
31102 + return 0;
31103 +}
31104 #else
31105 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31106 {
31107 @@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
31108
31109 while (count > 0) {
31110 unsigned long remaining;
31111 + char *temp;
31112
31113 sz = size_inside_page(p, count);
31114
31115 @@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
31116 if (!ptr)
31117 return -EFAULT;
31118
31119 - remaining = copy_to_user(buf, ptr, sz);
31120 +#ifdef CONFIG_PAX_USERCOPY
31121 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
31122 + if (!temp) {
31123 + unxlate_dev_mem_ptr(p, ptr);
31124 + return -ENOMEM;
31125 + }
31126 + memcpy(temp, ptr, sz);
31127 +#else
31128 + temp = ptr;
31129 +#endif
31130 +
31131 + remaining = copy_to_user(buf, temp, sz);
31132 +
31133 +#ifdef CONFIG_PAX_USERCOPY
31134 + kfree(temp);
31135 +#endif
31136 +
31137 unxlate_dev_mem_ptr(p, ptr);
31138 if (remaining)
31139 return -EFAULT;
31140 @@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31141 size_t count, loff_t *ppos)
31142 {
31143 unsigned long p = *ppos;
31144 - ssize_t low_count, read, sz;
31145 + ssize_t low_count, read, sz, err = 0;
31146 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31147 - int err = 0;
31148
31149 read = 0;
31150 if (p < (unsigned long) high_memory) {
31151 @@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31152 }
31153 #endif
31154 while (low_count > 0) {
31155 + char *temp;
31156 +
31157 sz = size_inside_page(p, low_count);
31158
31159 /*
31160 @@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31161 */
31162 kbuf = xlate_dev_kmem_ptr((char *)p);
31163
31164 - if (copy_to_user(buf, kbuf, sz))
31165 +#ifdef CONFIG_PAX_USERCOPY
31166 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
31167 + if (!temp)
31168 + return -ENOMEM;
31169 + memcpy(temp, kbuf, sz);
31170 +#else
31171 + temp = kbuf;
31172 +#endif
31173 +
31174 + err = copy_to_user(buf, temp, sz);
31175 +
31176 +#ifdef CONFIG_PAX_USERCOPY
31177 + kfree(temp);
31178 +#endif
31179 +
31180 + if (err)
31181 return -EFAULT;
31182 buf += sz;
31183 p += sz;
31184 @@ -833,6 +880,9 @@ static const struct memdev {
31185 #ifdef CONFIG_CRASH_DUMP
31186 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31187 #endif
31188 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31189 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31190 +#endif
31191 };
31192
31193 static int memory_open(struct inode *inode, struct file *filp)
31194 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
31195 index 9df78e2..01ba9ae 100644
31196 --- a/drivers/char/nvram.c
31197 +++ b/drivers/char/nvram.c
31198 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
31199
31200 spin_unlock_irq(&rtc_lock);
31201
31202 - if (copy_to_user(buf, contents, tmp - contents))
31203 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
31204 return -EFAULT;
31205
31206 *ppos = i;
31207 diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
31208 index 21721d2..4e98777 100644
31209 --- a/drivers/char/pcmcia/synclink_cs.c
31210 +++ b/drivers/char/pcmcia/synclink_cs.c
31211 @@ -2346,9 +2346,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
31212
31213 if (debug_level >= DEBUG_LEVEL_INFO)
31214 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
31215 - __FILE__,__LINE__, info->device_name, port->count);
31216 + __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
31217
31218 - WARN_ON(!port->count);
31219 + WARN_ON(!atomic_read(&port->count));
31220
31221 if (tty_port_close_start(port, tty, filp) == 0)
31222 goto cleanup;
31223 @@ -2366,7 +2366,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
31224 cleanup:
31225 if (debug_level >= DEBUG_LEVEL_INFO)
31226 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
31227 - tty->driver->name, port->count);
31228 + tty->driver->name, atomic_read(&port->count));
31229 }
31230
31231 /* Wait until the transmitter is empty.
31232 @@ -2508,7 +2508,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
31233
31234 if (debug_level >= DEBUG_LEVEL_INFO)
31235 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
31236 - __FILE__,__LINE__,tty->driver->name, port->count);
31237 + __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
31238
31239 /* If port is closing, signal caller to try again */
31240 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
31241 @@ -2528,11 +2528,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
31242 goto cleanup;
31243 }
31244 spin_lock(&port->lock);
31245 - port->count++;
31246 + atomic_inc(&port->count);
31247 spin_unlock(&port->lock);
31248 spin_unlock_irqrestore(&info->netlock, flags);
31249
31250 - if (port->count == 1) {
31251 + if (atomic_read(&port->count) == 1) {
31252 /* 1st open on this device, init hardware */
31253 retval = startup(info, tty);
31254 if (retval < 0)
31255 @@ -3886,7 +3886,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
31256 unsigned short new_crctype;
31257
31258 /* return error if TTY interface open */
31259 - if (info->port.count)
31260 + if (atomic_read(&info->port.count))
31261 return -EBUSY;
31262
31263 switch (encoding)
31264 @@ -3989,7 +3989,7 @@ static int hdlcdev_open(struct net_device *dev)
31265
31266 /* arbitrate between network and tty opens */
31267 spin_lock_irqsave(&info->netlock, flags);
31268 - if (info->port.count != 0 || info->netcount != 0) {
31269 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
31270 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
31271 spin_unlock_irqrestore(&info->netlock, flags);
31272 return -EBUSY;
31273 @@ -4078,7 +4078,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
31274 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
31275
31276 /* return error if TTY interface open */
31277 - if (info->port.count)
31278 + if (atomic_read(&info->port.count))
31279 return -EBUSY;
31280
31281 if (cmd != SIOCWANDEV)
31282 diff --git a/drivers/char/random.c b/drivers/char/random.c
31283 index b86eae9..b9c2ed7 100644
31284 --- a/drivers/char/random.c
31285 +++ b/drivers/char/random.c
31286 @@ -272,8 +272,13 @@
31287 /*
31288 * Configuration information
31289 */
31290 +#ifdef CONFIG_GRKERNSEC_RANDNET
31291 +#define INPUT_POOL_WORDS 512
31292 +#define OUTPUT_POOL_WORDS 128
31293 +#else
31294 #define INPUT_POOL_WORDS 128
31295 #define OUTPUT_POOL_WORDS 32
31296 +#endif
31297 #define SEC_XFER_SIZE 512
31298 #define EXTRACT_SIZE 10
31299
31300 @@ -313,10 +318,17 @@ static struct poolinfo {
31301 int poolwords;
31302 int tap1, tap2, tap3, tap4, tap5;
31303 } poolinfo_table[] = {
31304 +#ifdef CONFIG_GRKERNSEC_RANDNET
31305 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31306 + { 512, 411, 308, 208, 104, 1 },
31307 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31308 + { 128, 103, 76, 51, 25, 1 },
31309 +#else
31310 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31311 { 128, 103, 76, 51, 25, 1 },
31312 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31313 { 32, 26, 20, 14, 7, 1 },
31314 +#endif
31315 #if 0
31316 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31317 { 2048, 1638, 1231, 819, 411, 1 },
31318 @@ -437,6 +449,7 @@ struct entropy_store {
31319 int entropy_count;
31320 int entropy_total;
31321 unsigned int initialized:1;
31322 + bool last_data_init;
31323 __u8 last_data[EXTRACT_SIZE];
31324 };
31325
31326 @@ -527,8 +540,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
31327 input_rotate += i ? 7 : 14;
31328 }
31329
31330 - ACCESS_ONCE(r->input_rotate) = input_rotate;
31331 - ACCESS_ONCE(r->add_ptr) = i;
31332 + ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
31333 + ACCESS_ONCE_RW(r->add_ptr) = i;
31334 smp_wmb();
31335
31336 if (out)
31337 @@ -957,6 +970,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
31338 ssize_t ret = 0, i;
31339 __u8 tmp[EXTRACT_SIZE];
31340
31341 + /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
31342 + if (fips_enabled && !r->last_data_init)
31343 + nbytes += EXTRACT_SIZE;
31344 +
31345 trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
31346 xfer_secondary_pool(r, nbytes);
31347 nbytes = account(r, nbytes, min, reserved);
31348 @@ -967,6 +984,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
31349 if (fips_enabled) {
31350 unsigned long flags;
31351
31352 +
31353 + /* prime last_data value if need be, per fips 140-2 */
31354 + if (!r->last_data_init) {
31355 + spin_lock_irqsave(&r->lock, flags);
31356 + memcpy(r->last_data, tmp, EXTRACT_SIZE);
31357 + r->last_data_init = true;
31358 + nbytes -= EXTRACT_SIZE;
31359 + spin_unlock_irqrestore(&r->lock, flags);
31360 + extract_buf(r, tmp);
31361 + }
31362 +
31363 spin_lock_irqsave(&r->lock, flags);
31364 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
31365 panic("Hardware RNG duplicated output!\n");
31366 @@ -1008,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
31367
31368 extract_buf(r, tmp);
31369 i = min_t(int, nbytes, EXTRACT_SIZE);
31370 - if (copy_to_user(buf, tmp, i)) {
31371 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
31372 ret = -EFAULT;
31373 break;
31374 }
31375 @@ -1086,6 +1114,7 @@ static void init_std_data(struct entropy_store *r)
31376
31377 r->entropy_count = 0;
31378 r->entropy_total = 0;
31379 + r->last_data_init = false;
31380 mix_pool_bytes(r, &now, sizeof(now), NULL);
31381 for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
31382 if (!arch_get_random_long(&rv))
31383 @@ -1342,7 +1371,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31384 #include <linux/sysctl.h>
31385
31386 static int min_read_thresh = 8, min_write_thresh;
31387 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
31388 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31389 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31390 static char sysctl_bootid[16];
31391
31392 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31393 index 9b4f011..b7e0a1a 100644
31394 --- a/drivers/char/sonypi.c
31395 +++ b/drivers/char/sonypi.c
31396 @@ -54,6 +54,7 @@
31397
31398 #include <asm/uaccess.h>
31399 #include <asm/io.h>
31400 +#include <asm/local.h>
31401
31402 #include <linux/sonypi.h>
31403
31404 @@ -490,7 +491,7 @@ static struct sonypi_device {
31405 spinlock_t fifo_lock;
31406 wait_queue_head_t fifo_proc_list;
31407 struct fasync_struct *fifo_async;
31408 - int open_count;
31409 + local_t open_count;
31410 int model;
31411 struct input_dev *input_jog_dev;
31412 struct input_dev *input_key_dev;
31413 @@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31414 static int sonypi_misc_release(struct inode *inode, struct file *file)
31415 {
31416 mutex_lock(&sonypi_device.lock);
31417 - sonypi_device.open_count--;
31418 + local_dec(&sonypi_device.open_count);
31419 mutex_unlock(&sonypi_device.lock);
31420 return 0;
31421 }
31422 @@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31423 {
31424 mutex_lock(&sonypi_device.lock);
31425 /* Flush input queue on first open */
31426 - if (!sonypi_device.open_count)
31427 + if (!local_read(&sonypi_device.open_count))
31428 kfifo_reset(&sonypi_device.fifo);
31429 - sonypi_device.open_count++;
31430 + local_inc(&sonypi_device.open_count);
31431 mutex_unlock(&sonypi_device.lock);
31432
31433 return 0;
31434 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31435 index 93211df..c7805f7 100644
31436 --- a/drivers/char/tpm/tpm.c
31437 +++ b/drivers/char/tpm/tpm.c
31438 @@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31439 chip->vendor.req_complete_val)
31440 goto out_recv;
31441
31442 - if ((status == chip->vendor.req_canceled)) {
31443 + if (status == chip->vendor.req_canceled) {
31444 dev_err(chip->dev, "Operation Canceled\n");
31445 rc = -ECANCELED;
31446 goto out;
31447 diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
31448 index 56051d0..11cf3b7 100644
31449 --- a/drivers/char/tpm/tpm_acpi.c
31450 +++ b/drivers/char/tpm/tpm_acpi.c
31451 @@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
31452 virt = acpi_os_map_memory(start, len);
31453 if (!virt) {
31454 kfree(log->bios_event_log);
31455 + log->bios_event_log = NULL;
31456 printk("%s: ERROR - Unable to map memory\n", __func__);
31457 return -EIO;
31458 }
31459
31460 - memcpy_fromio(log->bios_event_log, virt, len);
31461 + memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
31462
31463 acpi_os_unmap_memory(virt, len);
31464 return 0;
31465 diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
31466 index 84ddc55..1d32f1e 100644
31467 --- a/drivers/char/tpm/tpm_eventlog.c
31468 +++ b/drivers/char/tpm/tpm_eventlog.c
31469 @@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31470 event = addr;
31471
31472 if ((event->event_type == 0 && event->event_size == 0) ||
31473 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31474 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31475 return NULL;
31476
31477 return addr;
31478 @@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31479 return NULL;
31480
31481 if ((event->event_type == 0 && event->event_size == 0) ||
31482 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31483 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31484 return NULL;
31485
31486 (*pos)++;
31487 @@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31488 int i;
31489
31490 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31491 - seq_putc(m, data[i]);
31492 + if (!seq_putc(m, data[i]))
31493 + return -EFAULT;
31494
31495 return 0;
31496 }
31497 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
31498 index 8ab9c3d..c3e65d3 100644
31499 --- a/drivers/char/virtio_console.c
31500 +++ b/drivers/char/virtio_console.c
31501 @@ -622,7 +622,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
31502 if (to_user) {
31503 ssize_t ret;
31504
31505 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
31506 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
31507 if (ret)
31508 return -EFAULT;
31509 } else {
31510 @@ -721,7 +721,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
31511 if (!port_has_data(port) && !port->host_connected)
31512 return 0;
31513
31514 - return fill_readbuf(port, ubuf, count, true);
31515 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
31516 }
31517
31518 static int wait_port_writable(struct port *port, bool nonblock)
31519 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
31520 index e164c55..3aabb50 100644
31521 --- a/drivers/edac/edac_pci_sysfs.c
31522 +++ b/drivers/edac/edac_pci_sysfs.c
31523 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
31524 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
31525 static int edac_pci_poll_msec = 1000; /* one second workq period */
31526
31527 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
31528 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
31529 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
31530 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
31531
31532 static struct kobject *edac_pci_top_main_kobj;
31533 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
31534 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31535 edac_printk(KERN_CRIT, EDAC_PCI,
31536 "Signaled System Error on %s\n",
31537 pci_name(dev));
31538 - atomic_inc(&pci_nonparity_count);
31539 + atomic_inc_unchecked(&pci_nonparity_count);
31540 }
31541
31542 if (status & (PCI_STATUS_PARITY)) {
31543 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31544 "Master Data Parity Error on %s\n",
31545 pci_name(dev));
31546
31547 - atomic_inc(&pci_parity_count);
31548 + atomic_inc_unchecked(&pci_parity_count);
31549 }
31550
31551 if (status & (PCI_STATUS_DETECTED_PARITY)) {
31552 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31553 "Detected Parity Error on %s\n",
31554 pci_name(dev));
31555
31556 - atomic_inc(&pci_parity_count);
31557 + atomic_inc_unchecked(&pci_parity_count);
31558 }
31559 }
31560
31561 @@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31562 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
31563 "Signaled System Error on %s\n",
31564 pci_name(dev));
31565 - atomic_inc(&pci_nonparity_count);
31566 + atomic_inc_unchecked(&pci_nonparity_count);
31567 }
31568
31569 if (status & (PCI_STATUS_PARITY)) {
31570 @@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31571 "Master Data Parity Error on "
31572 "%s\n", pci_name(dev));
31573
31574 - atomic_inc(&pci_parity_count);
31575 + atomic_inc_unchecked(&pci_parity_count);
31576 }
31577
31578 if (status & (PCI_STATUS_DETECTED_PARITY)) {
31579 @@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31580 "Detected Parity Error on %s\n",
31581 pci_name(dev));
31582
31583 - atomic_inc(&pci_parity_count);
31584 + atomic_inc_unchecked(&pci_parity_count);
31585 }
31586 }
31587 }
31588 @@ -676,7 +676,7 @@ void edac_pci_do_parity_check(void)
31589 if (!check_pci_errors)
31590 return;
31591
31592 - before_count = atomic_read(&pci_parity_count);
31593 + before_count = atomic_read_unchecked(&pci_parity_count);
31594
31595 /* scan all PCI devices looking for a Parity Error on devices and
31596 * bridges.
31597 @@ -688,7 +688,7 @@ void edac_pci_do_parity_check(void)
31598 /* Only if operator has selected panic on PCI Error */
31599 if (edac_pci_get_panic_on_pe()) {
31600 /* If the count is different 'after' from 'before' */
31601 - if (before_count != atomic_read(&pci_parity_count))
31602 + if (before_count != atomic_read_unchecked(&pci_parity_count))
31603 panic("EDAC: PCI Parity Error");
31604 }
31605 }
31606 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
31607 index 8c87a5e..a19cbd7 100644
31608 --- a/drivers/edac/mce_amd.h
31609 +++ b/drivers/edac/mce_amd.h
31610 @@ -80,7 +80,7 @@ extern const char * const ii_msgs[];
31611 struct amd_decoder_ops {
31612 bool (*dc_mce)(u16, u8);
31613 bool (*ic_mce)(u16, u8);
31614 -};
31615 +} __no_const;
31616
31617 void amd_report_gart_errors(bool);
31618 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
31619 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
31620 index 57ea7f4..789e3c3 100644
31621 --- a/drivers/firewire/core-card.c
31622 +++ b/drivers/firewire/core-card.c
31623 @@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
31624
31625 void fw_core_remove_card(struct fw_card *card)
31626 {
31627 - struct fw_card_driver dummy_driver = dummy_driver_template;
31628 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
31629
31630 card->driver->update_phy_reg(card, 4,
31631 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
31632 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
31633 index f8d2287..5aaf4db 100644
31634 --- a/drivers/firewire/core-cdev.c
31635 +++ b/drivers/firewire/core-cdev.c
31636 @@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
31637 int ret;
31638
31639 if ((request->channels == 0 && request->bandwidth == 0) ||
31640 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
31641 - request->bandwidth < 0)
31642 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
31643 return -EINVAL;
31644
31645 r = kmalloc(sizeof(*r), GFP_KERNEL);
31646 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
31647 index 28a94c7..58da63a 100644
31648 --- a/drivers/firewire/core-transaction.c
31649 +++ b/drivers/firewire/core-transaction.c
31650 @@ -38,6 +38,7 @@
31651 #include <linux/timer.h>
31652 #include <linux/types.h>
31653 #include <linux/workqueue.h>
31654 +#include <linux/sched.h>
31655
31656 #include <asm/byteorder.h>
31657
31658 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
31659 index 515a42c..5ecf3ba 100644
31660 --- a/drivers/firewire/core.h
31661 +++ b/drivers/firewire/core.h
31662 @@ -111,6 +111,7 @@ struct fw_card_driver {
31663
31664 int (*stop_iso)(struct fw_iso_context *ctx);
31665 };
31666 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
31667
31668 void fw_card_initialize(struct fw_card *card,
31669 const struct fw_card_driver *driver, struct device *device);
31670 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
31671 index b298158..7ed8432 100644
31672 --- a/drivers/firmware/dmi_scan.c
31673 +++ b/drivers/firmware/dmi_scan.c
31674 @@ -452,11 +452,6 @@ void __init dmi_scan_machine(void)
31675 }
31676 }
31677 else {
31678 - /*
31679 - * no iounmap() for that ioremap(); it would be a no-op, but
31680 - * it's so early in setup that sucker gets confused into doing
31681 - * what it shouldn't if we actually call it.
31682 - */
31683 p = dmi_ioremap(0xF0000, 0x10000);
31684 if (p == NULL)
31685 goto error;
31686 @@ -726,7 +721,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
31687 if (buf == NULL)
31688 return -1;
31689
31690 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
31691 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
31692
31693 iounmap(buf);
31694 return 0;
31695 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
31696 index 82d5c20..44a7177 100644
31697 --- a/drivers/gpio/gpio-vr41xx.c
31698 +++ b/drivers/gpio/gpio-vr41xx.c
31699 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
31700 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
31701 maskl, pendl, maskh, pendh);
31702
31703 - atomic_inc(&irq_err_count);
31704 + atomic_inc_unchecked(&irq_err_count);
31705
31706 return -EINVAL;
31707 }
31708 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
31709 index 1227adf..f2301c2 100644
31710 --- a/drivers/gpu/drm/drm_crtc_helper.c
31711 +++ b/drivers/gpu/drm/drm_crtc_helper.c
31712 @@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
31713 struct drm_crtc *tmp;
31714 int crtc_mask = 1;
31715
31716 - WARN(!crtc, "checking null crtc?\n");
31717 + BUG_ON(!crtc);
31718
31719 dev = crtc->dev;
31720
31721 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
31722 index be174ca..0bcbb71 100644
31723 --- a/drivers/gpu/drm/drm_drv.c
31724 +++ b/drivers/gpu/drm/drm_drv.c
31725 @@ -307,7 +307,7 @@ module_exit(drm_core_exit);
31726 /**
31727 * Copy and IOCTL return string to user space
31728 */
31729 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
31730 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
31731 {
31732 int len;
31733
31734 @@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
31735 return -ENODEV;
31736
31737 atomic_inc(&dev->ioctl_count);
31738 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
31739 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
31740 ++file_priv->ioctl_count;
31741
31742 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
31743 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
31744 index 133b413..fd68225 100644
31745 --- a/drivers/gpu/drm/drm_fops.c
31746 +++ b/drivers/gpu/drm/drm_fops.c
31747 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
31748 }
31749
31750 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
31751 - atomic_set(&dev->counts[i], 0);
31752 + atomic_set_unchecked(&dev->counts[i], 0);
31753
31754 dev->sigdata.lock = NULL;
31755
31756 @@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
31757 if (drm_device_is_unplugged(dev))
31758 return -ENODEV;
31759
31760 - if (!dev->open_count++)
31761 + if (local_inc_return(&dev->open_count) == 1)
31762 need_setup = 1;
31763 mutex_lock(&dev->struct_mutex);
31764 old_mapping = dev->dev_mapping;
31765 @@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
31766 retcode = drm_open_helper(inode, filp, dev);
31767 if (retcode)
31768 goto err_undo;
31769 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
31770 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
31771 if (need_setup) {
31772 retcode = drm_setup(dev);
31773 if (retcode)
31774 @@ -164,7 +164,7 @@ err_undo:
31775 iput(container_of(dev->dev_mapping, struct inode, i_data));
31776 dev->dev_mapping = old_mapping;
31777 mutex_unlock(&dev->struct_mutex);
31778 - dev->open_count--;
31779 + local_dec(&dev->open_count);
31780 return retcode;
31781 }
31782 EXPORT_SYMBOL(drm_open);
31783 @@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
31784
31785 mutex_lock(&drm_global_mutex);
31786
31787 - DRM_DEBUG("open_count = %d\n", dev->open_count);
31788 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
31789
31790 if (dev->driver->preclose)
31791 dev->driver->preclose(dev, file_priv);
31792 @@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
31793 * Begin inline drm_release
31794 */
31795
31796 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
31797 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
31798 task_pid_nr(current),
31799 (long)old_encode_dev(file_priv->minor->device),
31800 - dev->open_count);
31801 + local_read(&dev->open_count));
31802
31803 /* Release any auth tokens that might point to this file_priv,
31804 (do that under the drm_global_mutex) */
31805 @@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
31806 * End inline drm_release
31807 */
31808
31809 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
31810 - if (!--dev->open_count) {
31811 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
31812 + if (local_dec_and_test(&dev->open_count)) {
31813 if (atomic_read(&dev->ioctl_count)) {
31814 DRM_ERROR("Device busy: %d\n",
31815 atomic_read(&dev->ioctl_count));
31816 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
31817 index f731116..629842c 100644
31818 --- a/drivers/gpu/drm/drm_global.c
31819 +++ b/drivers/gpu/drm/drm_global.c
31820 @@ -36,7 +36,7 @@
31821 struct drm_global_item {
31822 struct mutex mutex;
31823 void *object;
31824 - int refcount;
31825 + atomic_t refcount;
31826 };
31827
31828 static struct drm_global_item glob[DRM_GLOBAL_NUM];
31829 @@ -49,7 +49,7 @@ void drm_global_init(void)
31830 struct drm_global_item *item = &glob[i];
31831 mutex_init(&item->mutex);
31832 item->object = NULL;
31833 - item->refcount = 0;
31834 + atomic_set(&item->refcount, 0);
31835 }
31836 }
31837
31838 @@ -59,7 +59,7 @@ void drm_global_release(void)
31839 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
31840 struct drm_global_item *item = &glob[i];
31841 BUG_ON(item->object != NULL);
31842 - BUG_ON(item->refcount != 0);
31843 + BUG_ON(atomic_read(&item->refcount) != 0);
31844 }
31845 }
31846
31847 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
31848 void *object;
31849
31850 mutex_lock(&item->mutex);
31851 - if (item->refcount == 0) {
31852 + if (atomic_read(&item->refcount) == 0) {
31853 item->object = kzalloc(ref->size, GFP_KERNEL);
31854 if (unlikely(item->object == NULL)) {
31855 ret = -ENOMEM;
31856 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
31857 goto out_err;
31858
31859 }
31860 - ++item->refcount;
31861 + atomic_inc(&item->refcount);
31862 ref->object = item->object;
31863 object = item->object;
31864 mutex_unlock(&item->mutex);
31865 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
31866 struct drm_global_item *item = &glob[ref->global_type];
31867
31868 mutex_lock(&item->mutex);
31869 - BUG_ON(item->refcount == 0);
31870 + BUG_ON(atomic_read(&item->refcount) == 0);
31871 BUG_ON(ref->object != item->object);
31872 - if (--item->refcount == 0) {
31873 + if (atomic_dec_and_test(&item->refcount)) {
31874 ref->release(ref);
31875 item->object = NULL;
31876 }
31877 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
31878 index d4b20ce..77a8d41 100644
31879 --- a/drivers/gpu/drm/drm_info.c
31880 +++ b/drivers/gpu/drm/drm_info.c
31881 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
31882 struct drm_local_map *map;
31883 struct drm_map_list *r_list;
31884
31885 - /* Hardcoded from _DRM_FRAME_BUFFER,
31886 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
31887 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
31888 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
31889 + static const char * const types[] = {
31890 + [_DRM_FRAME_BUFFER] = "FB",
31891 + [_DRM_REGISTERS] = "REG",
31892 + [_DRM_SHM] = "SHM",
31893 + [_DRM_AGP] = "AGP",
31894 + [_DRM_SCATTER_GATHER] = "SG",
31895 + [_DRM_CONSISTENT] = "PCI",
31896 + [_DRM_GEM] = "GEM" };
31897 const char *type;
31898 int i;
31899
31900 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
31901 map = r_list->map;
31902 if (!map)
31903 continue;
31904 - if (map->type < 0 || map->type > 5)
31905 + if (map->type >= ARRAY_SIZE(types))
31906 type = "??";
31907 else
31908 type = types[map->type];
31909 @@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
31910 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
31911 vma->vm_flags & VM_LOCKED ? 'l' : '-',
31912 vma->vm_flags & VM_IO ? 'i' : '-',
31913 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31914 + 0);
31915 +#else
31916 vma->vm_pgoff);
31917 +#endif
31918
31919 #if defined(__i386__)
31920 pgprot = pgprot_val(vma->vm_page_prot);
31921 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
31922 index 2f4c434..764794b 100644
31923 --- a/drivers/gpu/drm/drm_ioc32.c
31924 +++ b/drivers/gpu/drm/drm_ioc32.c
31925 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
31926 request = compat_alloc_user_space(nbytes);
31927 if (!access_ok(VERIFY_WRITE, request, nbytes))
31928 return -EFAULT;
31929 - list = (struct drm_buf_desc *) (request + 1);
31930 + list = (struct drm_buf_desc __user *) (request + 1);
31931
31932 if (__put_user(count, &request->count)
31933 || __put_user(list, &request->list))
31934 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
31935 request = compat_alloc_user_space(nbytes);
31936 if (!access_ok(VERIFY_WRITE, request, nbytes))
31937 return -EFAULT;
31938 - list = (struct drm_buf_pub *) (request + 1);
31939 + list = (struct drm_buf_pub __user *) (request + 1);
31940
31941 if (__put_user(count, &request->count)
31942 || __put_user(list, &request->list))
31943 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
31944 index 23dd975..63e9801 100644
31945 --- a/drivers/gpu/drm/drm_ioctl.c
31946 +++ b/drivers/gpu/drm/drm_ioctl.c
31947 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
31948 stats->data[i].value =
31949 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
31950 else
31951 - stats->data[i].value = atomic_read(&dev->counts[i]);
31952 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
31953 stats->data[i].type = dev->types[i];
31954 }
31955
31956 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
31957 index d752c96..fe08455 100644
31958 --- a/drivers/gpu/drm/drm_lock.c
31959 +++ b/drivers/gpu/drm/drm_lock.c
31960 @@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
31961 if (drm_lock_take(&master->lock, lock->context)) {
31962 master->lock.file_priv = file_priv;
31963 master->lock.lock_time = jiffies;
31964 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
31965 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
31966 break; /* Got lock */
31967 }
31968
31969 @@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
31970 return -EINVAL;
31971 }
31972
31973 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
31974 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
31975
31976 if (drm_lock_free(&master->lock, lock->context)) {
31977 /* FIXME: Should really bail out here. */
31978 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
31979 index c236fd2..6b5f2e7 100644
31980 --- a/drivers/gpu/drm/drm_stub.c
31981 +++ b/drivers/gpu/drm/drm_stub.c
31982 @@ -511,7 +511,7 @@ void drm_unplug_dev(struct drm_device *dev)
31983
31984 drm_device_set_unplugged(dev);
31985
31986 - if (dev->open_count == 0) {
31987 + if (local_read(&dev->open_count) == 0) {
31988 drm_put_dev(dev);
31989 }
31990 mutex_unlock(&drm_global_mutex);
31991 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
31992 index 004ecdf..db1f6e0 100644
31993 --- a/drivers/gpu/drm/i810/i810_dma.c
31994 +++ b/drivers/gpu/drm/i810/i810_dma.c
31995 @@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
31996 dma->buflist[vertex->idx],
31997 vertex->discard, vertex->used);
31998
31999 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32000 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32001 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32002 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32003 sarea_priv->last_enqueue = dev_priv->counter - 1;
32004 sarea_priv->last_dispatch = (int)hw_status[5];
32005
32006 @@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32007 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32008 mc->last_render);
32009
32010 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32011 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32012 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32013 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32014 sarea_priv->last_enqueue = dev_priv->counter - 1;
32015 sarea_priv->last_dispatch = (int)hw_status[5];
32016
32017 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32018 index 6e0acad..93c8289 100644
32019 --- a/drivers/gpu/drm/i810/i810_drv.h
32020 +++ b/drivers/gpu/drm/i810/i810_drv.h
32021 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32022 int page_flipping;
32023
32024 wait_queue_head_t irq_queue;
32025 - atomic_t irq_received;
32026 - atomic_t irq_emitted;
32027 + atomic_unchecked_t irq_received;
32028 + atomic_unchecked_t irq_emitted;
32029
32030 int front_offset;
32031 } drm_i810_private_t;
32032 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
32033 index dde8b50..da88e32 100644
32034 --- a/drivers/gpu/drm/i915/i915_debugfs.c
32035 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
32036 @@ -495,7 +495,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
32037 I915_READ(GTIMR));
32038 }
32039 seq_printf(m, "Interrupts received: %d\n",
32040 - atomic_read(&dev_priv->irq_received));
32041 + atomic_read_unchecked(&dev_priv->irq_received));
32042 for_each_ring(ring, dev_priv, i) {
32043 if (IS_GEN6(dev) || IS_GEN7(dev)) {
32044 seq_printf(m,
32045 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
32046 index 61ae104..f8a4bc1 100644
32047 --- a/drivers/gpu/drm/i915/i915_dma.c
32048 +++ b/drivers/gpu/drm/i915/i915_dma.c
32049 @@ -1274,7 +1274,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
32050 bool can_switch;
32051
32052 spin_lock(&dev->count_lock);
32053 - can_switch = (dev->open_count == 0);
32054 + can_switch = (local_read(&dev->open_count) == 0);
32055 spin_unlock(&dev->count_lock);
32056 return can_switch;
32057 }
32058 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
32059 index f511fa2..cebdc1c 100644
32060 --- a/drivers/gpu/drm/i915/i915_drv.h
32061 +++ b/drivers/gpu/drm/i915/i915_drv.h
32062 @@ -274,12 +274,12 @@ struct drm_i915_display_funcs {
32063 /* render clock increase/decrease */
32064 /* display clock increase/decrease */
32065 /* pll clock increase/decrease */
32066 -};
32067 +} __no_const;
32068
32069 struct drm_i915_gt_funcs {
32070 void (*force_wake_get)(struct drm_i915_private *dev_priv);
32071 void (*force_wake_put)(struct drm_i915_private *dev_priv);
32072 -};
32073 +} __no_const;
32074
32075 #define DEV_INFO_FLAGS \
32076 DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
32077 @@ -430,7 +430,7 @@ typedef struct drm_i915_private {
32078
32079 struct resource mch_res;
32080
32081 - atomic_t irq_received;
32082 + atomic_unchecked_t irq_received;
32083
32084 /* protects the irq masks */
32085 spinlock_t irq_lock;
32086 @@ -500,7 +500,7 @@ typedef struct drm_i915_private {
32087 } edp;
32088 bool no_aux_handshake;
32089
32090 - struct notifier_block lid_notifier;
32091 + notifier_block_no_const lid_notifier;
32092
32093 int crt_ddc_pin;
32094 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
32095 @@ -1055,7 +1055,7 @@ struct drm_i915_gem_object {
32096 * will be page flipped away on the next vblank. When it
32097 * reaches 0, dev_priv->pending_flip_queue will be woken up.
32098 */
32099 - atomic_t pending_flip;
32100 + atomic_unchecked_t pending_flip;
32101 };
32102
32103 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
32104 @@ -1558,7 +1558,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
32105 struct drm_i915_private *dev_priv, unsigned port);
32106 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
32107 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
32108 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
32109 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
32110 {
32111 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
32112 }
32113 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32114 index 3eea143..a0b77db 100644
32115 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32116 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32117 @@ -660,7 +660,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
32118 i915_gem_clflush_object(obj);
32119
32120 if (obj->base.pending_write_domain)
32121 - flips |= atomic_read(&obj->pending_flip);
32122 + flips |= atomic_read_unchecked(&obj->pending_flip);
32123
32124 flush_domains |= obj->base.write_domain;
32125 }
32126 @@ -691,9 +691,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
32127
32128 static int
32129 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
32130 - int count)
32131 + unsigned int count)
32132 {
32133 - int i;
32134 + unsigned int i;
32135
32136 for (i = 0; i < count; i++) {
32137 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
32138 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
32139 index 32e1bda..9b2ca91 100644
32140 --- a/drivers/gpu/drm/i915/i915_irq.c
32141 +++ b/drivers/gpu/drm/i915/i915_irq.c
32142 @@ -531,7 +531,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
32143 u32 pipe_stats[I915_MAX_PIPES];
32144 bool blc_event;
32145
32146 - atomic_inc(&dev_priv->irq_received);
32147 + atomic_inc_unchecked(&dev_priv->irq_received);
32148
32149 while (true) {
32150 iir = I915_READ(VLV_IIR);
32151 @@ -678,7 +678,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
32152 irqreturn_t ret = IRQ_NONE;
32153 int i;
32154
32155 - atomic_inc(&dev_priv->irq_received);
32156 + atomic_inc_unchecked(&dev_priv->irq_received);
32157
32158 /* disable master interrupt before clearing iir */
32159 de_ier = I915_READ(DEIER);
32160 @@ -753,7 +753,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
32161 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
32162 u32 hotplug_mask;
32163
32164 - atomic_inc(&dev_priv->irq_received);
32165 + atomic_inc_unchecked(&dev_priv->irq_received);
32166
32167 /* disable master interrupt before clearing iir */
32168 de_ier = I915_READ(DEIER);
32169 @@ -1760,7 +1760,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
32170 {
32171 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32172
32173 - atomic_set(&dev_priv->irq_received, 0);
32174 + atomic_set_unchecked(&dev_priv->irq_received, 0);
32175
32176 I915_WRITE(HWSTAM, 0xeffe);
32177
32178 @@ -1786,7 +1786,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
32179 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32180 int pipe;
32181
32182 - atomic_set(&dev_priv->irq_received, 0);
32183 + atomic_set_unchecked(&dev_priv->irq_received, 0);
32184
32185 /* VLV magic */
32186 I915_WRITE(VLV_IMR, 0);
32187 @@ -2091,7 +2091,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
32188 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32189 int pipe;
32190
32191 - atomic_set(&dev_priv->irq_received, 0);
32192 + atomic_set_unchecked(&dev_priv->irq_received, 0);
32193
32194 for_each_pipe(pipe)
32195 I915_WRITE(PIPESTAT(pipe), 0);
32196 @@ -2142,7 +2142,7 @@ static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
32197 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
32198 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
32199
32200 - atomic_inc(&dev_priv->irq_received);
32201 + atomic_inc_unchecked(&dev_priv->irq_received);
32202
32203 iir = I915_READ16(IIR);
32204 if (iir == 0)
32205 @@ -2227,7 +2227,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
32206 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32207 int pipe;
32208
32209 - atomic_set(&dev_priv->irq_received, 0);
32210 + atomic_set_unchecked(&dev_priv->irq_received, 0);
32211
32212 if (I915_HAS_HOTPLUG(dev)) {
32213 I915_WRITE(PORT_HOTPLUG_EN, 0);
32214 @@ -2322,7 +2322,7 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
32215 };
32216 int pipe, ret = IRQ_NONE;
32217
32218 - atomic_inc(&dev_priv->irq_received);
32219 + atomic_inc_unchecked(&dev_priv->irq_received);
32220
32221 iir = I915_READ(IIR);
32222 do {
32223 @@ -2448,7 +2448,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
32224 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32225 int pipe;
32226
32227 - atomic_set(&dev_priv->irq_received, 0);
32228 + atomic_set_unchecked(&dev_priv->irq_received, 0);
32229
32230 I915_WRITE(PORT_HOTPLUG_EN, 0);
32231 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
32232 @@ -2555,7 +2555,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
32233 int irq_received;
32234 int ret = IRQ_NONE, pipe;
32235
32236 - atomic_inc(&dev_priv->irq_received);
32237 + atomic_inc_unchecked(&dev_priv->irq_received);
32238
32239 iir = I915_READ(IIR);
32240
32241 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
32242 index b426d44..1b9038d 100644
32243 --- a/drivers/gpu/drm/i915/intel_display.c
32244 +++ b/drivers/gpu/drm/i915/intel_display.c
32245 @@ -2131,7 +2131,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
32246
32247 wait_event(dev_priv->pending_flip_queue,
32248 atomic_read(&dev_priv->mm.wedged) ||
32249 - atomic_read(&obj->pending_flip) == 0);
32250 + atomic_read_unchecked(&obj->pending_flip) == 0);
32251
32252 /* Big Hammer, we also need to ensure that any pending
32253 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
32254 @@ -6236,8 +6236,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
32255
32256 obj = work->old_fb_obj;
32257
32258 - atomic_clear_mask(1 << intel_crtc->plane,
32259 - &obj->pending_flip.counter);
32260 + atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
32261
32262 wake_up(&dev_priv->pending_flip_queue);
32263 schedule_work(&work->work);
32264 @@ -6583,7 +6582,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
32265 /* Block clients from rendering to the new back buffer until
32266 * the flip occurs and the object is no longer visible.
32267 */
32268 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32269 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32270
32271 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
32272 if (ret)
32273 @@ -6598,7 +6597,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
32274 return 0;
32275
32276 cleanup_pending:
32277 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32278 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32279 drm_gem_object_unreference(&work->old_fb_obj->base);
32280 drm_gem_object_unreference(&obj->base);
32281 mutex_unlock(&dev->struct_mutex);
32282 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
32283 index 54558a0..2d97005 100644
32284 --- a/drivers/gpu/drm/mga/mga_drv.h
32285 +++ b/drivers/gpu/drm/mga/mga_drv.h
32286 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
32287 u32 clear_cmd;
32288 u32 maccess;
32289
32290 - atomic_t vbl_received; /**< Number of vblanks received. */
32291 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
32292 wait_queue_head_t fence_queue;
32293 - atomic_t last_fence_retired;
32294 + atomic_unchecked_t last_fence_retired;
32295 u32 next_fence_to_post;
32296
32297 unsigned int fb_cpp;
32298 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
32299 index 598c281..60d590e 100644
32300 --- a/drivers/gpu/drm/mga/mga_irq.c
32301 +++ b/drivers/gpu/drm/mga/mga_irq.c
32302 @@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
32303 if (crtc != 0)
32304 return 0;
32305
32306 - return atomic_read(&dev_priv->vbl_received);
32307 + return atomic_read_unchecked(&dev_priv->vbl_received);
32308 }
32309
32310
32311 @@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
32312 /* VBLANK interrupt */
32313 if (status & MGA_VLINEPEN) {
32314 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
32315 - atomic_inc(&dev_priv->vbl_received);
32316 + atomic_inc_unchecked(&dev_priv->vbl_received);
32317 drm_handle_vblank(dev, 0);
32318 handled = 1;
32319 }
32320 @@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
32321 if ((prim_start & ~0x03) != (prim_end & ~0x03))
32322 MGA_WRITE(MGA_PRIMEND, prim_end);
32323
32324 - atomic_inc(&dev_priv->last_fence_retired);
32325 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
32326 DRM_WAKEUP(&dev_priv->fence_queue);
32327 handled = 1;
32328 }
32329 @@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
32330 * using fences.
32331 */
32332 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
32333 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
32334 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
32335 - *sequence) <= (1 << 23)));
32336
32337 *sequence = cur_fence;
32338 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
32339 index 09fdef2..57f5c3b 100644
32340 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
32341 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
32342 @@ -1240,7 +1240,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
32343 struct bit_table {
32344 const char id;
32345 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
32346 -};
32347 +} __no_const;
32348
32349 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
32350
32351 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
32352 index a101699..a163f0a 100644
32353 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h
32354 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
32355 @@ -80,7 +80,7 @@ struct nouveau_drm {
32356 struct drm_global_reference mem_global_ref;
32357 struct ttm_bo_global_ref bo_global_ref;
32358 struct ttm_bo_device bdev;
32359 - atomic_t validate_sequence;
32360 + atomic_unchecked_t validate_sequence;
32361 int (*move)(struct nouveau_channel *,
32362 struct ttm_buffer_object *,
32363 struct ttm_mem_reg *, struct ttm_mem_reg *);
32364 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
32365 index bedafd1..ca5330a 100644
32366 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h
32367 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
32368 @@ -43,7 +43,7 @@ struct nouveau_fence_priv {
32369 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
32370 struct nouveau_channel *);
32371 u32 (*read)(struct nouveau_channel *);
32372 -};
32373 +} __no_const;
32374
32375 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
32376
32377 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
32378 index 5e2f521..0d21436 100644
32379 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
32380 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
32381 @@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
32382 int trycnt = 0;
32383 int ret, i;
32384
32385 - sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
32386 + sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
32387 retry:
32388 if (++trycnt > 100000) {
32389 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
32390 diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
32391 index 73b789c..ca5aa90 100644
32392 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h
32393 +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
32394 @@ -168,7 +168,7 @@ struct nouveau_pm {
32395 struct nouveau_pm_level *cur;
32396
32397 struct device *hwmon;
32398 - struct notifier_block acpi_nb;
32399 + notifier_block_no_const acpi_nb;
32400
32401 int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
32402 void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
32403 diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
32404 index 6f0ac64..9c2dfb4 100644
32405 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c
32406 +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
32407 @@ -63,7 +63,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
32408 bool can_switch;
32409
32410 spin_lock(&dev->count_lock);
32411 - can_switch = (dev->open_count == 0);
32412 + can_switch = (local_read(&dev->open_count) == 0);
32413 spin_unlock(&dev->count_lock);
32414 return can_switch;
32415 }
32416 diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
32417 index 9f6f55c..30e3a29 100644
32418 --- a/drivers/gpu/drm/nouveau/nv50_evo.c
32419 +++ b/drivers/gpu/drm/nouveau/nv50_evo.c
32420 @@ -152,9 +152,9 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
32421 kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
32422 evo->object->oclass->ofuncs =
32423 kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
32424 - evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
32425 - evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
32426 - evo->object->oclass->ofuncs->rd08 =
32427 + *(void**)&evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
32428 + *(void**)&evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
32429 + *(void**)&evo->object->oclass->ofuncs->rd08 =
32430 ioremap(pci_resource_start(dev->pdev, 0) +
32431 NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
32432 return 0;
32433 diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
32434 index b562b59..9d725a8 100644
32435 --- a/drivers/gpu/drm/nouveau/nv50_sor.c
32436 +++ b/drivers/gpu/drm/nouveau/nv50_sor.c
32437 @@ -317,7 +317,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
32438 }
32439
32440 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
32441 - struct dp_train_func func = {
32442 + static struct dp_train_func func = {
32443 .link_set = nv50_sor_dp_link_set,
32444 .train_set = nv50_sor_dp_train_set,
32445 .train_adj = nv50_sor_dp_train_adj
32446 diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
32447 index c402fca..f1d694b 100644
32448 --- a/drivers/gpu/drm/nouveau/nvd0_display.c
32449 +++ b/drivers/gpu/drm/nouveau/nvd0_display.c
32450 @@ -1389,7 +1389,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
32451 nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
32452
32453 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
32454 - struct dp_train_func func = {
32455 + static struct dp_train_func func = {
32456 .link_set = nvd0_sor_dp_link_set,
32457 .train_set = nvd0_sor_dp_train_set,
32458 .train_adj = nvd0_sor_dp_train_adj
32459 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
32460 index d4660cf..70dbe65 100644
32461 --- a/drivers/gpu/drm/r128/r128_cce.c
32462 +++ b/drivers/gpu/drm/r128/r128_cce.c
32463 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
32464
32465 /* GH: Simple idle check.
32466 */
32467 - atomic_set(&dev_priv->idle_count, 0);
32468 + atomic_set_unchecked(&dev_priv->idle_count, 0);
32469
32470 /* We don't support anything other than bus-mastering ring mode,
32471 * but the ring can be in either AGP or PCI space for the ring
32472 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
32473 index 930c71b..499aded 100644
32474 --- a/drivers/gpu/drm/r128/r128_drv.h
32475 +++ b/drivers/gpu/drm/r128/r128_drv.h
32476 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
32477 int is_pci;
32478 unsigned long cce_buffers_offset;
32479
32480 - atomic_t idle_count;
32481 + atomic_unchecked_t idle_count;
32482
32483 int page_flipping;
32484 int current_page;
32485 u32 crtc_offset;
32486 u32 crtc_offset_cntl;
32487
32488 - atomic_t vbl_received;
32489 + atomic_unchecked_t vbl_received;
32490
32491 u32 color_fmt;
32492 unsigned int front_offset;
32493 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
32494 index 2ea4f09..d391371 100644
32495 --- a/drivers/gpu/drm/r128/r128_irq.c
32496 +++ b/drivers/gpu/drm/r128/r128_irq.c
32497 @@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
32498 if (crtc != 0)
32499 return 0;
32500
32501 - return atomic_read(&dev_priv->vbl_received);
32502 + return atomic_read_unchecked(&dev_priv->vbl_received);
32503 }
32504
32505 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
32506 @@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
32507 /* VBLANK interrupt */
32508 if (status & R128_CRTC_VBLANK_INT) {
32509 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
32510 - atomic_inc(&dev_priv->vbl_received);
32511 + atomic_inc_unchecked(&dev_priv->vbl_received);
32512 drm_handle_vblank(dev, 0);
32513 return IRQ_HANDLED;
32514 }
32515 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
32516 index 19bb7e6..de7e2a2 100644
32517 --- a/drivers/gpu/drm/r128/r128_state.c
32518 +++ b/drivers/gpu/drm/r128/r128_state.c
32519 @@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
32520
32521 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
32522 {
32523 - if (atomic_read(&dev_priv->idle_count) == 0)
32524 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
32525 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
32526 else
32527 - atomic_set(&dev_priv->idle_count, 0);
32528 + atomic_set_unchecked(&dev_priv->idle_count, 0);
32529 }
32530
32531 #endif
32532 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
32533 index 5a82b6b..9e69c73 100644
32534 --- a/drivers/gpu/drm/radeon/mkregtable.c
32535 +++ b/drivers/gpu/drm/radeon/mkregtable.c
32536 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
32537 regex_t mask_rex;
32538 regmatch_t match[4];
32539 char buf[1024];
32540 - size_t end;
32541 + long end;
32542 int len;
32543 int done = 0;
32544 int r;
32545 unsigned o;
32546 struct offset *offset;
32547 char last_reg_s[10];
32548 - int last_reg;
32549 + unsigned long last_reg;
32550
32551 if (regcomp
32552 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
32553 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
32554 index 8c42d54..5b6b963 100644
32555 --- a/drivers/gpu/drm/radeon/radeon.h
32556 +++ b/drivers/gpu/drm/radeon/radeon.h
32557 @@ -728,7 +728,7 @@ struct r600_blit_cp_primitives {
32558 int x2, int y2);
32559 void (*draw_auto)(struct radeon_device *rdev);
32560 void (*set_default_state)(struct radeon_device *rdev);
32561 -};
32562 +} __no_const;
32563
32564 struct r600_blit {
32565 struct radeon_bo *shader_obj;
32566 @@ -1248,7 +1248,7 @@ struct radeon_asic {
32567 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
32568 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
32569 } pflip;
32570 -};
32571 +} __no_const;
32572
32573 /*
32574 * Asic structures
32575 @@ -1590,7 +1590,7 @@ struct radeon_device {
32576 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
32577 bool audio_enabled;
32578 struct r600_audio audio_status; /* audio stuff */
32579 - struct notifier_block acpi_nb;
32580 + notifier_block_no_const acpi_nb;
32581 /* only one userspace can use Hyperz features or CMASK at a time */
32582 struct drm_file *hyperz_filp;
32583 struct drm_file *cmask_filp;
32584 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
32585 index e2f5f88..82f22da 100644
32586 --- a/drivers/gpu/drm/radeon/radeon_device.c
32587 +++ b/drivers/gpu/drm/radeon/radeon_device.c
32588 @@ -940,7 +940,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
32589 bool can_switch;
32590
32591 spin_lock(&dev->count_lock);
32592 - can_switch = (dev->open_count == 0);
32593 + can_switch = (local_read(&dev->open_count) == 0);
32594 spin_unlock(&dev->count_lock);
32595 return can_switch;
32596 }
32597 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
32598 index a1b59ca..86f2d44 100644
32599 --- a/drivers/gpu/drm/radeon/radeon_drv.h
32600 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
32601 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
32602
32603 /* SW interrupt */
32604 wait_queue_head_t swi_queue;
32605 - atomic_t swi_emitted;
32606 + atomic_unchecked_t swi_emitted;
32607 int vblank_crtc;
32608 uint32_t irq_enable_reg;
32609 uint32_t r500_disp_irq_reg;
32610 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
32611 index c180df8..cd80dd2d 100644
32612 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
32613 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
32614 @@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
32615 request = compat_alloc_user_space(sizeof(*request));
32616 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
32617 || __put_user(req32.param, &request->param)
32618 - || __put_user((void __user *)(unsigned long)req32.value,
32619 + || __put_user((unsigned long)req32.value,
32620 &request->value))
32621 return -EFAULT;
32622
32623 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
32624 index e771033..a0bc6b3 100644
32625 --- a/drivers/gpu/drm/radeon/radeon_irq.c
32626 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
32627 @@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
32628 unsigned int ret;
32629 RING_LOCALS;
32630
32631 - atomic_inc(&dev_priv->swi_emitted);
32632 - ret = atomic_read(&dev_priv->swi_emitted);
32633 + atomic_inc_unchecked(&dev_priv->swi_emitted);
32634 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
32635
32636 BEGIN_RING(4);
32637 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
32638 @@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
32639 drm_radeon_private_t *dev_priv =
32640 (drm_radeon_private_t *) dev->dev_private;
32641
32642 - atomic_set(&dev_priv->swi_emitted, 0);
32643 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
32644 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
32645
32646 dev->max_vblank_count = 0x001fffff;
32647 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
32648 index 8e9057b..af6dacb 100644
32649 --- a/drivers/gpu/drm/radeon/radeon_state.c
32650 +++ b/drivers/gpu/drm/radeon/radeon_state.c
32651 @@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
32652 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
32653 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
32654
32655 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
32656 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
32657 sarea_priv->nbox * sizeof(depth_boxes[0])))
32658 return -EFAULT;
32659
32660 @@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
32661 {
32662 drm_radeon_private_t *dev_priv = dev->dev_private;
32663 drm_radeon_getparam_t *param = data;
32664 - int value;
32665 + int value = 0;
32666
32667 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
32668
32669 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
32670 index 5ebe1b3..1ed9426 100644
32671 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
32672 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
32673 @@ -822,8 +822,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
32674 }
32675 if (unlikely(ttm_vm_ops == NULL)) {
32676 ttm_vm_ops = vma->vm_ops;
32677 - radeon_ttm_vm_ops = *ttm_vm_ops;
32678 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
32679 + pax_open_kernel();
32680 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
32681 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
32682 + pax_close_kernel();
32683 }
32684 vma->vm_ops = &radeon_ttm_vm_ops;
32685 return 0;
32686 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
32687 index 5706d2a..17aedaa 100644
32688 --- a/drivers/gpu/drm/radeon/rs690.c
32689 +++ b/drivers/gpu/drm/radeon/rs690.c
32690 @@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
32691 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
32692 rdev->pm.sideport_bandwidth.full)
32693 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
32694 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
32695 + read_delay_latency.full = dfixed_const(800 * 1000);
32696 read_delay_latency.full = dfixed_div(read_delay_latency,
32697 rdev->pm.igp_sideport_mclk);
32698 + a.full = dfixed_const(370);
32699 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
32700 } else {
32701 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
32702 rdev->pm.k8_bandwidth.full)
32703 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
32704 index bd2a3b4..122d9ad 100644
32705 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
32706 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
32707 @@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
32708 static int ttm_pool_mm_shrink(struct shrinker *shrink,
32709 struct shrink_control *sc)
32710 {
32711 - static atomic_t start_pool = ATOMIC_INIT(0);
32712 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
32713 unsigned i;
32714 - unsigned pool_offset = atomic_add_return(1, &start_pool);
32715 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
32716 struct ttm_page_pool *pool;
32717 int shrink_pages = sc->nr_to_scan;
32718
32719 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
32720 index 893a650..6190d3b 100644
32721 --- a/drivers/gpu/drm/via/via_drv.h
32722 +++ b/drivers/gpu/drm/via/via_drv.h
32723 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
32724 typedef uint32_t maskarray_t[5];
32725
32726 typedef struct drm_via_irq {
32727 - atomic_t irq_received;
32728 + atomic_unchecked_t irq_received;
32729 uint32_t pending_mask;
32730 uint32_t enable_mask;
32731 wait_queue_head_t irq_queue;
32732 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
32733 struct timeval last_vblank;
32734 int last_vblank_valid;
32735 unsigned usec_per_vblank;
32736 - atomic_t vbl_received;
32737 + atomic_unchecked_t vbl_received;
32738 drm_via_state_t hc_state;
32739 char pci_buf[VIA_PCI_BUF_SIZE];
32740 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
32741 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
32742 index ac98964..5dbf512 100644
32743 --- a/drivers/gpu/drm/via/via_irq.c
32744 +++ b/drivers/gpu/drm/via/via_irq.c
32745 @@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
32746 if (crtc != 0)
32747 return 0;
32748
32749 - return atomic_read(&dev_priv->vbl_received);
32750 + return atomic_read_unchecked(&dev_priv->vbl_received);
32751 }
32752
32753 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
32754 @@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
32755
32756 status = VIA_READ(VIA_REG_INTERRUPT);
32757 if (status & VIA_IRQ_VBLANK_PENDING) {
32758 - atomic_inc(&dev_priv->vbl_received);
32759 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
32760 + atomic_inc_unchecked(&dev_priv->vbl_received);
32761 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
32762 do_gettimeofday(&cur_vblank);
32763 if (dev_priv->last_vblank_valid) {
32764 dev_priv->usec_per_vblank =
32765 @@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
32766 dev_priv->last_vblank = cur_vblank;
32767 dev_priv->last_vblank_valid = 1;
32768 }
32769 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
32770 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
32771 DRM_DEBUG("US per vblank is: %u\n",
32772 dev_priv->usec_per_vblank);
32773 }
32774 @@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
32775
32776 for (i = 0; i < dev_priv->num_irqs; ++i) {
32777 if (status & cur_irq->pending_mask) {
32778 - atomic_inc(&cur_irq->irq_received);
32779 + atomic_inc_unchecked(&cur_irq->irq_received);
32780 DRM_WAKEUP(&cur_irq->irq_queue);
32781 handled = 1;
32782 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
32783 @@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
32784 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
32785 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
32786 masks[irq][4]));
32787 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
32788 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
32789 } else {
32790 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
32791 (((cur_irq_sequence =
32792 - atomic_read(&cur_irq->irq_received)) -
32793 + atomic_read_unchecked(&cur_irq->irq_received)) -
32794 *sequence) <= (1 << 23)));
32795 }
32796 *sequence = cur_irq_sequence;
32797 @@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
32798 }
32799
32800 for (i = 0; i < dev_priv->num_irqs; ++i) {
32801 - atomic_set(&cur_irq->irq_received, 0);
32802 + atomic_set_unchecked(&cur_irq->irq_received, 0);
32803 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
32804 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
32805 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
32806 @@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
32807 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
32808 case VIA_IRQ_RELATIVE:
32809 irqwait->request.sequence +=
32810 - atomic_read(&cur_irq->irq_received);
32811 + atomic_read_unchecked(&cur_irq->irq_received);
32812 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
32813 case VIA_IRQ_ABSOLUTE:
32814 break;
32815 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
32816 index 88a179e..cf13317 100644
32817 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
32818 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
32819 @@ -263,7 +263,7 @@ struct vmw_private {
32820 * Fencing and IRQs.
32821 */
32822
32823 - atomic_t marker_seq;
32824 + atomic_unchecked_t marker_seq;
32825 wait_queue_head_t fence_queue;
32826 wait_queue_head_t fifo_queue;
32827 int fence_queue_waiters; /* Protected by hw_mutex */
32828 @@ -306,7 +306,7 @@ struct vmw_private {
32829
32830 struct vmw_master *active_master;
32831 struct vmw_master fbdev_master;
32832 - struct notifier_block pm_nb;
32833 + notifier_block_no_const pm_nb;
32834 bool suspended;
32835
32836 struct mutex release_mutex;
32837 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
32838 index 3eb1486..0a47ee9 100644
32839 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
32840 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
32841 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
32842 (unsigned int) min,
32843 (unsigned int) fifo->capabilities);
32844
32845 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
32846 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
32847 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
32848 vmw_marker_queue_init(&fifo->marker_queue);
32849 return vmw_fifo_send_fence(dev_priv, &dummy);
32850 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
32851 if (reserveable)
32852 iowrite32(bytes, fifo_mem +
32853 SVGA_FIFO_RESERVED);
32854 - return fifo_mem + (next_cmd >> 2);
32855 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
32856 } else {
32857 need_bounce = true;
32858 }
32859 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
32860
32861 fm = vmw_fifo_reserve(dev_priv, bytes);
32862 if (unlikely(fm == NULL)) {
32863 - *seqno = atomic_read(&dev_priv->marker_seq);
32864 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
32865 ret = -ENOMEM;
32866 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
32867 false, 3*HZ);
32868 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
32869 }
32870
32871 do {
32872 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
32873 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
32874 } while (*seqno == 0);
32875
32876 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
32877 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
32878 index 4640adb..e1384ed 100644
32879 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
32880 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
32881 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
32882 * emitted. Then the fence is stale and signaled.
32883 */
32884
32885 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
32886 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
32887 > VMW_FENCE_WRAP);
32888
32889 return ret;
32890 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
32891
32892 if (fifo_idle)
32893 down_read(&fifo_state->rwsem);
32894 - signal_seq = atomic_read(&dev_priv->marker_seq);
32895 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
32896 ret = 0;
32897
32898 for (;;) {
32899 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
32900 index 8a8725c..afed796 100644
32901 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
32902 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
32903 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
32904 while (!vmw_lag_lt(queue, us)) {
32905 spin_lock(&queue->lock);
32906 if (list_empty(&queue->head))
32907 - seqno = atomic_read(&dev_priv->marker_seq);
32908 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
32909 else {
32910 marker = list_first_entry(&queue->head,
32911 struct vmw_marker, head);
32912 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
32913 index f4109fd..7c62889 100644
32914 --- a/drivers/hid/hid-core.c
32915 +++ b/drivers/hid/hid-core.c
32916 @@ -2200,7 +2200,7 @@ static bool hid_ignore(struct hid_device *hdev)
32917
32918 int hid_add_device(struct hid_device *hdev)
32919 {
32920 - static atomic_t id = ATOMIC_INIT(0);
32921 + static atomic_unchecked_t id = ATOMIC_INIT(0);
32922 int ret;
32923
32924 if (WARN_ON(hdev->status & HID_STAT_ADDED))
32925 @@ -2235,7 +2235,7 @@ int hid_add_device(struct hid_device *hdev)
32926 /* XXX hack, any other cleaner solution after the driver core
32927 * is converted to allow more than 20 bytes as the device name? */
32928 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
32929 - hdev->vendor, hdev->product, atomic_inc_return(&id));
32930 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
32931
32932 hid_debug_register(hdev, dev_name(&hdev->dev));
32933 ret = device_add(&hdev->dev);
32934 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
32935 index eec3291..8ed706b 100644
32936 --- a/drivers/hid/hid-wiimote-debug.c
32937 +++ b/drivers/hid/hid-wiimote-debug.c
32938 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
32939 else if (size == 0)
32940 return -EIO;
32941
32942 - if (copy_to_user(u, buf, size))
32943 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
32944 return -EFAULT;
32945
32946 *off += size;
32947 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
32948 index 14599e2..711c965 100644
32949 --- a/drivers/hid/usbhid/hiddev.c
32950 +++ b/drivers/hid/usbhid/hiddev.c
32951 @@ -625,7 +625,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
32952 break;
32953
32954 case HIDIOCAPPLICATION:
32955 - if (arg < 0 || arg >= hid->maxapplication)
32956 + if (arg >= hid->maxapplication)
32957 break;
32958
32959 for (i = 0; i < hid->maxcollection; i++)
32960 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
32961 index f4c3d28..82f45a9 100644
32962 --- a/drivers/hv/channel.c
32963 +++ b/drivers/hv/channel.c
32964 @@ -402,8 +402,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
32965 int ret = 0;
32966 int t;
32967
32968 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32969 - atomic_inc(&vmbus_connection.next_gpadl_handle);
32970 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32971 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32972
32973 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32974 if (ret)
32975 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
32976 index 3648f8f..30ef30d 100644
32977 --- a/drivers/hv/hv.c
32978 +++ b/drivers/hv/hv.c
32979 @@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
32980 u64 output_address = (output) ? virt_to_phys(output) : 0;
32981 u32 output_address_hi = output_address >> 32;
32982 u32 output_address_lo = output_address & 0xFFFFFFFF;
32983 - void *hypercall_page = hv_context.hypercall_page;
32984 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32985
32986 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
32987 "=a"(hv_status_lo) : "d" (control_hi),
32988 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
32989 index d8d1fad..b91caf7 100644
32990 --- a/drivers/hv/hyperv_vmbus.h
32991 +++ b/drivers/hv/hyperv_vmbus.h
32992 @@ -594,7 +594,7 @@ enum vmbus_connect_state {
32993 struct vmbus_connection {
32994 enum vmbus_connect_state conn_state;
32995
32996 - atomic_t next_gpadl_handle;
32997 + atomic_unchecked_t next_gpadl_handle;
32998
32999 /*
33000 * Represents channel interrupts. Each bit position represents a
33001 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
33002 index 8e1a9ec..4687821 100644
33003 --- a/drivers/hv/vmbus_drv.c
33004 +++ b/drivers/hv/vmbus_drv.c
33005 @@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
33006 {
33007 int ret = 0;
33008
33009 - static atomic_t device_num = ATOMIC_INIT(0);
33010 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
33011
33012 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
33013 - atomic_inc_return(&device_num));
33014 + atomic_inc_return_unchecked(&device_num));
33015
33016 child_device_obj->device.bus = &hv_bus;
33017 child_device_obj->device.parent = &hv_acpi_dev->dev;
33018 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33019 index 07a0c1a..9fa531f 100644
33020 --- a/drivers/hwmon/sht15.c
33021 +++ b/drivers/hwmon/sht15.c
33022 @@ -165,11 +165,11 @@ struct sht15_data {
33023 struct device *dev;
33024 struct device *hwmon_dev;
33025 struct regulator *reg;
33026 - struct notifier_block nb;
33027 + notifier_block_no_const nb;
33028 int supply_uV;
33029 bool supply_uV_valid;
33030 struct work_struct update_supply_work;
33031 - atomic_t interrupt_handled;
33032 + atomic_unchecked_t interrupt_handled;
33033 };
33034
33035 /**
33036 @@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
33037 return ret;
33038
33039 gpio_direction_input(data->pdata->gpio_data);
33040 - atomic_set(&data->interrupt_handled, 0);
33041 + atomic_set_unchecked(&data->interrupt_handled, 0);
33042
33043 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33044 if (gpio_get_value(data->pdata->gpio_data) == 0) {
33045 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
33046 /* Only relevant if the interrupt hasn't occurred. */
33047 - if (!atomic_read(&data->interrupt_handled))
33048 + if (!atomic_read_unchecked(&data->interrupt_handled))
33049 schedule_work(&data->read_work);
33050 }
33051 ret = wait_event_timeout(data->wait_queue,
33052 @@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
33053
33054 /* First disable the interrupt */
33055 disable_irq_nosync(irq);
33056 - atomic_inc(&data->interrupt_handled);
33057 + atomic_inc_unchecked(&data->interrupt_handled);
33058 /* Then schedule a reading work struct */
33059 if (data->state != SHT15_READING_NOTHING)
33060 schedule_work(&data->read_work);
33061 @@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
33062 * If not, then start the interrupt again - care here as could
33063 * have gone low in meantime so verify it hasn't!
33064 */
33065 - atomic_set(&data->interrupt_handled, 0);
33066 + atomic_set_unchecked(&data->interrupt_handled, 0);
33067 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33068 /* If still not occurred or another handler was scheduled */
33069 if (gpio_get_value(data->pdata->gpio_data)
33070 - || atomic_read(&data->interrupt_handled))
33071 + || atomic_read_unchecked(&data->interrupt_handled))
33072 return;
33073 }
33074
33075 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
33076 index 378fcb5..5e91fa8 100644
33077 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
33078 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
33079 @@ -43,7 +43,7 @@
33080 extern struct i2c_adapter amd756_smbus;
33081
33082 static struct i2c_adapter *s4882_adapter;
33083 -static struct i2c_algorithm *s4882_algo;
33084 +static i2c_algorithm_no_const *s4882_algo;
33085
33086 /* Wrapper access functions for multiplexed SMBus */
33087 static DEFINE_MUTEX(amd756_lock);
33088 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
33089 index 29015eb..af2d8e9 100644
33090 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
33091 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
33092 @@ -41,7 +41,7 @@
33093 extern struct i2c_adapter *nforce2_smbus;
33094
33095 static struct i2c_adapter *s4985_adapter;
33096 -static struct i2c_algorithm *s4985_algo;
33097 +static i2c_algorithm_no_const *s4985_algo;
33098
33099 /* Wrapper access functions for multiplexed SMBus */
33100 static DEFINE_MUTEX(nforce2_lock);
33101 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
33102 index d94e0ce..7055125 100644
33103 --- a/drivers/i2c/i2c-mux.c
33104 +++ b/drivers/i2c/i2c-mux.c
33105 @@ -30,7 +30,7 @@
33106 /* multiplexer per channel data */
33107 struct i2c_mux_priv {
33108 struct i2c_adapter adap;
33109 - struct i2c_algorithm algo;
33110 + i2c_algorithm_no_const algo;
33111
33112 struct i2c_adapter *parent;
33113 void *mux_priv; /* the mux chip/device */
33114 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
33115 index 8126824..55a2798 100644
33116 --- a/drivers/ide/ide-cd.c
33117 +++ b/drivers/ide/ide-cd.c
33118 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
33119 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
33120 if ((unsigned long)buf & alignment
33121 || blk_rq_bytes(rq) & q->dma_pad_mask
33122 - || object_is_on_stack(buf))
33123 + || object_starts_on_stack(buf))
33124 drive->dma = 0;
33125 }
33126 }
33127 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
33128 index 394fea2..c833880 100644
33129 --- a/drivers/infiniband/core/cm.c
33130 +++ b/drivers/infiniband/core/cm.c
33131 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
33132
33133 struct cm_counter_group {
33134 struct kobject obj;
33135 - atomic_long_t counter[CM_ATTR_COUNT];
33136 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
33137 };
33138
33139 struct cm_counter_attribute {
33140 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
33141 struct ib_mad_send_buf *msg = NULL;
33142 int ret;
33143
33144 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33145 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33146 counter[CM_REQ_COUNTER]);
33147
33148 /* Quick state check to discard duplicate REQs. */
33149 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
33150 if (!cm_id_priv)
33151 return;
33152
33153 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33154 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33155 counter[CM_REP_COUNTER]);
33156 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
33157 if (ret)
33158 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
33159 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
33160 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
33161 spin_unlock_irq(&cm_id_priv->lock);
33162 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33163 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33164 counter[CM_RTU_COUNTER]);
33165 goto out;
33166 }
33167 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
33168 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
33169 dreq_msg->local_comm_id);
33170 if (!cm_id_priv) {
33171 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33172 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33173 counter[CM_DREQ_COUNTER]);
33174 cm_issue_drep(work->port, work->mad_recv_wc);
33175 return -EINVAL;
33176 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
33177 case IB_CM_MRA_REP_RCVD:
33178 break;
33179 case IB_CM_TIMEWAIT:
33180 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33181 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33182 counter[CM_DREQ_COUNTER]);
33183 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
33184 goto unlock;
33185 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
33186 cm_free_msg(msg);
33187 goto deref;
33188 case IB_CM_DREQ_RCVD:
33189 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33190 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33191 counter[CM_DREQ_COUNTER]);
33192 goto unlock;
33193 default:
33194 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
33195 ib_modify_mad(cm_id_priv->av.port->mad_agent,
33196 cm_id_priv->msg, timeout)) {
33197 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
33198 - atomic_long_inc(&work->port->
33199 + atomic_long_inc_unchecked(&work->port->
33200 counter_group[CM_RECV_DUPLICATES].
33201 counter[CM_MRA_COUNTER]);
33202 goto out;
33203 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
33204 break;
33205 case IB_CM_MRA_REQ_RCVD:
33206 case IB_CM_MRA_REP_RCVD:
33207 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33208 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33209 counter[CM_MRA_COUNTER]);
33210 /* fall through */
33211 default:
33212 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
33213 case IB_CM_LAP_IDLE:
33214 break;
33215 case IB_CM_MRA_LAP_SENT:
33216 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33217 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33218 counter[CM_LAP_COUNTER]);
33219 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
33220 goto unlock;
33221 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
33222 cm_free_msg(msg);
33223 goto deref;
33224 case IB_CM_LAP_RCVD:
33225 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33226 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33227 counter[CM_LAP_COUNTER]);
33228 goto unlock;
33229 default:
33230 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
33231 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
33232 if (cur_cm_id_priv) {
33233 spin_unlock_irq(&cm.lock);
33234 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33235 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33236 counter[CM_SIDR_REQ_COUNTER]);
33237 goto out; /* Duplicate message. */
33238 }
33239 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
33240 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
33241 msg->retries = 1;
33242
33243 - atomic_long_add(1 + msg->retries,
33244 + atomic_long_add_unchecked(1 + msg->retries,
33245 &port->counter_group[CM_XMIT].counter[attr_index]);
33246 if (msg->retries)
33247 - atomic_long_add(msg->retries,
33248 + atomic_long_add_unchecked(msg->retries,
33249 &port->counter_group[CM_XMIT_RETRIES].
33250 counter[attr_index]);
33251
33252 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
33253 }
33254
33255 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
33256 - atomic_long_inc(&port->counter_group[CM_RECV].
33257 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
33258 counter[attr_id - CM_ATTR_ID_OFFSET]);
33259
33260 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
33261 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
33262 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
33263
33264 return sprintf(buf, "%ld\n",
33265 - atomic_long_read(&group->counter[cm_attr->index]));
33266 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
33267 }
33268
33269 static const struct sysfs_ops cm_counter_ops = {
33270 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
33271 index 176c8f9..2627b62 100644
33272 --- a/drivers/infiniband/core/fmr_pool.c
33273 +++ b/drivers/infiniband/core/fmr_pool.c
33274 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
33275
33276 struct task_struct *thread;
33277
33278 - atomic_t req_ser;
33279 - atomic_t flush_ser;
33280 + atomic_unchecked_t req_ser;
33281 + atomic_unchecked_t flush_ser;
33282
33283 wait_queue_head_t force_wait;
33284 };
33285 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
33286 struct ib_fmr_pool *pool = pool_ptr;
33287
33288 do {
33289 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
33290 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
33291 ib_fmr_batch_release(pool);
33292
33293 - atomic_inc(&pool->flush_ser);
33294 + atomic_inc_unchecked(&pool->flush_ser);
33295 wake_up_interruptible(&pool->force_wait);
33296
33297 if (pool->flush_function)
33298 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
33299 }
33300
33301 set_current_state(TASK_INTERRUPTIBLE);
33302 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
33303 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
33304 !kthread_should_stop())
33305 schedule();
33306 __set_current_state(TASK_RUNNING);
33307 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
33308 pool->dirty_watermark = params->dirty_watermark;
33309 pool->dirty_len = 0;
33310 spin_lock_init(&pool->pool_lock);
33311 - atomic_set(&pool->req_ser, 0);
33312 - atomic_set(&pool->flush_ser, 0);
33313 + atomic_set_unchecked(&pool->req_ser, 0);
33314 + atomic_set_unchecked(&pool->flush_ser, 0);
33315 init_waitqueue_head(&pool->force_wait);
33316
33317 pool->thread = kthread_run(ib_fmr_cleanup_thread,
33318 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
33319 }
33320 spin_unlock_irq(&pool->pool_lock);
33321
33322 - serial = atomic_inc_return(&pool->req_ser);
33323 + serial = atomic_inc_return_unchecked(&pool->req_ser);
33324 wake_up_process(pool->thread);
33325
33326 if (wait_event_interruptible(pool->force_wait,
33327 - atomic_read(&pool->flush_ser) - serial >= 0))
33328 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
33329 return -EINTR;
33330
33331 return 0;
33332 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
33333 } else {
33334 list_add_tail(&fmr->list, &pool->dirty_list);
33335 if (++pool->dirty_len >= pool->dirty_watermark) {
33336 - atomic_inc(&pool->req_ser);
33337 + atomic_inc_unchecked(&pool->req_ser);
33338 wake_up_process(pool->thread);
33339 }
33340 }
33341 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
33342 index afd8179..598063f 100644
33343 --- a/drivers/infiniband/hw/cxgb4/mem.c
33344 +++ b/drivers/infiniband/hw/cxgb4/mem.c
33345 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
33346 int err;
33347 struct fw_ri_tpte tpt;
33348 u32 stag_idx;
33349 - static atomic_t key;
33350 + static atomic_unchecked_t key;
33351
33352 if (c4iw_fatal_error(rdev))
33353 return -EIO;
33354 @@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
33355 if (rdev->stats.stag.cur > rdev->stats.stag.max)
33356 rdev->stats.stag.max = rdev->stats.stag.cur;
33357 mutex_unlock(&rdev->stats.lock);
33358 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
33359 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
33360 }
33361 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
33362 __func__, stag_state, type, pdid, stag_idx);
33363 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
33364 index 79b3dbc..96e5fcc 100644
33365 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
33366 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
33367 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
33368 struct ib_atomic_eth *ateth;
33369 struct ipath_ack_entry *e;
33370 u64 vaddr;
33371 - atomic64_t *maddr;
33372 + atomic64_unchecked_t *maddr;
33373 u64 sdata;
33374 u32 rkey;
33375 u8 next;
33376 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
33377 IB_ACCESS_REMOTE_ATOMIC)))
33378 goto nack_acc_unlck;
33379 /* Perform atomic OP and save result. */
33380 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
33381 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
33382 sdata = be64_to_cpu(ateth->swap_data);
33383 e = &qp->s_ack_queue[qp->r_head_ack_queue];
33384 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
33385 - (u64) atomic64_add_return(sdata, maddr) - sdata :
33386 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
33387 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
33388 be64_to_cpu(ateth->compare_data),
33389 sdata);
33390 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
33391 index 1f95bba..9530f87 100644
33392 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
33393 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
33394 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
33395 unsigned long flags;
33396 struct ib_wc wc;
33397 u64 sdata;
33398 - atomic64_t *maddr;
33399 + atomic64_unchecked_t *maddr;
33400 enum ib_wc_status send_status;
33401
33402 /*
33403 @@ -382,11 +382,11 @@ again:
33404 IB_ACCESS_REMOTE_ATOMIC)))
33405 goto acc_err;
33406 /* Perform atomic OP and save result. */
33407 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
33408 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
33409 sdata = wqe->wr.wr.atomic.compare_add;
33410 *(u64 *) sqp->s_sge.sge.vaddr =
33411 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
33412 - (u64) atomic64_add_return(sdata, maddr) - sdata :
33413 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
33414 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
33415 sdata, wqe->wr.wr.atomic.swap);
33416 goto send_comp;
33417 diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
33418 index e04cbc9..8c247a7 100644
33419 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
33420 +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
33421 @@ -416,7 +416,7 @@ struct mlx4_ib_sriov {
33422 struct mlx4_ib_iboe {
33423 spinlock_t lock;
33424 struct net_device *netdevs[MLX4_MAX_PORTS];
33425 - struct notifier_block nb;
33426 + notifier_block_no_const nb;
33427 union ib_gid gid_table[MLX4_MAX_PORTS][128];
33428 };
33429
33430 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
33431 index 748db2d..5f75cc3 100644
33432 --- a/drivers/infiniband/hw/nes/nes.c
33433 +++ b/drivers/infiniband/hw/nes/nes.c
33434 @@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
33435 LIST_HEAD(nes_adapter_list);
33436 static LIST_HEAD(nes_dev_list);
33437
33438 -atomic_t qps_destroyed;
33439 +atomic_unchecked_t qps_destroyed;
33440
33441 static unsigned int ee_flsh_adapter;
33442 static unsigned int sysfs_nonidx_addr;
33443 @@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
33444 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
33445 struct nes_adapter *nesadapter = nesdev->nesadapter;
33446
33447 - atomic_inc(&qps_destroyed);
33448 + atomic_inc_unchecked(&qps_destroyed);
33449
33450 /* Free the control structures */
33451
33452 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
33453 index 5cac29e..c471744 100644
33454 --- a/drivers/infiniband/hw/nes/nes.h
33455 +++ b/drivers/infiniband/hw/nes/nes.h
33456 @@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
33457 extern unsigned int wqm_quanta;
33458 extern struct list_head nes_adapter_list;
33459
33460 -extern atomic_t cm_connects;
33461 -extern atomic_t cm_accepts;
33462 -extern atomic_t cm_disconnects;
33463 -extern atomic_t cm_closes;
33464 -extern atomic_t cm_connecteds;
33465 -extern atomic_t cm_connect_reqs;
33466 -extern atomic_t cm_rejects;
33467 -extern atomic_t mod_qp_timouts;
33468 -extern atomic_t qps_created;
33469 -extern atomic_t qps_destroyed;
33470 -extern atomic_t sw_qps_destroyed;
33471 +extern atomic_unchecked_t cm_connects;
33472 +extern atomic_unchecked_t cm_accepts;
33473 +extern atomic_unchecked_t cm_disconnects;
33474 +extern atomic_unchecked_t cm_closes;
33475 +extern atomic_unchecked_t cm_connecteds;
33476 +extern atomic_unchecked_t cm_connect_reqs;
33477 +extern atomic_unchecked_t cm_rejects;
33478 +extern atomic_unchecked_t mod_qp_timouts;
33479 +extern atomic_unchecked_t qps_created;
33480 +extern atomic_unchecked_t qps_destroyed;
33481 +extern atomic_unchecked_t sw_qps_destroyed;
33482 extern u32 mh_detected;
33483 extern u32 mh_pauses_sent;
33484 extern u32 cm_packets_sent;
33485 @@ -196,16 +196,16 @@ extern u32 cm_packets_created;
33486 extern u32 cm_packets_received;
33487 extern u32 cm_packets_dropped;
33488 extern u32 cm_packets_retrans;
33489 -extern atomic_t cm_listens_created;
33490 -extern atomic_t cm_listens_destroyed;
33491 +extern atomic_unchecked_t cm_listens_created;
33492 +extern atomic_unchecked_t cm_listens_destroyed;
33493 extern u32 cm_backlog_drops;
33494 -extern atomic_t cm_loopbacks;
33495 -extern atomic_t cm_nodes_created;
33496 -extern atomic_t cm_nodes_destroyed;
33497 -extern atomic_t cm_accel_dropped_pkts;
33498 -extern atomic_t cm_resets_recvd;
33499 -extern atomic_t pau_qps_created;
33500 -extern atomic_t pau_qps_destroyed;
33501 +extern atomic_unchecked_t cm_loopbacks;
33502 +extern atomic_unchecked_t cm_nodes_created;
33503 +extern atomic_unchecked_t cm_nodes_destroyed;
33504 +extern atomic_unchecked_t cm_accel_dropped_pkts;
33505 +extern atomic_unchecked_t cm_resets_recvd;
33506 +extern atomic_unchecked_t pau_qps_created;
33507 +extern atomic_unchecked_t pau_qps_destroyed;
33508
33509 extern u32 int_mod_timer_init;
33510 extern u32 int_mod_cq_depth_256;
33511 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
33512 index cfaacaf..fa0722e 100644
33513 --- a/drivers/infiniband/hw/nes/nes_cm.c
33514 +++ b/drivers/infiniband/hw/nes/nes_cm.c
33515 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
33516 u32 cm_packets_retrans;
33517 u32 cm_packets_created;
33518 u32 cm_packets_received;
33519 -atomic_t cm_listens_created;
33520 -atomic_t cm_listens_destroyed;
33521 +atomic_unchecked_t cm_listens_created;
33522 +atomic_unchecked_t cm_listens_destroyed;
33523 u32 cm_backlog_drops;
33524 -atomic_t cm_loopbacks;
33525 -atomic_t cm_nodes_created;
33526 -atomic_t cm_nodes_destroyed;
33527 -atomic_t cm_accel_dropped_pkts;
33528 -atomic_t cm_resets_recvd;
33529 +atomic_unchecked_t cm_loopbacks;
33530 +atomic_unchecked_t cm_nodes_created;
33531 +atomic_unchecked_t cm_nodes_destroyed;
33532 +atomic_unchecked_t cm_accel_dropped_pkts;
33533 +atomic_unchecked_t cm_resets_recvd;
33534
33535 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
33536 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
33537 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
33538
33539 static struct nes_cm_core *g_cm_core;
33540
33541 -atomic_t cm_connects;
33542 -atomic_t cm_accepts;
33543 -atomic_t cm_disconnects;
33544 -atomic_t cm_closes;
33545 -atomic_t cm_connecteds;
33546 -atomic_t cm_connect_reqs;
33547 -atomic_t cm_rejects;
33548 +atomic_unchecked_t cm_connects;
33549 +atomic_unchecked_t cm_accepts;
33550 +atomic_unchecked_t cm_disconnects;
33551 +atomic_unchecked_t cm_closes;
33552 +atomic_unchecked_t cm_connecteds;
33553 +atomic_unchecked_t cm_connect_reqs;
33554 +atomic_unchecked_t cm_rejects;
33555
33556 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
33557 {
33558 @@ -1281,7 +1281,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
33559 kfree(listener);
33560 listener = NULL;
33561 ret = 0;
33562 - atomic_inc(&cm_listens_destroyed);
33563 + atomic_inc_unchecked(&cm_listens_destroyed);
33564 } else {
33565 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
33566 }
33567 @@ -1480,7 +1480,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
33568 cm_node->rem_mac);
33569
33570 add_hte_node(cm_core, cm_node);
33571 - atomic_inc(&cm_nodes_created);
33572 + atomic_inc_unchecked(&cm_nodes_created);
33573
33574 return cm_node;
33575 }
33576 @@ -1538,7 +1538,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
33577 }
33578
33579 atomic_dec(&cm_core->node_cnt);
33580 - atomic_inc(&cm_nodes_destroyed);
33581 + atomic_inc_unchecked(&cm_nodes_destroyed);
33582 nesqp = cm_node->nesqp;
33583 if (nesqp) {
33584 nesqp->cm_node = NULL;
33585 @@ -1602,7 +1602,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
33586
33587 static void drop_packet(struct sk_buff *skb)
33588 {
33589 - atomic_inc(&cm_accel_dropped_pkts);
33590 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
33591 dev_kfree_skb_any(skb);
33592 }
33593
33594 @@ -1665,7 +1665,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
33595 {
33596
33597 int reset = 0; /* whether to send reset in case of err.. */
33598 - atomic_inc(&cm_resets_recvd);
33599 + atomic_inc_unchecked(&cm_resets_recvd);
33600 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
33601 " refcnt=%d\n", cm_node, cm_node->state,
33602 atomic_read(&cm_node->ref_count));
33603 @@ -2306,7 +2306,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
33604 rem_ref_cm_node(cm_node->cm_core, cm_node);
33605 return NULL;
33606 }
33607 - atomic_inc(&cm_loopbacks);
33608 + atomic_inc_unchecked(&cm_loopbacks);
33609 loopbackremotenode->loopbackpartner = cm_node;
33610 loopbackremotenode->tcp_cntxt.rcv_wscale =
33611 NES_CM_DEFAULT_RCV_WND_SCALE;
33612 @@ -2581,7 +2581,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
33613 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
33614 else {
33615 rem_ref_cm_node(cm_core, cm_node);
33616 - atomic_inc(&cm_accel_dropped_pkts);
33617 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
33618 dev_kfree_skb_any(skb);
33619 }
33620 break;
33621 @@ -2889,7 +2889,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
33622
33623 if ((cm_id) && (cm_id->event_handler)) {
33624 if (issue_disconn) {
33625 - atomic_inc(&cm_disconnects);
33626 + atomic_inc_unchecked(&cm_disconnects);
33627 cm_event.event = IW_CM_EVENT_DISCONNECT;
33628 cm_event.status = disconn_status;
33629 cm_event.local_addr = cm_id->local_addr;
33630 @@ -2911,7 +2911,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
33631 }
33632
33633 if (issue_close) {
33634 - atomic_inc(&cm_closes);
33635 + atomic_inc_unchecked(&cm_closes);
33636 nes_disconnect(nesqp, 1);
33637
33638 cm_id->provider_data = nesqp;
33639 @@ -3047,7 +3047,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
33640
33641 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
33642 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
33643 - atomic_inc(&cm_accepts);
33644 + atomic_inc_unchecked(&cm_accepts);
33645
33646 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
33647 netdev_refcnt_read(nesvnic->netdev));
33648 @@ -3242,7 +3242,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
33649 struct nes_cm_core *cm_core;
33650 u8 *start_buff;
33651
33652 - atomic_inc(&cm_rejects);
33653 + atomic_inc_unchecked(&cm_rejects);
33654 cm_node = (struct nes_cm_node *)cm_id->provider_data;
33655 loopback = cm_node->loopbackpartner;
33656 cm_core = cm_node->cm_core;
33657 @@ -3302,7 +3302,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
33658 ntohl(cm_id->local_addr.sin_addr.s_addr),
33659 ntohs(cm_id->local_addr.sin_port));
33660
33661 - atomic_inc(&cm_connects);
33662 + atomic_inc_unchecked(&cm_connects);
33663 nesqp->active_conn = 1;
33664
33665 /* cache the cm_id in the qp */
33666 @@ -3412,7 +3412,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
33667 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
33668 return err;
33669 }
33670 - atomic_inc(&cm_listens_created);
33671 + atomic_inc_unchecked(&cm_listens_created);
33672 }
33673
33674 cm_id->add_ref(cm_id);
33675 @@ -3513,7 +3513,7 @@ static void cm_event_connected(struct nes_cm_event *event)
33676
33677 if (nesqp->destroyed)
33678 return;
33679 - atomic_inc(&cm_connecteds);
33680 + atomic_inc_unchecked(&cm_connecteds);
33681 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
33682 " local port 0x%04X. jiffies = %lu.\n",
33683 nesqp->hwqp.qp_id,
33684 @@ -3693,7 +3693,7 @@ static void cm_event_reset(struct nes_cm_event *event)
33685
33686 cm_id->add_ref(cm_id);
33687 ret = cm_id->event_handler(cm_id, &cm_event);
33688 - atomic_inc(&cm_closes);
33689 + atomic_inc_unchecked(&cm_closes);
33690 cm_event.event = IW_CM_EVENT_CLOSE;
33691 cm_event.status = 0;
33692 cm_event.provider_data = cm_id->provider_data;
33693 @@ -3729,7 +3729,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
33694 return;
33695 cm_id = cm_node->cm_id;
33696
33697 - atomic_inc(&cm_connect_reqs);
33698 + atomic_inc_unchecked(&cm_connect_reqs);
33699 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
33700 cm_node, cm_id, jiffies);
33701
33702 @@ -3769,7 +3769,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
33703 return;
33704 cm_id = cm_node->cm_id;
33705
33706 - atomic_inc(&cm_connect_reqs);
33707 + atomic_inc_unchecked(&cm_connect_reqs);
33708 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
33709 cm_node, cm_id, jiffies);
33710
33711 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
33712 index 3ba7be3..c81f6ff 100644
33713 --- a/drivers/infiniband/hw/nes/nes_mgt.c
33714 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
33715 @@ -40,8 +40,8 @@
33716 #include "nes.h"
33717 #include "nes_mgt.h"
33718
33719 -atomic_t pau_qps_created;
33720 -atomic_t pau_qps_destroyed;
33721 +atomic_unchecked_t pau_qps_created;
33722 +atomic_unchecked_t pau_qps_destroyed;
33723
33724 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
33725 {
33726 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
33727 {
33728 struct sk_buff *skb;
33729 unsigned long flags;
33730 - atomic_inc(&pau_qps_destroyed);
33731 + atomic_inc_unchecked(&pau_qps_destroyed);
33732
33733 /* Free packets that have not yet been forwarded */
33734 /* Lock is acquired by skb_dequeue when removing the skb */
33735 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
33736 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
33737 skb_queue_head_init(&nesqp->pau_list);
33738 spin_lock_init(&nesqp->pau_lock);
33739 - atomic_inc(&pau_qps_created);
33740 + atomic_inc_unchecked(&pau_qps_created);
33741 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
33742 }
33743
33744 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
33745 index 0564be7..f68b0f1 100644
33746 --- a/drivers/infiniband/hw/nes/nes_nic.c
33747 +++ b/drivers/infiniband/hw/nes/nes_nic.c
33748 @@ -1272,39 +1272,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
33749 target_stat_values[++index] = mh_detected;
33750 target_stat_values[++index] = mh_pauses_sent;
33751 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
33752 - target_stat_values[++index] = atomic_read(&cm_connects);
33753 - target_stat_values[++index] = atomic_read(&cm_accepts);
33754 - target_stat_values[++index] = atomic_read(&cm_disconnects);
33755 - target_stat_values[++index] = atomic_read(&cm_connecteds);
33756 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
33757 - target_stat_values[++index] = atomic_read(&cm_rejects);
33758 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
33759 - target_stat_values[++index] = atomic_read(&qps_created);
33760 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
33761 - target_stat_values[++index] = atomic_read(&qps_destroyed);
33762 - target_stat_values[++index] = atomic_read(&cm_closes);
33763 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
33764 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
33765 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
33766 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
33767 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
33768 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
33769 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
33770 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
33771 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
33772 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
33773 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
33774 target_stat_values[++index] = cm_packets_sent;
33775 target_stat_values[++index] = cm_packets_bounced;
33776 target_stat_values[++index] = cm_packets_created;
33777 target_stat_values[++index] = cm_packets_received;
33778 target_stat_values[++index] = cm_packets_dropped;
33779 target_stat_values[++index] = cm_packets_retrans;
33780 - target_stat_values[++index] = atomic_read(&cm_listens_created);
33781 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
33782 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
33783 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
33784 target_stat_values[++index] = cm_backlog_drops;
33785 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
33786 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
33787 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
33788 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
33789 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
33790 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
33791 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
33792 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
33793 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
33794 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
33795 target_stat_values[++index] = nesadapter->free_4kpbl;
33796 target_stat_values[++index] = nesadapter->free_256pbl;
33797 target_stat_values[++index] = int_mod_timer_init;
33798 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
33799 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
33800 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
33801 - target_stat_values[++index] = atomic_read(&pau_qps_created);
33802 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
33803 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
33804 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
33805 }
33806
33807 /**
33808 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
33809 index cd0ecb2..7099ff0 100644
33810 --- a/drivers/infiniband/hw/nes/nes_verbs.c
33811 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
33812 @@ -46,9 +46,9 @@
33813
33814 #include <rdma/ib_umem.h>
33815
33816 -atomic_t mod_qp_timouts;
33817 -atomic_t qps_created;
33818 -atomic_t sw_qps_destroyed;
33819 +atomic_unchecked_t mod_qp_timouts;
33820 +atomic_unchecked_t qps_created;
33821 +atomic_unchecked_t sw_qps_destroyed;
33822
33823 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
33824
33825 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
33826 if (init_attr->create_flags)
33827 return ERR_PTR(-EINVAL);
33828
33829 - atomic_inc(&qps_created);
33830 + atomic_inc_unchecked(&qps_created);
33831 switch (init_attr->qp_type) {
33832 case IB_QPT_RC:
33833 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
33834 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
33835 struct iw_cm_event cm_event;
33836 int ret = 0;
33837
33838 - atomic_inc(&sw_qps_destroyed);
33839 + atomic_inc_unchecked(&sw_qps_destroyed);
33840 nesqp->destroyed = 1;
33841
33842 /* Blow away the connection if it exists. */
33843 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
33844 index 4d11575..3e890e5 100644
33845 --- a/drivers/infiniband/hw/qib/qib.h
33846 +++ b/drivers/infiniband/hw/qib/qib.h
33847 @@ -51,6 +51,7 @@
33848 #include <linux/completion.h>
33849 #include <linux/kref.h>
33850 #include <linux/sched.h>
33851 +#include <linux/slab.h>
33852
33853 #include "qib_common.h"
33854 #include "qib_verbs.h"
33855 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
33856 index da739d9..da1c7f4 100644
33857 --- a/drivers/input/gameport/gameport.c
33858 +++ b/drivers/input/gameport/gameport.c
33859 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
33860 */
33861 static void gameport_init_port(struct gameport *gameport)
33862 {
33863 - static atomic_t gameport_no = ATOMIC_INIT(0);
33864 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
33865
33866 __module_get(THIS_MODULE);
33867
33868 mutex_init(&gameport->drv_mutex);
33869 device_initialize(&gameport->dev);
33870 dev_set_name(&gameport->dev, "gameport%lu",
33871 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
33872 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
33873 gameport->dev.bus = &gameport_bus;
33874 gameport->dev.release = gameport_release_port;
33875 if (gameport->parent)
33876 diff --git a/drivers/input/input.c b/drivers/input/input.c
33877 index 53a0dde..abffda7 100644
33878 --- a/drivers/input/input.c
33879 +++ b/drivers/input/input.c
33880 @@ -1902,7 +1902,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
33881 */
33882 int input_register_device(struct input_dev *dev)
33883 {
33884 - static atomic_t input_no = ATOMIC_INIT(0);
33885 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
33886 struct input_handler *handler;
33887 unsigned int packet_size;
33888 const char *path;
33889 @@ -1945,7 +1945,7 @@ int input_register_device(struct input_dev *dev)
33890 dev->setkeycode = input_default_setkeycode;
33891
33892 dev_set_name(&dev->dev, "input%ld",
33893 - (unsigned long) atomic_inc_return(&input_no) - 1);
33894 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
33895
33896 error = device_add(&dev->dev);
33897 if (error)
33898 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
33899 index 04c69af..5f92d00 100644
33900 --- a/drivers/input/joystick/sidewinder.c
33901 +++ b/drivers/input/joystick/sidewinder.c
33902 @@ -30,6 +30,7 @@
33903 #include <linux/kernel.h>
33904 #include <linux/module.h>
33905 #include <linux/slab.h>
33906 +#include <linux/sched.h>
33907 #include <linux/init.h>
33908 #include <linux/input.h>
33909 #include <linux/gameport.h>
33910 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
33911 index 83811e4..0822b90 100644
33912 --- a/drivers/input/joystick/xpad.c
33913 +++ b/drivers/input/joystick/xpad.c
33914 @@ -726,7 +726,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
33915
33916 static int xpad_led_probe(struct usb_xpad *xpad)
33917 {
33918 - static atomic_t led_seq = ATOMIC_INIT(0);
33919 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
33920 long led_no;
33921 struct xpad_led *led;
33922 struct led_classdev *led_cdev;
33923 @@ -739,7 +739,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
33924 if (!led)
33925 return -ENOMEM;
33926
33927 - led_no = (long)atomic_inc_return(&led_seq) - 1;
33928 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
33929
33930 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
33931 led->xpad = xpad;
33932 diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
33933 index e9e8674..f098622 100644
33934 --- a/drivers/input/keyboard/adp5520-keys.c
33935 +++ b/drivers/input/keyboard/adp5520-keys.c
33936 @@ -16,7 +16,7 @@
33937
33938 struct adp5520_keys {
33939 struct input_dev *input;
33940 - struct notifier_block notifier;
33941 + notifier_block_no_const notifier;
33942 struct device *master;
33943 unsigned short keycode[ADP5520_KEYMAPSIZE];
33944 };
33945 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
33946 index 4c842c3..590b0bf 100644
33947 --- a/drivers/input/mousedev.c
33948 +++ b/drivers/input/mousedev.c
33949 @@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
33950
33951 spin_unlock_irq(&client->packet_lock);
33952
33953 - if (copy_to_user(buffer, data, count))
33954 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
33955 return -EFAULT;
33956
33957 return count;
33958 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
33959 index d0f7533..fb8215b 100644
33960 --- a/drivers/input/serio/serio.c
33961 +++ b/drivers/input/serio/serio.c
33962 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
33963 */
33964 static void serio_init_port(struct serio *serio)
33965 {
33966 - static atomic_t serio_no = ATOMIC_INIT(0);
33967 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
33968
33969 __module_get(THIS_MODULE);
33970
33971 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
33972 mutex_init(&serio->drv_mutex);
33973 device_initialize(&serio->dev);
33974 dev_set_name(&serio->dev, "serio%ld",
33975 - (long)atomic_inc_return(&serio_no) - 1);
33976 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
33977 serio->dev.bus = &serio_bus;
33978 serio->dev.release = serio_release_port;
33979 serio->dev.groups = serio_device_attr_groups;
33980 diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
33981 index 36b65cf..6fbd367 100644
33982 --- a/drivers/input/touchscreen/da9034-ts.c
33983 +++ b/drivers/input/touchscreen/da9034-ts.c
33984 @@ -55,7 +55,7 @@ struct da9034_touch {
33985 struct input_dev *input_dev;
33986
33987 struct delayed_work tsi_work;
33988 - struct notifier_block notifier;
33989 + notifier_block_no_const notifier;
33990
33991 int state;
33992
33993 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
33994 index c679867..6e2e34d 100644
33995 --- a/drivers/isdn/capi/capi.c
33996 +++ b/drivers/isdn/capi/capi.c
33997 @@ -83,8 +83,8 @@ struct capiminor {
33998
33999 struct capi20_appl *ap;
34000 u32 ncci;
34001 - atomic_t datahandle;
34002 - atomic_t msgid;
34003 + atomic_unchecked_t datahandle;
34004 + atomic_unchecked_t msgid;
34005
34006 struct tty_port port;
34007 int ttyinstop;
34008 @@ -393,7 +393,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
34009 capimsg_setu16(s, 2, mp->ap->applid);
34010 capimsg_setu8 (s, 4, CAPI_DATA_B3);
34011 capimsg_setu8 (s, 5, CAPI_RESP);
34012 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
34013 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
34014 capimsg_setu32(s, 8, mp->ncci);
34015 capimsg_setu16(s, 12, datahandle);
34016 }
34017 @@ -514,14 +514,14 @@ static void handle_minor_send(struct capiminor *mp)
34018 mp->outbytes -= len;
34019 spin_unlock_bh(&mp->outlock);
34020
34021 - datahandle = atomic_inc_return(&mp->datahandle);
34022 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
34023 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
34024 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
34025 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
34026 capimsg_setu16(skb->data, 2, mp->ap->applid);
34027 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
34028 capimsg_setu8 (skb->data, 5, CAPI_REQ);
34029 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
34030 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
34031 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
34032 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
34033 capimsg_setu16(skb->data, 16, len); /* Data length */
34034 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
34035 index 67abf3f..076b3a6 100644
34036 --- a/drivers/isdn/gigaset/interface.c
34037 +++ b/drivers/isdn/gigaset/interface.c
34038 @@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
34039 }
34040 tty->driver_data = cs;
34041
34042 - ++cs->port.count;
34043 + atomic_inc(&cs->port.count);
34044
34045 - if (cs->port.count == 1) {
34046 + if (atomic_read(&cs->port.count) == 1) {
34047 tty_port_tty_set(&cs->port, tty);
34048 tty->low_latency = 1;
34049 }
34050 @@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
34051
34052 if (!cs->connected)
34053 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
34054 - else if (!cs->port.count)
34055 + else if (!atomic_read(&cs->port.count))
34056 dev_warn(cs->dev, "%s: device not opened\n", __func__);
34057 - else if (!--cs->port.count)
34058 + else if (!atomic_dec_return(&cs->port.count))
34059 tty_port_tty_set(&cs->port, NULL);
34060
34061 mutex_unlock(&cs->mutex);
34062 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
34063 index 821f7ac..28d4030 100644
34064 --- a/drivers/isdn/hardware/avm/b1.c
34065 +++ b/drivers/isdn/hardware/avm/b1.c
34066 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
34067 }
34068 if (left) {
34069 if (t4file->user) {
34070 - if (copy_from_user(buf, dp, left))
34071 + if (left > sizeof buf || copy_from_user(buf, dp, left))
34072 return -EFAULT;
34073 } else {
34074 memcpy(buf, dp, left);
34075 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
34076 }
34077 if (left) {
34078 if (config->user) {
34079 - if (copy_from_user(buf, dp, left))
34080 + if (left > sizeof buf || copy_from_user(buf, dp, left))
34081 return -EFAULT;
34082 } else {
34083 memcpy(buf, dp, left);
34084 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
34085 index dd6b53a..19d9ee6 100644
34086 --- a/drivers/isdn/hardware/eicon/divasync.h
34087 +++ b/drivers/isdn/hardware/eicon/divasync.h
34088 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
34089 } diva_didd_add_adapter_t;
34090 typedef struct _diva_didd_remove_adapter {
34091 IDI_CALL p_request;
34092 -} diva_didd_remove_adapter_t;
34093 +} __no_const diva_didd_remove_adapter_t;
34094 typedef struct _diva_didd_read_adapter_array {
34095 void *buffer;
34096 dword length;
34097 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
34098 index d303e65..28bcb7b 100644
34099 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
34100 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
34101 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
34102 typedef struct _diva_os_idi_adapter_interface {
34103 diva_init_card_proc_t cleanup_adapter_proc;
34104 diva_cmd_card_proc_t cmd_proc;
34105 -} diva_os_idi_adapter_interface_t;
34106 +} __no_const diva_os_idi_adapter_interface_t;
34107
34108 typedef struct _diva_os_xdi_adapter {
34109 struct list_head link;
34110 diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
34111 index b817809..409caff 100644
34112 --- a/drivers/isdn/i4l/isdn_tty.c
34113 +++ b/drivers/isdn/i4l/isdn_tty.c
34114 @@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
34115
34116 #ifdef ISDN_DEBUG_MODEM_OPEN
34117 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
34118 - port->count);
34119 + atomic_read(&port->count));
34120 #endif
34121 - port->count++;
34122 + atomic_inc(&port->count);
34123 port->tty = tty;
34124 /*
34125 * Start up serial port
34126 @@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
34127 #endif
34128 return;
34129 }
34130 - if ((tty->count == 1) && (port->count != 1)) {
34131 + if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
34132 /*
34133 * Uh, oh. tty->count is 1, which means that the tty
34134 * structure will be freed. Info->count should always
34135 @@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
34136 * serial port won't be shutdown.
34137 */
34138 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
34139 - "info->count is %d\n", port->count);
34140 - port->count = 1;
34141 + "info->count is %d\n", atomic_read(&port->count));
34142 + atomic_set(&port->count, 1);
34143 }
34144 - if (--port->count < 0) {
34145 + if (atomic_dec_return(&port->count) < 0) {
34146 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
34147 - info->line, port->count);
34148 - port->count = 0;
34149 + info->line, atomic_read(&port->count));
34150 + atomic_set(&port->count, 0);
34151 }
34152 - if (port->count) {
34153 + if (atomic_read(&port->count)) {
34154 #ifdef ISDN_DEBUG_MODEM_OPEN
34155 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
34156 #endif
34157 @@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
34158 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
34159 return;
34160 isdn_tty_shutdown(info);
34161 - port->count = 0;
34162 + atomic_set(&port->count, 0);
34163 port->flags &= ~ASYNC_NORMAL_ACTIVE;
34164 port->tty = NULL;
34165 wake_up_interruptible(&port->open_wait);
34166 @@ -1971,7 +1971,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
34167 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
34168 modem_info *info = &dev->mdm.info[i];
34169
34170 - if (info->port.count == 0)
34171 + if (atomic_read(&info->port.count) == 0)
34172 continue;
34173 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
34174 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
34175 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
34176 index e74df7c..03a03ba 100644
34177 --- a/drivers/isdn/icn/icn.c
34178 +++ b/drivers/isdn/icn/icn.c
34179 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
34180 if (count > len)
34181 count = len;
34182 if (user) {
34183 - if (copy_from_user(msg, buf, count))
34184 + if (count > sizeof msg || copy_from_user(msg, buf, count))
34185 return -EFAULT;
34186 } else
34187 memcpy(msg, buf, count);
34188 diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
34189 index b941685..f7e816b 100644
34190 --- a/drivers/leds/ledtrig-backlight.c
34191 +++ b/drivers/leds/ledtrig-backlight.c
34192 @@ -25,7 +25,7 @@ struct bl_trig_notifier {
34193 struct led_classdev *led;
34194 int brightness;
34195 int old_status;
34196 - struct notifier_block notifier;
34197 + notifier_block_no_const notifier;
34198 unsigned invert;
34199 };
34200
34201 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
34202 index b5fdcb7..5b6c59f 100644
34203 --- a/drivers/lguest/core.c
34204 +++ b/drivers/lguest/core.c
34205 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
34206 * it's worked so far. The end address needs +1 because __get_vm_area
34207 * allocates an extra guard page, so we need space for that.
34208 */
34209 +
34210 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
34211 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
34212 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
34213 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
34214 +#else
34215 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
34216 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
34217 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
34218 +#endif
34219 +
34220 if (!switcher_vma) {
34221 err = -ENOMEM;
34222 printk("lguest: could not map switcher pages high\n");
34223 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
34224 * Now the Switcher is mapped at the right address, we can't fail!
34225 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
34226 */
34227 - memcpy(switcher_vma->addr, start_switcher_text,
34228 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
34229 end_switcher_text - start_switcher_text);
34230
34231 printk(KERN_INFO "lguest: mapped switcher at %p\n",
34232 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
34233 index 4af12e1..0e89afe 100644
34234 --- a/drivers/lguest/x86/core.c
34235 +++ b/drivers/lguest/x86/core.c
34236 @@ -59,7 +59,7 @@ static struct {
34237 /* Offset from where switcher.S was compiled to where we've copied it */
34238 static unsigned long switcher_offset(void)
34239 {
34240 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
34241 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
34242 }
34243
34244 /* This cpu's struct lguest_pages. */
34245 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
34246 * These copies are pretty cheap, so we do them unconditionally: */
34247 /* Save the current Host top-level page directory.
34248 */
34249 +
34250 +#ifdef CONFIG_PAX_PER_CPU_PGD
34251 + pages->state.host_cr3 = read_cr3();
34252 +#else
34253 pages->state.host_cr3 = __pa(current->mm->pgd);
34254 +#endif
34255 +
34256 /*
34257 * Set up the Guest's page tables to see this CPU's pages (and no
34258 * other CPU's pages).
34259 @@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
34260 * compiled-in switcher code and the high-mapped copy we just made.
34261 */
34262 for (i = 0; i < IDT_ENTRIES; i++)
34263 - default_idt_entries[i] += switcher_offset();
34264 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
34265
34266 /*
34267 * Set up the Switcher's per-cpu areas.
34268 @@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
34269 * it will be undisturbed when we switch. To change %cs and jump we
34270 * need this structure to feed to Intel's "lcall" instruction.
34271 */
34272 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
34273 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
34274 lguest_entry.segment = LGUEST_CS;
34275
34276 /*
34277 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
34278 index 40634b0..4f5855e 100644
34279 --- a/drivers/lguest/x86/switcher_32.S
34280 +++ b/drivers/lguest/x86/switcher_32.S
34281 @@ -87,6 +87,7 @@
34282 #include <asm/page.h>
34283 #include <asm/segment.h>
34284 #include <asm/lguest.h>
34285 +#include <asm/processor-flags.h>
34286
34287 // We mark the start of the code to copy
34288 // It's placed in .text tho it's never run here
34289 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
34290 // Changes type when we load it: damn Intel!
34291 // For after we switch over our page tables
34292 // That entry will be read-only: we'd crash.
34293 +
34294 +#ifdef CONFIG_PAX_KERNEXEC
34295 + mov %cr0, %edx
34296 + xor $X86_CR0_WP, %edx
34297 + mov %edx, %cr0
34298 +#endif
34299 +
34300 movl $(GDT_ENTRY_TSS*8), %edx
34301 ltr %dx
34302
34303 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
34304 // Let's clear it again for our return.
34305 // The GDT descriptor of the Host
34306 // Points to the table after two "size" bytes
34307 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
34308 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
34309 // Clear "used" from type field (byte 5, bit 2)
34310 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
34311 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
34312 +
34313 +#ifdef CONFIG_PAX_KERNEXEC
34314 + mov %cr0, %eax
34315 + xor $X86_CR0_WP, %eax
34316 + mov %eax, %cr0
34317 +#endif
34318
34319 // Once our page table's switched, the Guest is live!
34320 // The Host fades as we run this final step.
34321 @@ -295,13 +309,12 @@ deliver_to_host:
34322 // I consulted gcc, and it gave
34323 // These instructions, which I gladly credit:
34324 leal (%edx,%ebx,8), %eax
34325 - movzwl (%eax),%edx
34326 - movl 4(%eax), %eax
34327 - xorw %ax, %ax
34328 - orl %eax, %edx
34329 + movl 4(%eax), %edx
34330 + movw (%eax), %dx
34331 // Now the address of the handler's in %edx
34332 // We call it now: its "iret" drops us home.
34333 - jmp *%edx
34334 + ljmp $__KERNEL_CS, $1f
34335 +1: jmp *%edx
34336
34337 // Every interrupt can come to us here
34338 // But we must truly tell each apart.
34339 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
34340 index 7155945..4bcc562 100644
34341 --- a/drivers/md/bitmap.c
34342 +++ b/drivers/md/bitmap.c
34343 @@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
34344 chunk_kb ? "KB" : "B");
34345 if (bitmap->storage.file) {
34346 seq_printf(seq, ", file: ");
34347 - seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
34348 + seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
34349 }
34350
34351 seq_printf(seq, "\n");
34352 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
34353 index afd9598..528d8f9 100644
34354 --- a/drivers/md/dm-ioctl.c
34355 +++ b/drivers/md/dm-ioctl.c
34356 @@ -1593,7 +1593,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
34357 cmd == DM_LIST_VERSIONS_CMD)
34358 return 0;
34359
34360 - if ((cmd == DM_DEV_CREATE_CMD)) {
34361 + if (cmd == DM_DEV_CREATE_CMD) {
34362 if (!*param->name) {
34363 DMWARN("name not supplied when creating device");
34364 return -EINVAL;
34365 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
34366 index fd61f98..8050783 100644
34367 --- a/drivers/md/dm-raid1.c
34368 +++ b/drivers/md/dm-raid1.c
34369 @@ -40,7 +40,7 @@ enum dm_raid1_error {
34370
34371 struct mirror {
34372 struct mirror_set *ms;
34373 - atomic_t error_count;
34374 + atomic_unchecked_t error_count;
34375 unsigned long error_type;
34376 struct dm_dev *dev;
34377 sector_t offset;
34378 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
34379 struct mirror *m;
34380
34381 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
34382 - if (!atomic_read(&m->error_count))
34383 + if (!atomic_read_unchecked(&m->error_count))
34384 return m;
34385
34386 return NULL;
34387 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
34388 * simple way to tell if a device has encountered
34389 * errors.
34390 */
34391 - atomic_inc(&m->error_count);
34392 + atomic_inc_unchecked(&m->error_count);
34393
34394 if (test_and_set_bit(error_type, &m->error_type))
34395 return;
34396 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
34397 struct mirror *m = get_default_mirror(ms);
34398
34399 do {
34400 - if (likely(!atomic_read(&m->error_count)))
34401 + if (likely(!atomic_read_unchecked(&m->error_count)))
34402 return m;
34403
34404 if (m-- == ms->mirror)
34405 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
34406 {
34407 struct mirror *default_mirror = get_default_mirror(m->ms);
34408
34409 - return !atomic_read(&default_mirror->error_count);
34410 + return !atomic_read_unchecked(&default_mirror->error_count);
34411 }
34412
34413 static int mirror_available(struct mirror_set *ms, struct bio *bio)
34414 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
34415 */
34416 if (likely(region_in_sync(ms, region, 1)))
34417 m = choose_mirror(ms, bio->bi_sector);
34418 - else if (m && atomic_read(&m->error_count))
34419 + else if (m && atomic_read_unchecked(&m->error_count))
34420 m = NULL;
34421
34422 if (likely(m))
34423 @@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
34424 }
34425
34426 ms->mirror[mirror].ms = ms;
34427 - atomic_set(&(ms->mirror[mirror].error_count), 0);
34428 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
34429 ms->mirror[mirror].error_type = 0;
34430 ms->mirror[mirror].offset = offset;
34431
34432 @@ -1356,7 +1356,7 @@ static void mirror_resume(struct dm_target *ti)
34433 */
34434 static char device_status_char(struct mirror *m)
34435 {
34436 - if (!atomic_read(&(m->error_count)))
34437 + if (!atomic_read_unchecked(&(m->error_count)))
34438 return 'A';
34439
34440 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
34441 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
34442 index e2f87653..f279abe 100644
34443 --- a/drivers/md/dm-stripe.c
34444 +++ b/drivers/md/dm-stripe.c
34445 @@ -20,7 +20,7 @@ struct stripe {
34446 struct dm_dev *dev;
34447 sector_t physical_start;
34448
34449 - atomic_t error_count;
34450 + atomic_unchecked_t error_count;
34451 };
34452
34453 struct stripe_c {
34454 @@ -183,7 +183,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
34455 kfree(sc);
34456 return r;
34457 }
34458 - atomic_set(&(sc->stripe[i].error_count), 0);
34459 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
34460 }
34461
34462 ti->private = sc;
34463 @@ -324,7 +324,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
34464 DMEMIT("%d ", sc->stripes);
34465 for (i = 0; i < sc->stripes; i++) {
34466 DMEMIT("%s ", sc->stripe[i].dev->name);
34467 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
34468 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
34469 'D' : 'A';
34470 }
34471 buffer[i] = '\0';
34472 @@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
34473 */
34474 for (i = 0; i < sc->stripes; i++)
34475 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
34476 - atomic_inc(&(sc->stripe[i].error_count));
34477 - if (atomic_read(&(sc->stripe[i].error_count)) <
34478 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
34479 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
34480 DM_IO_ERROR_THRESHOLD)
34481 schedule_work(&sc->trigger_event);
34482 }
34483 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
34484 index 100368e..64262ce 100644
34485 --- a/drivers/md/dm-table.c
34486 +++ b/drivers/md/dm-table.c
34487 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
34488 if (!dev_size)
34489 return 0;
34490
34491 - if ((start >= dev_size) || (start + len > dev_size)) {
34492 + if ((start >= dev_size) || (len > dev_size - start)) {
34493 DMWARN("%s: %s too small for target: "
34494 "start=%llu, len=%llu, dev_size=%llu",
34495 dm_device_name(ti->table->md), bdevname(bdev, b),
34496 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
34497 index 693e149..b7e0fde 100644
34498 --- a/drivers/md/dm-thin-metadata.c
34499 +++ b/drivers/md/dm-thin-metadata.c
34500 @@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
34501 {
34502 pmd->info.tm = pmd->tm;
34503 pmd->info.levels = 2;
34504 - pmd->info.value_type.context = pmd->data_sm;
34505 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
34506 pmd->info.value_type.size = sizeof(__le64);
34507 pmd->info.value_type.inc = data_block_inc;
34508 pmd->info.value_type.dec = data_block_dec;
34509 @@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
34510
34511 pmd->bl_info.tm = pmd->tm;
34512 pmd->bl_info.levels = 1;
34513 - pmd->bl_info.value_type.context = pmd->data_sm;
34514 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
34515 pmd->bl_info.value_type.size = sizeof(__le64);
34516 pmd->bl_info.value_type.inc = data_block_inc;
34517 pmd->bl_info.value_type.dec = data_block_dec;
34518 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
34519 index 77e6eff..913d695 100644
34520 --- a/drivers/md/dm.c
34521 +++ b/drivers/md/dm.c
34522 @@ -182,9 +182,9 @@ struct mapped_device {
34523 /*
34524 * Event handling.
34525 */
34526 - atomic_t event_nr;
34527 + atomic_unchecked_t event_nr;
34528 wait_queue_head_t eventq;
34529 - atomic_t uevent_seq;
34530 + atomic_unchecked_t uevent_seq;
34531 struct list_head uevent_list;
34532 spinlock_t uevent_lock; /* Protect access to uevent_list */
34533
34534 @@ -1847,8 +1847,8 @@ static struct mapped_device *alloc_dev(int minor)
34535 rwlock_init(&md->map_lock);
34536 atomic_set(&md->holders, 1);
34537 atomic_set(&md->open_count, 0);
34538 - atomic_set(&md->event_nr, 0);
34539 - atomic_set(&md->uevent_seq, 0);
34540 + atomic_set_unchecked(&md->event_nr, 0);
34541 + atomic_set_unchecked(&md->uevent_seq, 0);
34542 INIT_LIST_HEAD(&md->uevent_list);
34543 spin_lock_init(&md->uevent_lock);
34544
34545 @@ -1982,7 +1982,7 @@ static void event_callback(void *context)
34546
34547 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
34548
34549 - atomic_inc(&md->event_nr);
34550 + atomic_inc_unchecked(&md->event_nr);
34551 wake_up(&md->eventq);
34552 }
34553
34554 @@ -2637,18 +2637,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
34555
34556 uint32_t dm_next_uevent_seq(struct mapped_device *md)
34557 {
34558 - return atomic_add_return(1, &md->uevent_seq);
34559 + return atomic_add_return_unchecked(1, &md->uevent_seq);
34560 }
34561
34562 uint32_t dm_get_event_nr(struct mapped_device *md)
34563 {
34564 - return atomic_read(&md->event_nr);
34565 + return atomic_read_unchecked(&md->event_nr);
34566 }
34567
34568 int dm_wait_event(struct mapped_device *md, int event_nr)
34569 {
34570 return wait_event_interruptible(md->eventq,
34571 - (event_nr != atomic_read(&md->event_nr)));
34572 + (event_nr != atomic_read_unchecked(&md->event_nr)));
34573 }
34574
34575 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
34576 diff --git a/drivers/md/md.c b/drivers/md/md.c
34577 index 6120071..31d9be2 100644
34578 --- a/drivers/md/md.c
34579 +++ b/drivers/md/md.c
34580 @@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
34581 * start build, activate spare
34582 */
34583 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
34584 -static atomic_t md_event_count;
34585 +static atomic_unchecked_t md_event_count;
34586 void md_new_event(struct mddev *mddev)
34587 {
34588 - atomic_inc(&md_event_count);
34589 + atomic_inc_unchecked(&md_event_count);
34590 wake_up(&md_event_waiters);
34591 }
34592 EXPORT_SYMBOL_GPL(md_new_event);
34593 @@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
34594 */
34595 static void md_new_event_inintr(struct mddev *mddev)
34596 {
34597 - atomic_inc(&md_event_count);
34598 + atomic_inc_unchecked(&md_event_count);
34599 wake_up(&md_event_waiters);
34600 }
34601
34602 @@ -1504,7 +1504,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
34603 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
34604 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
34605 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
34606 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
34607 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
34608
34609 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
34610 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
34611 @@ -1748,7 +1748,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
34612 else
34613 sb->resync_offset = cpu_to_le64(0);
34614
34615 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
34616 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
34617
34618 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
34619 sb->size = cpu_to_le64(mddev->dev_sectors);
34620 @@ -2748,7 +2748,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
34621 static ssize_t
34622 errors_show(struct md_rdev *rdev, char *page)
34623 {
34624 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
34625 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
34626 }
34627
34628 static ssize_t
34629 @@ -2757,7 +2757,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
34630 char *e;
34631 unsigned long n = simple_strtoul(buf, &e, 10);
34632 if (*buf && (*e == 0 || *e == '\n')) {
34633 - atomic_set(&rdev->corrected_errors, n);
34634 + atomic_set_unchecked(&rdev->corrected_errors, n);
34635 return len;
34636 }
34637 return -EINVAL;
34638 @@ -3204,8 +3204,8 @@ int md_rdev_init(struct md_rdev *rdev)
34639 rdev->sb_loaded = 0;
34640 rdev->bb_page = NULL;
34641 atomic_set(&rdev->nr_pending, 0);
34642 - atomic_set(&rdev->read_errors, 0);
34643 - atomic_set(&rdev->corrected_errors, 0);
34644 + atomic_set_unchecked(&rdev->read_errors, 0);
34645 + atomic_set_unchecked(&rdev->corrected_errors, 0);
34646
34647 INIT_LIST_HEAD(&rdev->same_set);
34648 init_waitqueue_head(&rdev->blocked_wait);
34649 @@ -6984,7 +6984,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
34650
34651 spin_unlock(&pers_lock);
34652 seq_printf(seq, "\n");
34653 - seq->poll_event = atomic_read(&md_event_count);
34654 + seq->poll_event = atomic_read_unchecked(&md_event_count);
34655 return 0;
34656 }
34657 if (v == (void*)2) {
34658 @@ -7087,7 +7087,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
34659 return error;
34660
34661 seq = file->private_data;
34662 - seq->poll_event = atomic_read(&md_event_count);
34663 + seq->poll_event = atomic_read_unchecked(&md_event_count);
34664 return error;
34665 }
34666
34667 @@ -7101,7 +7101,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
34668 /* always allow read */
34669 mask = POLLIN | POLLRDNORM;
34670
34671 - if (seq->poll_event != atomic_read(&md_event_count))
34672 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
34673 mask |= POLLERR | POLLPRI;
34674 return mask;
34675 }
34676 @@ -7145,7 +7145,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
34677 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
34678 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
34679 (int)part_stat_read(&disk->part0, sectors[1]) -
34680 - atomic_read(&disk->sync_io);
34681 + atomic_read_unchecked(&disk->sync_io);
34682 /* sync IO will cause sync_io to increase before the disk_stats
34683 * as sync_io is counted when a request starts, and
34684 * disk_stats is counted when it completes.
34685 diff --git a/drivers/md/md.h b/drivers/md/md.h
34686 index af443ab..0f93be3 100644
34687 --- a/drivers/md/md.h
34688 +++ b/drivers/md/md.h
34689 @@ -94,13 +94,13 @@ struct md_rdev {
34690 * only maintained for arrays that
34691 * support hot removal
34692 */
34693 - atomic_t read_errors; /* number of consecutive read errors that
34694 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
34695 * we have tried to ignore.
34696 */
34697 struct timespec last_read_error; /* monotonic time since our
34698 * last read error
34699 */
34700 - atomic_t corrected_errors; /* number of corrected read errors,
34701 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
34702 * for reporting to userspace and storing
34703 * in superblock.
34704 */
34705 @@ -432,7 +432,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
34706
34707 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
34708 {
34709 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
34710 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
34711 }
34712
34713 struct md_personality
34714 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
34715 index f6d29e6..7917f5e 100644
34716 --- a/drivers/md/persistent-data/dm-space-map-disk.c
34717 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
34718 @@ -22,7 +22,7 @@
34719 * Space map interface.
34720 */
34721 struct sm_disk {
34722 - struct dm_space_map sm;
34723 + dm_space_map_no_const sm;
34724
34725 struct ll_disk ll;
34726 struct ll_disk old_ll;
34727 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
34728 index e89ae5e..062e4c2 100644
34729 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
34730 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
34731 @@ -43,7 +43,7 @@ struct block_op {
34732 };
34733
34734 struct sm_metadata {
34735 - struct dm_space_map sm;
34736 + dm_space_map_no_const sm;
34737
34738 struct ll_disk ll;
34739 struct ll_disk old_ll;
34740 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
34741 index 1cbfc6b..56e1dbb 100644
34742 --- a/drivers/md/persistent-data/dm-space-map.h
34743 +++ b/drivers/md/persistent-data/dm-space-map.h
34744 @@ -60,6 +60,7 @@ struct dm_space_map {
34745 int (*root_size)(struct dm_space_map *sm, size_t *result);
34746 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
34747 };
34748 +typedef struct dm_space_map __no_const dm_space_map_no_const;
34749
34750 /*----------------------------------------------------------------*/
34751
34752 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
34753 index a0f7309..5599dbc 100644
34754 --- a/drivers/md/raid1.c
34755 +++ b/drivers/md/raid1.c
34756 @@ -1819,7 +1819,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
34757 if (r1_sync_page_io(rdev, sect, s,
34758 bio->bi_io_vec[idx].bv_page,
34759 READ) != 0)
34760 - atomic_add(s, &rdev->corrected_errors);
34761 + atomic_add_unchecked(s, &rdev->corrected_errors);
34762 }
34763 sectors -= s;
34764 sect += s;
34765 @@ -2041,7 +2041,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
34766 test_bit(In_sync, &rdev->flags)) {
34767 if (r1_sync_page_io(rdev, sect, s,
34768 conf->tmppage, READ)) {
34769 - atomic_add(s, &rdev->corrected_errors);
34770 + atomic_add_unchecked(s, &rdev->corrected_errors);
34771 printk(KERN_INFO
34772 "md/raid1:%s: read error corrected "
34773 "(%d sectors at %llu on %s)\n",
34774 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
34775 index c9acbd7..386cd3e 100644
34776 --- a/drivers/md/raid10.c
34777 +++ b/drivers/md/raid10.c
34778 @@ -1878,7 +1878,7 @@ static void end_sync_read(struct bio *bio, int error)
34779 /* The write handler will notice the lack of
34780 * R10BIO_Uptodate and record any errors etc
34781 */
34782 - atomic_add(r10_bio->sectors,
34783 + atomic_add_unchecked(r10_bio->sectors,
34784 &conf->mirrors[d].rdev->corrected_errors);
34785
34786 /* for reconstruct, we always reschedule after a read.
34787 @@ -2227,7 +2227,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
34788 {
34789 struct timespec cur_time_mon;
34790 unsigned long hours_since_last;
34791 - unsigned int read_errors = atomic_read(&rdev->read_errors);
34792 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
34793
34794 ktime_get_ts(&cur_time_mon);
34795
34796 @@ -2249,9 +2249,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
34797 * overflowing the shift of read_errors by hours_since_last.
34798 */
34799 if (hours_since_last >= 8 * sizeof(read_errors))
34800 - atomic_set(&rdev->read_errors, 0);
34801 + atomic_set_unchecked(&rdev->read_errors, 0);
34802 else
34803 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
34804 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
34805 }
34806
34807 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
34808 @@ -2305,8 +2305,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
34809 return;
34810
34811 check_decay_read_errors(mddev, rdev);
34812 - atomic_inc(&rdev->read_errors);
34813 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
34814 + atomic_inc_unchecked(&rdev->read_errors);
34815 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
34816 char b[BDEVNAME_SIZE];
34817 bdevname(rdev->bdev, b);
34818
34819 @@ -2314,7 +2314,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
34820 "md/raid10:%s: %s: Raid device exceeded "
34821 "read_error threshold [cur %d:max %d]\n",
34822 mdname(mddev), b,
34823 - atomic_read(&rdev->read_errors), max_read_errors);
34824 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
34825 printk(KERN_NOTICE
34826 "md/raid10:%s: %s: Failing raid device\n",
34827 mdname(mddev), b);
34828 @@ -2469,7 +2469,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
34829 sect +
34830 choose_data_offset(r10_bio, rdev)),
34831 bdevname(rdev->bdev, b));
34832 - atomic_add(s, &rdev->corrected_errors);
34833 + atomic_add_unchecked(s, &rdev->corrected_errors);
34834 }
34835
34836 rdev_dec_pending(rdev, mddev);
34837 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
34838 index a450268..c4168a9 100644
34839 --- a/drivers/md/raid5.c
34840 +++ b/drivers/md/raid5.c
34841 @@ -1789,21 +1789,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
34842 mdname(conf->mddev), STRIPE_SECTORS,
34843 (unsigned long long)s,
34844 bdevname(rdev->bdev, b));
34845 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
34846 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
34847 clear_bit(R5_ReadError, &sh->dev[i].flags);
34848 clear_bit(R5_ReWrite, &sh->dev[i].flags);
34849 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
34850 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
34851
34852 - if (atomic_read(&rdev->read_errors))
34853 - atomic_set(&rdev->read_errors, 0);
34854 + if (atomic_read_unchecked(&rdev->read_errors))
34855 + atomic_set_unchecked(&rdev->read_errors, 0);
34856 } else {
34857 const char *bdn = bdevname(rdev->bdev, b);
34858 int retry = 0;
34859 int set_bad = 0;
34860
34861 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
34862 - atomic_inc(&rdev->read_errors);
34863 + atomic_inc_unchecked(&rdev->read_errors);
34864 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
34865 printk_ratelimited(
34866 KERN_WARNING
34867 @@ -1831,7 +1831,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
34868 mdname(conf->mddev),
34869 (unsigned long long)s,
34870 bdn);
34871 - } else if (atomic_read(&rdev->read_errors)
34872 + } else if (atomic_read_unchecked(&rdev->read_errors)
34873 > conf->max_nr_stripes)
34874 printk(KERN_WARNING
34875 "md/raid:%s: Too many read errors, failing device %s.\n",
34876 diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
34877 index 18b2c4a..3d7d59f 100644
34878 --- a/drivers/md/raid5.h
34879 +++ b/drivers/md/raid5.h
34880 @@ -439,7 +439,7 @@ struct r5conf {
34881 * cpu hotplug while reshaping
34882 */
34883 #ifdef CONFIG_HOTPLUG_CPU
34884 - struct notifier_block cpu_notify;
34885 + notifier_block_no_const cpu_notify;
34886 #endif
34887
34888 /*
34889 diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
34890 index fa7188a..04a045e 100644
34891 --- a/drivers/media/dvb-core/dvb_demux.h
34892 +++ b/drivers/media/dvb-core/dvb_demux.h
34893 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
34894 union {
34895 dmx_ts_cb ts;
34896 dmx_section_cb sec;
34897 - } cb;
34898 + } __no_const cb;
34899
34900 struct dvb_demux *demux;
34901 void *priv;
34902 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
34903 index d33101a..6b13069 100644
34904 --- a/drivers/media/dvb-core/dvbdev.c
34905 +++ b/drivers/media/dvb-core/dvbdev.c
34906 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
34907 const struct dvb_device *template, void *priv, int type)
34908 {
34909 struct dvb_device *dvbdev;
34910 - struct file_operations *dvbdevfops;
34911 + file_operations_no_const *dvbdevfops;
34912 struct device *clsdev;
34913 int minor;
34914 int id;
34915 diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
34916 index 404f63a..4796533 100644
34917 --- a/drivers/media/dvb-frontends/dib3000.h
34918 +++ b/drivers/media/dvb-frontends/dib3000.h
34919 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
34920 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
34921 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
34922 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
34923 -};
34924 +} __no_const;
34925
34926 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
34927 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
34928 diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
34929 index 3aa6856..435ad25 100644
34930 --- a/drivers/media/pci/cx88/cx88-alsa.c
34931 +++ b/drivers/media/pci/cx88/cx88-alsa.c
34932 @@ -749,7 +749,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
34933 * Only boards with eeprom and byte 1 at eeprom=1 have it
34934 */
34935
34936 -static const struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
34937 +static const struct pci_device_id cx88_audio_pci_tbl[] __devinitconst = {
34938 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34939 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34940 {0, }
34941 diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
34942 index feff57e..66a2c67 100644
34943 --- a/drivers/media/pci/ddbridge/ddbridge-core.c
34944 +++ b/drivers/media/pci/ddbridge/ddbridge-core.c
34945 @@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
34946 .subvendor = _subvend, .subdevice = _subdev, \
34947 .driver_data = (unsigned long)&_driverdata }
34948
34949 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
34950 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
34951 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
34952 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
34953 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
34954 diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
34955 index 96a13ed..6df45b4 100644
34956 --- a/drivers/media/pci/ngene/ngene-cards.c
34957 +++ b/drivers/media/pci/ngene/ngene-cards.c
34958 @@ -741,7 +741,7 @@ static struct ngene_info ngene_info_terratec = {
34959
34960 /****************************************************************************/
34961
34962 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
34963 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
34964 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
34965 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
34966 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
34967 diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
34968 index a3b1a34..71ce0e3 100644
34969 --- a/drivers/media/platform/omap/omap_vout.c
34970 +++ b/drivers/media/platform/omap/omap_vout.c
34971 @@ -65,7 +65,6 @@ enum omap_vout_channels {
34972 OMAP_VIDEO2,
34973 };
34974
34975 -static struct videobuf_queue_ops video_vbq_ops;
34976 /* Variables configurable through module params*/
34977 static u32 video1_numbuffers = 3;
34978 static u32 video2_numbuffers = 3;
34979 @@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
34980 {
34981 struct videobuf_queue *q;
34982 struct omap_vout_device *vout = NULL;
34983 + static struct videobuf_queue_ops video_vbq_ops = {
34984 + .buf_setup = omap_vout_buffer_setup,
34985 + .buf_prepare = omap_vout_buffer_prepare,
34986 + .buf_release = omap_vout_buffer_release,
34987 + .buf_queue = omap_vout_buffer_queue,
34988 + };
34989
34990 vout = video_drvdata(file);
34991 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
34992 @@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
34993 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
34994
34995 q = &vout->vbq;
34996 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
34997 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
34998 - video_vbq_ops.buf_release = omap_vout_buffer_release;
34999 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
35000 spin_lock_init(&vout->vbq_lock);
35001
35002 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
35003 diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
35004 index 02194c0..091733b 100644
35005 --- a/drivers/media/platform/timblogiw.c
35006 +++ b/drivers/media/platform/timblogiw.c
35007 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
35008
35009 /* Platform device functions */
35010
35011 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35012 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
35013 .vidioc_querycap = timblogiw_querycap,
35014 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
35015 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
35016 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35017 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
35018 };
35019
35020 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
35021 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
35022 .owner = THIS_MODULE,
35023 .open = timblogiw_open,
35024 .release = timblogiw_close,
35025 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
35026 index 697a421..16c5a5f 100644
35027 --- a/drivers/media/radio/radio-cadet.c
35028 +++ b/drivers/media/radio/radio-cadet.c
35029 @@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
35030 unsigned char readbuf[RDS_BUFFER];
35031 int i = 0;
35032
35033 + if (count > RDS_BUFFER)
35034 + return -EFAULT;
35035 mutex_lock(&dev->lock);
35036 if (dev->rdsstat == 0)
35037 cadet_start_rds(dev);
35038 @@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
35039 while (i < count && dev->rdsin != dev->rdsout)
35040 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
35041
35042 - if (i && copy_to_user(data, readbuf, i))
35043 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
35044 i = -EFAULT;
35045 unlock:
35046 mutex_unlock(&dev->lock);
35047 diff --git a/drivers/media/radio/radio-tea5777.h b/drivers/media/radio/radio-tea5777.h
35048 index 4ea43a9..66f4a8f 100644
35049 --- a/drivers/media/radio/radio-tea5777.h
35050 +++ b/drivers/media/radio/radio-tea5777.h
35051 @@ -63,7 +63,7 @@ struct radio_tea5777_ops {
35052
35053 struct radio_tea5777 {
35054 struct v4l2_device *v4l2_dev;
35055 - struct v4l2_file_operations fops;
35056 + v4l2_file_operations_no_const fops;
35057 struct video_device vd; /* video device */
35058 bool has_am; /* Device can tune to AM freqs */
35059 bool write_before_read; /* must write before read quirk */
35060 diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
35061 index 66a56ef..d139911 100644
35062 --- a/drivers/media/usb/au0828/au0828.h
35063 +++ b/drivers/media/usb/au0828/au0828.h
35064 @@ -191,7 +191,7 @@ struct au0828_dev {
35065
35066 /* I2C */
35067 struct i2c_adapter i2c_adap;
35068 - struct i2c_algorithm i2c_algo;
35069 + i2c_algorithm_no_const i2c_algo;
35070 struct i2c_client i2c_client;
35071 u32 i2c_rc;
35072
35073 diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
35074 index 3940bb0..fb3952a 100644
35075 --- a/drivers/media/usb/dvb-usb/cxusb.c
35076 +++ b/drivers/media/usb/dvb-usb/cxusb.c
35077 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
35078
35079 struct dib0700_adapter_state {
35080 int (*set_param_save) (struct dvb_frontend *);
35081 -};
35082 +} __no_const;
35083
35084 static int dib7070_set_param_override(struct dvb_frontend *fe)
35085 {
35086 diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
35087 index 9382895..ac8093c 100644
35088 --- a/drivers/media/usb/dvb-usb/dw2102.c
35089 +++ b/drivers/media/usb/dvb-usb/dw2102.c
35090 @@ -95,7 +95,7 @@ struct su3000_state {
35091
35092 struct s6x0_state {
35093 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
35094 -};
35095 +} __no_const;
35096
35097 /* debug */
35098 static int dvb_usb_dw2102_debug;
35099 diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
35100 index 036952f..80d356d 100644
35101 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
35102 +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
35103 @@ -196,7 +196,7 @@ struct pvr2_hdw {
35104
35105 /* I2C stuff */
35106 struct i2c_adapter i2c_adap;
35107 - struct i2c_algorithm i2c_algo;
35108 + i2c_algorithm_no_const i2c_algo;
35109 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
35110 int i2c_cx25840_hack_state;
35111 int i2c_linked;
35112 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
35113 index fb69baa..cf7ad22 100644
35114 --- a/drivers/message/fusion/mptbase.c
35115 +++ b/drivers/message/fusion/mptbase.c
35116 @@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
35117 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
35118 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
35119
35120 +#ifdef CONFIG_GRKERNSEC_HIDESYM
35121 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
35122 +#else
35123 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
35124 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
35125 +#endif
35126 +
35127 /*
35128 * Rounding UP to nearest 4-kB boundary here...
35129 */
35130 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
35131 index 551262e..7551198 100644
35132 --- a/drivers/message/fusion/mptsas.c
35133 +++ b/drivers/message/fusion/mptsas.c
35134 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
35135 return 0;
35136 }
35137
35138 +static inline void
35139 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
35140 +{
35141 + if (phy_info->port_details) {
35142 + phy_info->port_details->rphy = rphy;
35143 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
35144 + ioc->name, rphy));
35145 + }
35146 +
35147 + if (rphy) {
35148 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
35149 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
35150 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
35151 + ioc->name, rphy, rphy->dev.release));
35152 + }
35153 +}
35154 +
35155 /* no mutex */
35156 static void
35157 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
35158 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
35159 return NULL;
35160 }
35161
35162 -static inline void
35163 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
35164 -{
35165 - if (phy_info->port_details) {
35166 - phy_info->port_details->rphy = rphy;
35167 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
35168 - ioc->name, rphy));
35169 - }
35170 -
35171 - if (rphy) {
35172 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
35173 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
35174 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
35175 - ioc->name, rphy, rphy->dev.release));
35176 - }
35177 -}
35178 -
35179 static inline struct sas_port *
35180 mptsas_get_port(struct mptsas_phyinfo *phy_info)
35181 {
35182 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
35183 index 0c3ced7..1fe34ec 100644
35184 --- a/drivers/message/fusion/mptscsih.c
35185 +++ b/drivers/message/fusion/mptscsih.c
35186 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
35187
35188 h = shost_priv(SChost);
35189
35190 - if (h) {
35191 - if (h->info_kbuf == NULL)
35192 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
35193 - return h->info_kbuf;
35194 - h->info_kbuf[0] = '\0';
35195 + if (!h)
35196 + return NULL;
35197
35198 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
35199 - h->info_kbuf[size-1] = '\0';
35200 - }
35201 + if (h->info_kbuf == NULL)
35202 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
35203 + return h->info_kbuf;
35204 + h->info_kbuf[0] = '\0';
35205 +
35206 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
35207 + h->info_kbuf[size-1] = '\0';
35208
35209 return h->info_kbuf;
35210 }
35211 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
35212 index 8001aa6..b137580 100644
35213 --- a/drivers/message/i2o/i2o_proc.c
35214 +++ b/drivers/message/i2o/i2o_proc.c
35215 @@ -255,12 +255,6 @@ static char *scsi_devices[] = {
35216 "Array Controller Device"
35217 };
35218
35219 -static char *chtostr(char *tmp, u8 *chars, int n)
35220 -{
35221 - tmp[0] = 0;
35222 - return strncat(tmp, (char *)chars, n);
35223 -}
35224 -
35225 static int i2o_report_query_status(struct seq_file *seq, int block_status,
35226 char *group)
35227 {
35228 @@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
35229 } *result;
35230
35231 i2o_exec_execute_ddm_table ddm_table;
35232 - char tmp[28 + 1];
35233
35234 result = kmalloc(sizeof(*result), GFP_KERNEL);
35235 if (!result)
35236 @@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
35237
35238 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
35239 seq_printf(seq, "%-#8x", ddm_table.module_id);
35240 - seq_printf(seq, "%-29s",
35241 - chtostr(tmp, ddm_table.module_name_version, 28));
35242 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
35243 seq_printf(seq, "%9d ", ddm_table.data_size);
35244 seq_printf(seq, "%8d", ddm_table.code_size);
35245
35246 @@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
35247
35248 i2o_driver_result_table *result;
35249 i2o_driver_store_table *dst;
35250 - char tmp[28 + 1];
35251
35252 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
35253 if (result == NULL)
35254 @@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
35255
35256 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
35257 seq_printf(seq, "%-#8x", dst->module_id);
35258 - seq_printf(seq, "%-29s",
35259 - chtostr(tmp, dst->module_name_version, 28));
35260 - seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
35261 + seq_printf(seq, "%-.28s", dst->module_name_version);
35262 + seq_printf(seq, "%-.8s", dst->date);
35263 seq_printf(seq, "%8d ", dst->module_size);
35264 seq_printf(seq, "%8d ", dst->mpb_size);
35265 seq_printf(seq, "0x%04x", dst->module_flags);
35266 @@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
35267 // == (allow) 512d bytes (max)
35268 static u16 *work16 = (u16 *) work32;
35269 int token;
35270 - char tmp[16 + 1];
35271
35272 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
35273
35274 @@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
35275 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
35276 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
35277 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
35278 - seq_printf(seq, "Vendor info : %s\n",
35279 - chtostr(tmp, (u8 *) (work32 + 2), 16));
35280 - seq_printf(seq, "Product info : %s\n",
35281 - chtostr(tmp, (u8 *) (work32 + 6), 16));
35282 - seq_printf(seq, "Description : %s\n",
35283 - chtostr(tmp, (u8 *) (work32 + 10), 16));
35284 - seq_printf(seq, "Product rev. : %s\n",
35285 - chtostr(tmp, (u8 *) (work32 + 14), 8));
35286 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
35287 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
35288 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
35289 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
35290
35291 seq_printf(seq, "Serial number : ");
35292 print_serial_number(seq, (u8 *) (work32 + 16),
35293 @@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
35294 u8 pad[256]; // allow up to 256 byte (max) serial number
35295 } result;
35296
35297 - char tmp[24 + 1];
35298 -
35299 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
35300
35301 if (token < 0) {
35302 @@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
35303 }
35304
35305 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
35306 - seq_printf(seq, "Module name : %s\n",
35307 - chtostr(tmp, result.module_name, 24));
35308 - seq_printf(seq, "Module revision : %s\n",
35309 - chtostr(tmp, result.module_rev, 8));
35310 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
35311 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
35312
35313 seq_printf(seq, "Serial number : ");
35314 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
35315 @@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
35316 u8 instance_number[4];
35317 } result;
35318
35319 - char tmp[64 + 1];
35320 -
35321 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
35322
35323 if (token < 0) {
35324 @@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
35325 return 0;
35326 }
35327
35328 - seq_printf(seq, "Device name : %s\n",
35329 - chtostr(tmp, result.device_name, 64));
35330 - seq_printf(seq, "Service name : %s\n",
35331 - chtostr(tmp, result.service_name, 64));
35332 - seq_printf(seq, "Physical name : %s\n",
35333 - chtostr(tmp, result.physical_location, 64));
35334 - seq_printf(seq, "Instance number : %s\n",
35335 - chtostr(tmp, result.instance_number, 4));
35336 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
35337 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
35338 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
35339 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
35340
35341 return 0;
35342 }
35343 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
35344 index a8c08f3..155fe3d 100644
35345 --- a/drivers/message/i2o/iop.c
35346 +++ b/drivers/message/i2o/iop.c
35347 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
35348
35349 spin_lock_irqsave(&c->context_list_lock, flags);
35350
35351 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
35352 - atomic_inc(&c->context_list_counter);
35353 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
35354 + atomic_inc_unchecked(&c->context_list_counter);
35355
35356 - entry->context = atomic_read(&c->context_list_counter);
35357 + entry->context = atomic_read_unchecked(&c->context_list_counter);
35358
35359 list_add(&entry->list, &c->context_list);
35360
35361 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
35362
35363 #if BITS_PER_LONG == 64
35364 spin_lock_init(&c->context_list_lock);
35365 - atomic_set(&c->context_list_counter, 0);
35366 + atomic_set_unchecked(&c->context_list_counter, 0);
35367 INIT_LIST_HEAD(&c->context_list);
35368 #endif
35369
35370 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
35371 index 7ce65f4..e66e9bc 100644
35372 --- a/drivers/mfd/abx500-core.c
35373 +++ b/drivers/mfd/abx500-core.c
35374 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
35375
35376 struct abx500_device_entry {
35377 struct list_head list;
35378 - struct abx500_ops ops;
35379 + abx500_ops_no_const ops;
35380 struct device *dev;
35381 };
35382
35383 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
35384 index 965c480..71f2db9 100644
35385 --- a/drivers/mfd/janz-cmodio.c
35386 +++ b/drivers/mfd/janz-cmodio.c
35387 @@ -13,6 +13,7 @@
35388
35389 #include <linux/kernel.h>
35390 #include <linux/module.h>
35391 +#include <linux/slab.h>
35392 #include <linux/init.h>
35393 #include <linux/pci.h>
35394 #include <linux/interrupt.h>
35395 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
35396 index 3aa9a96..59cf685 100644
35397 --- a/drivers/misc/kgdbts.c
35398 +++ b/drivers/misc/kgdbts.c
35399 @@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
35400 char before[BREAK_INSTR_SIZE];
35401 char after[BREAK_INSTR_SIZE];
35402
35403 - probe_kernel_read(before, (char *)kgdbts_break_test,
35404 + probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
35405 BREAK_INSTR_SIZE);
35406 init_simple_test();
35407 ts.tst = plant_and_detach_test;
35408 @@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
35409 /* Activate test with initial breakpoint */
35410 if (!is_early)
35411 kgdb_breakpoint();
35412 - probe_kernel_read(after, (char *)kgdbts_break_test,
35413 + probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
35414 BREAK_INSTR_SIZE);
35415 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
35416 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
35417 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
35418 index 4a87e5c..76bdf5c 100644
35419 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
35420 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
35421 @@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
35422 * the lid is closed. This leads to interrupts as soon as a little move
35423 * is done.
35424 */
35425 - atomic_inc(&lis3->count);
35426 + atomic_inc_unchecked(&lis3->count);
35427
35428 wake_up_interruptible(&lis3->misc_wait);
35429 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
35430 @@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
35431 if (lis3->pm_dev)
35432 pm_runtime_get_sync(lis3->pm_dev);
35433
35434 - atomic_set(&lis3->count, 0);
35435 + atomic_set_unchecked(&lis3->count, 0);
35436 return 0;
35437 }
35438
35439 @@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
35440 add_wait_queue(&lis3->misc_wait, &wait);
35441 while (true) {
35442 set_current_state(TASK_INTERRUPTIBLE);
35443 - data = atomic_xchg(&lis3->count, 0);
35444 + data = atomic_xchg_unchecked(&lis3->count, 0);
35445 if (data)
35446 break;
35447
35448 @@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
35449 struct lis3lv02d, miscdev);
35450
35451 poll_wait(file, &lis3->misc_wait, wait);
35452 - if (atomic_read(&lis3->count))
35453 + if (atomic_read_unchecked(&lis3->count))
35454 return POLLIN | POLLRDNORM;
35455 return 0;
35456 }
35457 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
35458 index c439c82..1f20f57 100644
35459 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
35460 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
35461 @@ -297,7 +297,7 @@ struct lis3lv02d {
35462 struct input_polled_dev *idev; /* input device */
35463 struct platform_device *pdev; /* platform device */
35464 struct regulator_bulk_data regulators[2];
35465 - atomic_t count; /* interrupt count after last read */
35466 + atomic_unchecked_t count; /* interrupt count after last read */
35467 union axis_conversion ac; /* hw -> logical axis */
35468 int mapped_btns[3];
35469
35470 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
35471 index 2f30bad..c4c13d0 100644
35472 --- a/drivers/misc/sgi-gru/gruhandles.c
35473 +++ b/drivers/misc/sgi-gru/gruhandles.c
35474 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
35475 unsigned long nsec;
35476
35477 nsec = CLKS2NSEC(clks);
35478 - atomic_long_inc(&mcs_op_statistics[op].count);
35479 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
35480 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
35481 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
35482 if (mcs_op_statistics[op].max < nsec)
35483 mcs_op_statistics[op].max = nsec;
35484 }
35485 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
35486 index 950dbe9..eeef0f8 100644
35487 --- a/drivers/misc/sgi-gru/gruprocfs.c
35488 +++ b/drivers/misc/sgi-gru/gruprocfs.c
35489 @@ -32,9 +32,9 @@
35490
35491 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
35492
35493 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
35494 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
35495 {
35496 - unsigned long val = atomic_long_read(v);
35497 + unsigned long val = atomic_long_read_unchecked(v);
35498
35499 seq_printf(s, "%16lu %s\n", val, id);
35500 }
35501 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
35502
35503 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
35504 for (op = 0; op < mcsop_last; op++) {
35505 - count = atomic_long_read(&mcs_op_statistics[op].count);
35506 - total = atomic_long_read(&mcs_op_statistics[op].total);
35507 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
35508 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
35509 max = mcs_op_statistics[op].max;
35510 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
35511 count ? total / count : 0, max);
35512 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
35513 index 5c3ce24..4915ccb 100644
35514 --- a/drivers/misc/sgi-gru/grutables.h
35515 +++ b/drivers/misc/sgi-gru/grutables.h
35516 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
35517 * GRU statistics.
35518 */
35519 struct gru_stats_s {
35520 - atomic_long_t vdata_alloc;
35521 - atomic_long_t vdata_free;
35522 - atomic_long_t gts_alloc;
35523 - atomic_long_t gts_free;
35524 - atomic_long_t gms_alloc;
35525 - atomic_long_t gms_free;
35526 - atomic_long_t gts_double_allocate;
35527 - atomic_long_t assign_context;
35528 - atomic_long_t assign_context_failed;
35529 - atomic_long_t free_context;
35530 - atomic_long_t load_user_context;
35531 - atomic_long_t load_kernel_context;
35532 - atomic_long_t lock_kernel_context;
35533 - atomic_long_t unlock_kernel_context;
35534 - atomic_long_t steal_user_context;
35535 - atomic_long_t steal_kernel_context;
35536 - atomic_long_t steal_context_failed;
35537 - atomic_long_t nopfn;
35538 - atomic_long_t asid_new;
35539 - atomic_long_t asid_next;
35540 - atomic_long_t asid_wrap;
35541 - atomic_long_t asid_reuse;
35542 - atomic_long_t intr;
35543 - atomic_long_t intr_cbr;
35544 - atomic_long_t intr_tfh;
35545 - atomic_long_t intr_spurious;
35546 - atomic_long_t intr_mm_lock_failed;
35547 - atomic_long_t call_os;
35548 - atomic_long_t call_os_wait_queue;
35549 - atomic_long_t user_flush_tlb;
35550 - atomic_long_t user_unload_context;
35551 - atomic_long_t user_exception;
35552 - atomic_long_t set_context_option;
35553 - atomic_long_t check_context_retarget_intr;
35554 - atomic_long_t check_context_unload;
35555 - atomic_long_t tlb_dropin;
35556 - atomic_long_t tlb_preload_page;
35557 - atomic_long_t tlb_dropin_fail_no_asid;
35558 - atomic_long_t tlb_dropin_fail_upm;
35559 - atomic_long_t tlb_dropin_fail_invalid;
35560 - atomic_long_t tlb_dropin_fail_range_active;
35561 - atomic_long_t tlb_dropin_fail_idle;
35562 - atomic_long_t tlb_dropin_fail_fmm;
35563 - atomic_long_t tlb_dropin_fail_no_exception;
35564 - atomic_long_t tfh_stale_on_fault;
35565 - atomic_long_t mmu_invalidate_range;
35566 - atomic_long_t mmu_invalidate_page;
35567 - atomic_long_t flush_tlb;
35568 - atomic_long_t flush_tlb_gru;
35569 - atomic_long_t flush_tlb_gru_tgh;
35570 - atomic_long_t flush_tlb_gru_zero_asid;
35571 + atomic_long_unchecked_t vdata_alloc;
35572 + atomic_long_unchecked_t vdata_free;
35573 + atomic_long_unchecked_t gts_alloc;
35574 + atomic_long_unchecked_t gts_free;
35575 + atomic_long_unchecked_t gms_alloc;
35576 + atomic_long_unchecked_t gms_free;
35577 + atomic_long_unchecked_t gts_double_allocate;
35578 + atomic_long_unchecked_t assign_context;
35579 + atomic_long_unchecked_t assign_context_failed;
35580 + atomic_long_unchecked_t free_context;
35581 + atomic_long_unchecked_t load_user_context;
35582 + atomic_long_unchecked_t load_kernel_context;
35583 + atomic_long_unchecked_t lock_kernel_context;
35584 + atomic_long_unchecked_t unlock_kernel_context;
35585 + atomic_long_unchecked_t steal_user_context;
35586 + atomic_long_unchecked_t steal_kernel_context;
35587 + atomic_long_unchecked_t steal_context_failed;
35588 + atomic_long_unchecked_t nopfn;
35589 + atomic_long_unchecked_t asid_new;
35590 + atomic_long_unchecked_t asid_next;
35591 + atomic_long_unchecked_t asid_wrap;
35592 + atomic_long_unchecked_t asid_reuse;
35593 + atomic_long_unchecked_t intr;
35594 + atomic_long_unchecked_t intr_cbr;
35595 + atomic_long_unchecked_t intr_tfh;
35596 + atomic_long_unchecked_t intr_spurious;
35597 + atomic_long_unchecked_t intr_mm_lock_failed;
35598 + atomic_long_unchecked_t call_os;
35599 + atomic_long_unchecked_t call_os_wait_queue;
35600 + atomic_long_unchecked_t user_flush_tlb;
35601 + atomic_long_unchecked_t user_unload_context;
35602 + atomic_long_unchecked_t user_exception;
35603 + atomic_long_unchecked_t set_context_option;
35604 + atomic_long_unchecked_t check_context_retarget_intr;
35605 + atomic_long_unchecked_t check_context_unload;
35606 + atomic_long_unchecked_t tlb_dropin;
35607 + atomic_long_unchecked_t tlb_preload_page;
35608 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
35609 + atomic_long_unchecked_t tlb_dropin_fail_upm;
35610 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
35611 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
35612 + atomic_long_unchecked_t tlb_dropin_fail_idle;
35613 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
35614 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
35615 + atomic_long_unchecked_t tfh_stale_on_fault;
35616 + atomic_long_unchecked_t mmu_invalidate_range;
35617 + atomic_long_unchecked_t mmu_invalidate_page;
35618 + atomic_long_unchecked_t flush_tlb;
35619 + atomic_long_unchecked_t flush_tlb_gru;
35620 + atomic_long_unchecked_t flush_tlb_gru_tgh;
35621 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
35622
35623 - atomic_long_t copy_gpa;
35624 - atomic_long_t read_gpa;
35625 + atomic_long_unchecked_t copy_gpa;
35626 + atomic_long_unchecked_t read_gpa;
35627
35628 - atomic_long_t mesq_receive;
35629 - atomic_long_t mesq_receive_none;
35630 - atomic_long_t mesq_send;
35631 - atomic_long_t mesq_send_failed;
35632 - atomic_long_t mesq_noop;
35633 - atomic_long_t mesq_send_unexpected_error;
35634 - atomic_long_t mesq_send_lb_overflow;
35635 - atomic_long_t mesq_send_qlimit_reached;
35636 - atomic_long_t mesq_send_amo_nacked;
35637 - atomic_long_t mesq_send_put_nacked;
35638 - atomic_long_t mesq_page_overflow;
35639 - atomic_long_t mesq_qf_locked;
35640 - atomic_long_t mesq_qf_noop_not_full;
35641 - atomic_long_t mesq_qf_switch_head_failed;
35642 - atomic_long_t mesq_qf_unexpected_error;
35643 - atomic_long_t mesq_noop_unexpected_error;
35644 - atomic_long_t mesq_noop_lb_overflow;
35645 - atomic_long_t mesq_noop_qlimit_reached;
35646 - atomic_long_t mesq_noop_amo_nacked;
35647 - atomic_long_t mesq_noop_put_nacked;
35648 - atomic_long_t mesq_noop_page_overflow;
35649 + atomic_long_unchecked_t mesq_receive;
35650 + atomic_long_unchecked_t mesq_receive_none;
35651 + atomic_long_unchecked_t mesq_send;
35652 + atomic_long_unchecked_t mesq_send_failed;
35653 + atomic_long_unchecked_t mesq_noop;
35654 + atomic_long_unchecked_t mesq_send_unexpected_error;
35655 + atomic_long_unchecked_t mesq_send_lb_overflow;
35656 + atomic_long_unchecked_t mesq_send_qlimit_reached;
35657 + atomic_long_unchecked_t mesq_send_amo_nacked;
35658 + atomic_long_unchecked_t mesq_send_put_nacked;
35659 + atomic_long_unchecked_t mesq_page_overflow;
35660 + atomic_long_unchecked_t mesq_qf_locked;
35661 + atomic_long_unchecked_t mesq_qf_noop_not_full;
35662 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
35663 + atomic_long_unchecked_t mesq_qf_unexpected_error;
35664 + atomic_long_unchecked_t mesq_noop_unexpected_error;
35665 + atomic_long_unchecked_t mesq_noop_lb_overflow;
35666 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
35667 + atomic_long_unchecked_t mesq_noop_amo_nacked;
35668 + atomic_long_unchecked_t mesq_noop_put_nacked;
35669 + atomic_long_unchecked_t mesq_noop_page_overflow;
35670
35671 };
35672
35673 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
35674 tghop_invalidate, mcsop_last};
35675
35676 struct mcs_op_statistic {
35677 - atomic_long_t count;
35678 - atomic_long_t total;
35679 + atomic_long_unchecked_t count;
35680 + atomic_long_unchecked_t total;
35681 unsigned long max;
35682 };
35683
35684 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
35685
35686 #define STAT(id) do { \
35687 if (gru_options & OPT_STATS) \
35688 - atomic_long_inc(&gru_stats.id); \
35689 + atomic_long_inc_unchecked(&gru_stats.id); \
35690 } while (0)
35691
35692 #ifdef CONFIG_SGI_GRU_DEBUG
35693 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
35694 index c862cd4..0d176fe 100644
35695 --- a/drivers/misc/sgi-xp/xp.h
35696 +++ b/drivers/misc/sgi-xp/xp.h
35697 @@ -288,7 +288,7 @@ struct xpc_interface {
35698 xpc_notify_func, void *);
35699 void (*received) (short, int, void *);
35700 enum xp_retval (*partid_to_nasids) (short, void *);
35701 -};
35702 +} __no_const;
35703
35704 extern struct xpc_interface xpc_interface;
35705
35706 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
35707 index b94d5f7..7f494c5 100644
35708 --- a/drivers/misc/sgi-xp/xpc.h
35709 +++ b/drivers/misc/sgi-xp/xpc.h
35710 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
35711 void (*received_payload) (struct xpc_channel *, void *);
35712 void (*notify_senders_of_disconnect) (struct xpc_channel *);
35713 };
35714 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
35715
35716 /* struct xpc_partition act_state values (for XPC HB) */
35717
35718 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
35719 /* found in xpc_main.c */
35720 extern struct device *xpc_part;
35721 extern struct device *xpc_chan;
35722 -extern struct xpc_arch_operations xpc_arch_ops;
35723 +extern xpc_arch_operations_no_const xpc_arch_ops;
35724 extern int xpc_disengage_timelimit;
35725 extern int xpc_disengage_timedout;
35726 extern int xpc_activate_IRQ_rcvd;
35727 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
35728 index 8d082b4..aa749ae 100644
35729 --- a/drivers/misc/sgi-xp/xpc_main.c
35730 +++ b/drivers/misc/sgi-xp/xpc_main.c
35731 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
35732 .notifier_call = xpc_system_die,
35733 };
35734
35735 -struct xpc_arch_operations xpc_arch_ops;
35736 +xpc_arch_operations_no_const xpc_arch_ops;
35737
35738 /*
35739 * Timer function to enforce the timelimit on the partition disengage.
35740 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
35741 index a0e1720..ee63d0b 100644
35742 --- a/drivers/mmc/core/mmc_ops.c
35743 +++ b/drivers/mmc/core/mmc_ops.c
35744 @@ -245,7 +245,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
35745 void *data_buf;
35746 int is_on_stack;
35747
35748 - is_on_stack = object_is_on_stack(buf);
35749 + is_on_stack = object_starts_on_stack(buf);
35750 if (is_on_stack) {
35751 /*
35752 * dma onto stack is unsafe/nonportable, but callers to this
35753 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
35754 index a4eb8b5..8c0628f 100644
35755 --- a/drivers/mtd/devices/doc2000.c
35756 +++ b/drivers/mtd/devices/doc2000.c
35757 @@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
35758
35759 /* The ECC will not be calculated correctly if less than 512 is written */
35760 /* DBB-
35761 - if (len != 0x200 && eccbuf)
35762 + if (len != 0x200)
35763 printk(KERN_WARNING
35764 "ECC needs a full sector write (adr: %lx size %lx)\n",
35765 (long) to, (long) len);
35766 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
35767 index e706a23..b3d262f 100644
35768 --- a/drivers/mtd/nand/denali.c
35769 +++ b/drivers/mtd/nand/denali.c
35770 @@ -26,6 +26,7 @@
35771 #include <linux/pci.h>
35772 #include <linux/mtd/mtd.h>
35773 #include <linux/module.h>
35774 +#include <linux/slab.h>
35775
35776 #include "denali.h"
35777
35778 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
35779 index 51b9d6a..52af9a7 100644
35780 --- a/drivers/mtd/nftlmount.c
35781 +++ b/drivers/mtd/nftlmount.c
35782 @@ -24,6 +24,7 @@
35783 #include <asm/errno.h>
35784 #include <linux/delay.h>
35785 #include <linux/slab.h>
35786 +#include <linux/sched.h>
35787 #include <linux/mtd/mtd.h>
35788 #include <linux/mtd/nand.h>
35789 #include <linux/mtd/nftl.h>
35790 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
35791 index acf2fe4..25cf8fd 100644
35792 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
35793 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
35794 @@ -486,7 +486,7 @@ struct bnx2x_rx_mode_obj {
35795
35796 int (*wait_comp)(struct bnx2x *bp,
35797 struct bnx2x_rx_mode_ramrod_params *p);
35798 -};
35799 +} __no_const;
35800
35801 /********************** Set multicast group ***********************************/
35802
35803 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
35804 index d9308c32..d87b824 100644
35805 --- a/drivers/net/ethernet/broadcom/tg3.h
35806 +++ b/drivers/net/ethernet/broadcom/tg3.h
35807 @@ -140,6 +140,7 @@
35808 #define CHIPREV_ID_5750_A0 0x4000
35809 #define CHIPREV_ID_5750_A1 0x4001
35810 #define CHIPREV_ID_5750_A3 0x4003
35811 +#define CHIPREV_ID_5750_C1 0x4201
35812 #define CHIPREV_ID_5750_C2 0x4202
35813 #define CHIPREV_ID_5752_A0_HW 0x5000
35814 #define CHIPREV_ID_5752_A0 0x6000
35815 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
35816 index 8cffcdf..aadf043 100644
35817 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
35818 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
35819 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
35820 */
35821 struct l2t_skb_cb {
35822 arp_failure_handler_func arp_failure_handler;
35823 -};
35824 +} __no_const;
35825
35826 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
35827
35828 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
35829 index f879e92..726f20f 100644
35830 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
35831 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
35832 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35833 for (i=0; i<ETH_ALEN; i++) {
35834 tmp.addr[i] = dev->dev_addr[i];
35835 }
35836 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35837 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35838 break;
35839
35840 case DE4X5_SET_HWADDR: /* Set the hardware address */
35841 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35842 spin_lock_irqsave(&lp->lock, flags);
35843 memcpy(&statbuf, &lp->pktStats, ioc->len);
35844 spin_unlock_irqrestore(&lp->lock, flags);
35845 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
35846 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
35847 return -EFAULT;
35848 break;
35849 }
35850 diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
35851 index 75d45f8..3d9c55b 100644
35852 --- a/drivers/net/ethernet/dec/tulip/uli526x.c
35853 +++ b/drivers/net/ethernet/dec/tulip/uli526x.c
35854 @@ -129,7 +129,7 @@ struct uli526x_board_info {
35855 struct uli_phy_ops {
35856 void (*write)(struct uli526x_board_info *, u8, u8, u16);
35857 u16 (*read)(struct uli526x_board_info *, u8, u8);
35858 - } phy;
35859 + } __no_const phy;
35860 struct net_device *next_dev; /* next device */
35861 struct pci_dev *pdev; /* PCI device */
35862 spinlock_t lock;
35863 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
35864 index d1b6cc5..cde0d97 100644
35865 --- a/drivers/net/ethernet/emulex/benet/be_main.c
35866 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
35867 @@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
35868
35869 if (wrapped)
35870 newacc += 65536;
35871 - ACCESS_ONCE(*acc) = newacc;
35872 + ACCESS_ONCE_RW(*acc) = newacc;
35873 }
35874
35875 void be_parse_stats(struct be_adapter *adapter)
35876 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
35877 index 74d749e..eefb1bd 100644
35878 --- a/drivers/net/ethernet/faraday/ftgmac100.c
35879 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
35880 @@ -31,6 +31,8 @@
35881 #include <linux/netdevice.h>
35882 #include <linux/phy.h>
35883 #include <linux/platform_device.h>
35884 +#include <linux/interrupt.h>
35885 +#include <linux/irqreturn.h>
35886 #include <net/ip.h>
35887
35888 #include "ftgmac100.h"
35889 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
35890 index b901a01..1ff32ee 100644
35891 --- a/drivers/net/ethernet/faraday/ftmac100.c
35892 +++ b/drivers/net/ethernet/faraday/ftmac100.c
35893 @@ -31,6 +31,8 @@
35894 #include <linux/module.h>
35895 #include <linux/netdevice.h>
35896 #include <linux/platform_device.h>
35897 +#include <linux/interrupt.h>
35898 +#include <linux/irqreturn.h>
35899
35900 #include "ftmac100.h"
35901
35902 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
35903 index d37bfd9..5e13032 100644
35904 --- a/drivers/net/ethernet/intel/e1000e/hw.h
35905 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
35906 @@ -799,6 +799,7 @@ struct e1000_mac_operations {
35907 void (*rar_set)(struct e1000_hw *, u8 *, u32);
35908 s32 (*read_mac_addr)(struct e1000_hw *);
35909 };
35910 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35911
35912 /*
35913 * When to use various PHY register access functions:
35914 @@ -839,6 +840,7 @@ struct e1000_phy_operations {
35915 void (*power_up)(struct e1000_hw *);
35916 void (*power_down)(struct e1000_hw *);
35917 };
35918 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
35919
35920 /* Function pointers for the NVM. */
35921 struct e1000_nvm_operations {
35922 @@ -851,9 +853,10 @@ struct e1000_nvm_operations {
35923 s32 (*validate)(struct e1000_hw *);
35924 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
35925 };
35926 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35927
35928 struct e1000_mac_info {
35929 - struct e1000_mac_operations ops;
35930 + e1000_mac_operations_no_const ops;
35931 u8 addr[ETH_ALEN];
35932 u8 perm_addr[ETH_ALEN];
35933
35934 @@ -894,7 +897,7 @@ struct e1000_mac_info {
35935 };
35936
35937 struct e1000_phy_info {
35938 - struct e1000_phy_operations ops;
35939 + e1000_phy_operations_no_const ops;
35940
35941 enum e1000_phy_type type;
35942
35943 @@ -928,7 +931,7 @@ struct e1000_phy_info {
35944 };
35945
35946 struct e1000_nvm_info {
35947 - struct e1000_nvm_operations ops;
35948 + e1000_nvm_operations_no_const ops;
35949
35950 enum e1000_nvm_type type;
35951 enum e1000_nvm_override override;
35952 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
35953 index c2a51dc..c2bd262 100644
35954 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
35955 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
35956 @@ -327,6 +327,7 @@ struct e1000_mac_operations {
35957 void (*release_swfw_sync)(struct e1000_hw *, u16);
35958
35959 };
35960 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35961
35962 struct e1000_phy_operations {
35963 s32 (*acquire)(struct e1000_hw *);
35964 @@ -343,6 +344,7 @@ struct e1000_phy_operations {
35965 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
35966 s32 (*write_reg)(struct e1000_hw *, u32, u16);
35967 };
35968 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
35969
35970 struct e1000_nvm_operations {
35971 s32 (*acquire)(struct e1000_hw *);
35972 @@ -353,6 +355,7 @@ struct e1000_nvm_operations {
35973 s32 (*validate)(struct e1000_hw *);
35974 s32 (*valid_led_default)(struct e1000_hw *, u16 *);
35975 };
35976 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35977
35978 struct e1000_info {
35979 s32 (*get_invariants)(struct e1000_hw *);
35980 @@ -364,7 +367,7 @@ struct e1000_info {
35981 extern const struct e1000_info e1000_82575_info;
35982
35983 struct e1000_mac_info {
35984 - struct e1000_mac_operations ops;
35985 + e1000_mac_operations_no_const ops;
35986
35987 u8 addr[6];
35988 u8 perm_addr[6];
35989 @@ -402,7 +405,7 @@ struct e1000_mac_info {
35990 };
35991
35992 struct e1000_phy_info {
35993 - struct e1000_phy_operations ops;
35994 + e1000_phy_operations_no_const ops;
35995
35996 enum e1000_phy_type type;
35997
35998 @@ -437,7 +440,7 @@ struct e1000_phy_info {
35999 };
36000
36001 struct e1000_nvm_info {
36002 - struct e1000_nvm_operations ops;
36003 + e1000_nvm_operations_no_const ops;
36004 enum e1000_nvm_type type;
36005 enum e1000_nvm_override override;
36006
36007 @@ -482,6 +485,7 @@ struct e1000_mbx_operations {
36008 s32 (*check_for_ack)(struct e1000_hw *, u16);
36009 s32 (*check_for_rst)(struct e1000_hw *, u16);
36010 };
36011 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
36012
36013 struct e1000_mbx_stats {
36014 u32 msgs_tx;
36015 @@ -493,7 +497,7 @@ struct e1000_mbx_stats {
36016 };
36017
36018 struct e1000_mbx_info {
36019 - struct e1000_mbx_operations ops;
36020 + e1000_mbx_operations_no_const ops;
36021 struct e1000_mbx_stats stats;
36022 u32 timeout;
36023 u32 usec_delay;
36024 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
36025 index 57db3c6..aa825fc 100644
36026 --- a/drivers/net/ethernet/intel/igbvf/vf.h
36027 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
36028 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
36029 s32 (*read_mac_addr)(struct e1000_hw *);
36030 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
36031 };
36032 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
36033
36034 struct e1000_mac_info {
36035 - struct e1000_mac_operations ops;
36036 + e1000_mac_operations_no_const ops;
36037 u8 addr[6];
36038 u8 perm_addr[6];
36039
36040 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
36041 s32 (*check_for_ack)(struct e1000_hw *);
36042 s32 (*check_for_rst)(struct e1000_hw *);
36043 };
36044 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
36045
36046 struct e1000_mbx_stats {
36047 u32 msgs_tx;
36048 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
36049 };
36050
36051 struct e1000_mbx_info {
36052 - struct e1000_mbx_operations ops;
36053 + e1000_mbx_operations_no_const ops;
36054 struct e1000_mbx_stats stats;
36055 u32 timeout;
36056 u32 usec_delay;
36057 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36058 index d929131..aed108f 100644
36059 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36060 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36061 @@ -865,7 +865,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
36062 /* store the new cycle speed */
36063 adapter->cycle_speed = cycle_speed;
36064
36065 - ACCESS_ONCE(adapter->base_incval) = incval;
36066 + ACCESS_ONCE_RW(adapter->base_incval) = incval;
36067 smp_mb();
36068
36069 /* grab the ptp lock */
36070 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
36071 index 0722f33..771758a 100644
36072 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
36073 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
36074 @@ -2800,6 +2800,7 @@ struct ixgbe_eeprom_operations {
36075 s32 (*update_checksum)(struct ixgbe_hw *);
36076 u16 (*calc_checksum)(struct ixgbe_hw *);
36077 };
36078 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
36079
36080 struct ixgbe_mac_operations {
36081 s32 (*init_hw)(struct ixgbe_hw *);
36082 @@ -2866,6 +2867,7 @@ struct ixgbe_mac_operations {
36083 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
36084 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
36085 };
36086 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
36087
36088 struct ixgbe_phy_operations {
36089 s32 (*identify)(struct ixgbe_hw *);
36090 @@ -2885,9 +2887,10 @@ struct ixgbe_phy_operations {
36091 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
36092 s32 (*check_overtemp)(struct ixgbe_hw *);
36093 };
36094 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
36095
36096 struct ixgbe_eeprom_info {
36097 - struct ixgbe_eeprom_operations ops;
36098 + ixgbe_eeprom_operations_no_const ops;
36099 enum ixgbe_eeprom_type type;
36100 u32 semaphore_delay;
36101 u16 word_size;
36102 @@ -2897,7 +2900,7 @@ struct ixgbe_eeprom_info {
36103
36104 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
36105 struct ixgbe_mac_info {
36106 - struct ixgbe_mac_operations ops;
36107 + ixgbe_mac_operations_no_const ops;
36108 enum ixgbe_mac_type type;
36109 u8 addr[ETH_ALEN];
36110 u8 perm_addr[ETH_ALEN];
36111 @@ -2927,7 +2930,7 @@ struct ixgbe_mac_info {
36112 };
36113
36114 struct ixgbe_phy_info {
36115 - struct ixgbe_phy_operations ops;
36116 + ixgbe_phy_operations_no_const ops;
36117 struct mdio_if_info mdio;
36118 enum ixgbe_phy_type type;
36119 u32 id;
36120 @@ -2955,6 +2958,7 @@ struct ixgbe_mbx_operations {
36121 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
36122 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
36123 };
36124 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
36125
36126 struct ixgbe_mbx_stats {
36127 u32 msgs_tx;
36128 @@ -2966,7 +2970,7 @@ struct ixgbe_mbx_stats {
36129 };
36130
36131 struct ixgbe_mbx_info {
36132 - struct ixgbe_mbx_operations ops;
36133 + ixgbe_mbx_operations_no_const ops;
36134 struct ixgbe_mbx_stats stats;
36135 u32 timeout;
36136 u32 usec_delay;
36137 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
36138 index 47f11a5..c817d97 100644
36139 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
36140 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
36141 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
36142 s32 (*clear_vfta)(struct ixgbe_hw *);
36143 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
36144 };
36145 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
36146
36147 enum ixgbe_mac_type {
36148 ixgbe_mac_unknown = 0,
36149 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
36150 };
36151
36152 struct ixgbe_mac_info {
36153 - struct ixgbe_mac_operations ops;
36154 + ixgbe_mac_operations_no_const ops;
36155 u8 addr[6];
36156 u8 perm_addr[6];
36157
36158 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
36159 s32 (*check_for_ack)(struct ixgbe_hw *);
36160 s32 (*check_for_rst)(struct ixgbe_hw *);
36161 };
36162 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
36163
36164 struct ixgbe_mbx_stats {
36165 u32 msgs_tx;
36166 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
36167 };
36168
36169 struct ixgbe_mbx_info {
36170 - struct ixgbe_mbx_operations ops;
36171 + ixgbe_mbx_operations_no_const ops;
36172 struct ixgbe_mbx_stats stats;
36173 u32 timeout;
36174 u32 udelay;
36175 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
36176 index 9e0c1ee..8471f77 100644
36177 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
36178 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
36179 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
36180 void (*link_down)(struct __vxge_hw_device *devh);
36181 void (*crit_err)(struct __vxge_hw_device *devh,
36182 enum vxge_hw_event type, u64 ext_data);
36183 -};
36184 +} __no_const;
36185
36186 /*
36187 * struct __vxge_hw_blockpool_entry - Block private data structure
36188 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
36189 index 4a518a3..936b334 100644
36190 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
36191 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
36192 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
36193 struct vxge_hw_mempool_dma *dma_object,
36194 u32 index,
36195 u32 is_last);
36196 -};
36197 +} __no_const;
36198
36199 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
36200 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
36201 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
36202 index 927aa33..a6c2518 100644
36203 --- a/drivers/net/ethernet/realtek/r8169.c
36204 +++ b/drivers/net/ethernet/realtek/r8169.c
36205 @@ -747,22 +747,22 @@ struct rtl8169_private {
36206 struct mdio_ops {
36207 void (*write)(struct rtl8169_private *, int, int);
36208 int (*read)(struct rtl8169_private *, int);
36209 - } mdio_ops;
36210 + } __no_const mdio_ops;
36211
36212 struct pll_power_ops {
36213 void (*down)(struct rtl8169_private *);
36214 void (*up)(struct rtl8169_private *);
36215 - } pll_power_ops;
36216 + } __no_const pll_power_ops;
36217
36218 struct jumbo_ops {
36219 void (*enable)(struct rtl8169_private *);
36220 void (*disable)(struct rtl8169_private *);
36221 - } jumbo_ops;
36222 + } __no_const jumbo_ops;
36223
36224 struct csi_ops {
36225 void (*write)(struct rtl8169_private *, int, int);
36226 u32 (*read)(struct rtl8169_private *, int);
36227 - } csi_ops;
36228 + } __no_const csi_ops;
36229
36230 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
36231 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
36232 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
36233 index 0767043f..08c2553 100644
36234 --- a/drivers/net/ethernet/sfc/ptp.c
36235 +++ b/drivers/net/ethernet/sfc/ptp.c
36236 @@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
36237 (u32)((u64)ptp->start.dma_addr >> 32));
36238
36239 /* Clear flag that signals MC ready */
36240 - ACCESS_ONCE(*start) = 0;
36241 + ACCESS_ONCE_RW(*start) = 0;
36242 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
36243 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
36244
36245 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36246 index 0c74a70..3bc6f68 100644
36247 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36248 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36249 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
36250
36251 writel(value, ioaddr + MMC_CNTRL);
36252
36253 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
36254 - MMC_CNTRL, value);
36255 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
36256 +// MMC_CNTRL, value);
36257 }
36258
36259 /* To mask all all interrupts.*/
36260 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
36261 index 5fd6f46..ee1f265 100644
36262 --- a/drivers/net/hyperv/hyperv_net.h
36263 +++ b/drivers/net/hyperv/hyperv_net.h
36264 @@ -101,7 +101,7 @@ struct rndis_device {
36265
36266 enum rndis_device_state state;
36267 bool link_state;
36268 - atomic_t new_req_id;
36269 + atomic_unchecked_t new_req_id;
36270
36271 spinlock_t request_lock;
36272 struct list_head req_list;
36273 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
36274 index 928148c..d83298e 100644
36275 --- a/drivers/net/hyperv/rndis_filter.c
36276 +++ b/drivers/net/hyperv/rndis_filter.c
36277 @@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36278 * template
36279 */
36280 set = &rndis_msg->msg.set_req;
36281 - set->req_id = atomic_inc_return(&dev->new_req_id);
36282 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36283
36284 /* Add to the request list */
36285 spin_lock_irqsave(&dev->request_lock, flags);
36286 @@ -760,7 +760,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36287
36288 /* Setup the rndis set */
36289 halt = &request->request_msg.msg.halt_req;
36290 - halt->req_id = atomic_inc_return(&dev->new_req_id);
36291 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36292
36293 /* Ignore return since this msg is optional. */
36294 rndis_filter_send_request(dev, request);
36295 diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
36296 index 7d39add..037e1da 100644
36297 --- a/drivers/net/ieee802154/fakehard.c
36298 +++ b/drivers/net/ieee802154/fakehard.c
36299 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
36300 phy->transmit_power = 0xbf;
36301
36302 dev->netdev_ops = &fake_ops;
36303 - dev->ml_priv = &fake_mlme;
36304 + dev->ml_priv = (void *)&fake_mlme;
36305
36306 priv = netdev_priv(dev);
36307 priv->phy = phy;
36308 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
36309 index 0f0f9ce..0ca5819 100644
36310 --- a/drivers/net/macvtap.c
36311 +++ b/drivers/net/macvtap.c
36312 @@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
36313 return NOTIFY_DONE;
36314 }
36315
36316 -static struct notifier_block macvtap_notifier_block __read_mostly = {
36317 +static struct notifier_block macvtap_notifier_block = {
36318 .notifier_call = macvtap_device_event,
36319 };
36320
36321 diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
36322 index daec9b0..6428fcb 100644
36323 --- a/drivers/net/phy/mdio-bitbang.c
36324 +++ b/drivers/net/phy/mdio-bitbang.c
36325 @@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
36326 struct mdiobb_ctrl *ctrl = bus->priv;
36327
36328 module_put(ctrl->ops->owner);
36329 + mdiobus_unregister(bus);
36330 mdiobus_free(bus);
36331 }
36332 EXPORT_SYMBOL(free_mdio_bitbang);
36333 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
36334 index eb3f5ce..d773730 100644
36335 --- a/drivers/net/ppp/ppp_generic.c
36336 +++ b/drivers/net/ppp/ppp_generic.c
36337 @@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36338 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
36339 struct ppp_stats stats;
36340 struct ppp_comp_stats cstats;
36341 - char *vers;
36342
36343 switch (cmd) {
36344 case SIOCGPPPSTATS:
36345 @@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36346 break;
36347
36348 case SIOCGPPPVER:
36349 - vers = PPP_VERSION;
36350 - if (copy_to_user(addr, vers, strlen(vers) + 1))
36351 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
36352 break;
36353 err = 0;
36354 break;
36355 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
36356 index ad86660..9fd0884 100644
36357 --- a/drivers/net/team/team.c
36358 +++ b/drivers/net/team/team.c
36359 @@ -2601,7 +2601,7 @@ static int team_device_event(struct notifier_block *unused,
36360 return NOTIFY_DONE;
36361 }
36362
36363 -static struct notifier_block team_notifier_block __read_mostly = {
36364 +static struct notifier_block team_notifier_block = {
36365 .notifier_call = team_device_event,
36366 };
36367
36368 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
36369 index 0873cdc..ddb178e 100644
36370 --- a/drivers/net/tun.c
36371 +++ b/drivers/net/tun.c
36372 @@ -1374,7 +1374,7 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
36373 }
36374
36375 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
36376 - unsigned long arg, int ifreq_len)
36377 + unsigned long arg, size_t ifreq_len)
36378 {
36379 struct tun_file *tfile = file->private_data;
36380 struct tun_struct *tun;
36381 @@ -1387,6 +1387,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
36382 int vnet_hdr_sz;
36383 int ret;
36384
36385 + if (ifreq_len > sizeof ifr)
36386 + return -EFAULT;
36387 +
36388 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
36389 if (copy_from_user(&ifr, argp, ifreq_len))
36390 return -EFAULT;
36391 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
36392 index 605a4ba..a883dd1 100644
36393 --- a/drivers/net/usb/hso.c
36394 +++ b/drivers/net/usb/hso.c
36395 @@ -71,7 +71,7 @@
36396 #include <asm/byteorder.h>
36397 #include <linux/serial_core.h>
36398 #include <linux/serial.h>
36399 -
36400 +#include <asm/local.h>
36401
36402 #define MOD_AUTHOR "Option Wireless"
36403 #define MOD_DESCRIPTION "USB High Speed Option driver"
36404 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
36405 struct urb *urb;
36406
36407 urb = serial->rx_urb[0];
36408 - if (serial->port.count > 0) {
36409 + if (atomic_read(&serial->port.count) > 0) {
36410 count = put_rxbuf_data(urb, serial);
36411 if (count == -1)
36412 return;
36413 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
36414 DUMP1(urb->transfer_buffer, urb->actual_length);
36415
36416 /* Anyone listening? */
36417 - if (serial->port.count == 0)
36418 + if (atomic_read(&serial->port.count) == 0)
36419 return;
36420
36421 if (status == 0) {
36422 @@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36423 tty_port_tty_set(&serial->port, tty);
36424
36425 /* check for port already opened, if not set the termios */
36426 - serial->port.count++;
36427 - if (serial->port.count == 1) {
36428 + if (atomic_inc_return(&serial->port.count) == 1) {
36429 serial->rx_state = RX_IDLE;
36430 /* Force default termio settings */
36431 _hso_serial_set_termios(tty, NULL);
36432 @@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36433 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
36434 if (result) {
36435 hso_stop_serial_device(serial->parent);
36436 - serial->port.count--;
36437 + atomic_dec(&serial->port.count);
36438 kref_put(&serial->parent->ref, hso_serial_ref_free);
36439 }
36440 } else {
36441 @@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
36442
36443 /* reset the rts and dtr */
36444 /* do the actual close */
36445 - serial->port.count--;
36446 + atomic_dec(&serial->port.count);
36447
36448 - if (serial->port.count <= 0) {
36449 - serial->port.count = 0;
36450 + if (atomic_read(&serial->port.count) <= 0) {
36451 + atomic_set(&serial->port.count, 0);
36452 tty_port_tty_set(&serial->port, NULL);
36453 if (!usb_gone)
36454 hso_stop_serial_device(serial->parent);
36455 @@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
36456
36457 /* the actual setup */
36458 spin_lock_irqsave(&serial->serial_lock, flags);
36459 - if (serial->port.count)
36460 + if (atomic_read(&serial->port.count))
36461 _hso_serial_set_termios(tty, old);
36462 else
36463 tty->termios = *old;
36464 @@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
36465 D1("Pending read interrupt on port %d\n", i);
36466 spin_lock(&serial->serial_lock);
36467 if (serial->rx_state == RX_IDLE &&
36468 - serial->port.count > 0) {
36469 + atomic_read(&serial->port.count) > 0) {
36470 /* Setup and send a ctrl req read on
36471 * port i */
36472 if (!serial->rx_urb_filled[0]) {
36473 @@ -3078,7 +3077,7 @@ static int hso_resume(struct usb_interface *iface)
36474 /* Start all serial ports */
36475 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
36476 if (serial_table[i] && (serial_table[i]->interface == iface)) {
36477 - if (dev2ser(serial_table[i])->port.count) {
36478 + if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
36479 result =
36480 hso_start_serial_device(serial_table[i], GFP_NOIO);
36481 hso_kick_transmit(dev2ser(serial_table[i]));
36482 diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
36483 index 79c6505..3d957fd 100644
36484 --- a/drivers/net/wimax/i2400m/i2400m.h
36485 +++ b/drivers/net/wimax/i2400m/i2400m.h
36486 @@ -643,7 +643,7 @@ struct i2400m {
36487 struct i2400m_fw *fw_cached; /* protected by rx_lock */
36488 struct i2400m_barker_db *barker;
36489
36490 - struct notifier_block pm_notifier;
36491 + notifier_block_no_const pm_notifier;
36492
36493 /* counting bus reset retries in this boot */
36494 atomic_t bus_reset_retries;
36495 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
36496 index 4521342..9f0a994d 100644
36497 --- a/drivers/net/wireless/ath/ath.h
36498 +++ b/drivers/net/wireless/ath/ath.h
36499 @@ -119,6 +119,7 @@ struct ath_ops {
36500 void (*write_flush) (void *);
36501 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
36502 };
36503 +typedef struct ath_ops __no_const ath_ops_no_const;
36504
36505 struct ath_common;
36506 struct ath_bus_ops;
36507 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36508 index 8d78253..bebbb68 100644
36509 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36510 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36511 @@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36512 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
36513 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
36514
36515 - ACCESS_ONCE(ads->ds_link) = i->link;
36516 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
36517 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
36518 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
36519
36520 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
36521 ctl6 = SM(i->keytype, AR_EncrType);
36522 @@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36523
36524 if ((i->is_first || i->is_last) &&
36525 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
36526 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
36527 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
36528 | set11nTries(i->rates, 1)
36529 | set11nTries(i->rates, 2)
36530 | set11nTries(i->rates, 3)
36531 | (i->dur_update ? AR_DurUpdateEna : 0)
36532 | SM(0, AR_BurstDur);
36533
36534 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
36535 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
36536 | set11nRate(i->rates, 1)
36537 | set11nRate(i->rates, 2)
36538 | set11nRate(i->rates, 3);
36539 } else {
36540 - ACCESS_ONCE(ads->ds_ctl2) = 0;
36541 - ACCESS_ONCE(ads->ds_ctl3) = 0;
36542 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
36543 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
36544 }
36545
36546 if (!i->is_first) {
36547 - ACCESS_ONCE(ads->ds_ctl0) = 0;
36548 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36549 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36550 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
36551 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36552 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36553 return;
36554 }
36555
36556 @@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36557 break;
36558 }
36559
36560 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36561 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36562 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36563 | SM(i->txpower, AR_XmitPower)
36564 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36565 @@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36566 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
36567 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
36568
36569 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36570 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36571 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36572 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36573
36574 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
36575 return;
36576
36577 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36578 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36579 | set11nPktDurRTSCTS(i->rates, 1);
36580
36581 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36582 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36583 | set11nPktDurRTSCTS(i->rates, 3);
36584
36585 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36586 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36587 | set11nRateFlags(i->rates, 1)
36588 | set11nRateFlags(i->rates, 2)
36589 | set11nRateFlags(i->rates, 3)
36590 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36591 index 301bf72..3f5654f 100644
36592 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36593 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36594 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36595 (i->qcu << AR_TxQcuNum_S) | desc_len;
36596
36597 checksum += val;
36598 - ACCESS_ONCE(ads->info) = val;
36599 + ACCESS_ONCE_RW(ads->info) = val;
36600
36601 checksum += i->link;
36602 - ACCESS_ONCE(ads->link) = i->link;
36603 + ACCESS_ONCE_RW(ads->link) = i->link;
36604
36605 checksum += i->buf_addr[0];
36606 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
36607 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
36608 checksum += i->buf_addr[1];
36609 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
36610 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
36611 checksum += i->buf_addr[2];
36612 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
36613 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
36614 checksum += i->buf_addr[3];
36615 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
36616 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
36617
36618 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
36619 - ACCESS_ONCE(ads->ctl3) = val;
36620 + ACCESS_ONCE_RW(ads->ctl3) = val;
36621 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
36622 - ACCESS_ONCE(ads->ctl5) = val;
36623 + ACCESS_ONCE_RW(ads->ctl5) = val;
36624 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
36625 - ACCESS_ONCE(ads->ctl7) = val;
36626 + ACCESS_ONCE_RW(ads->ctl7) = val;
36627 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
36628 - ACCESS_ONCE(ads->ctl9) = val;
36629 + ACCESS_ONCE_RW(ads->ctl9) = val;
36630
36631 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
36632 - ACCESS_ONCE(ads->ctl10) = checksum;
36633 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
36634
36635 if (i->is_first || i->is_last) {
36636 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
36637 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
36638 | set11nTries(i->rates, 1)
36639 | set11nTries(i->rates, 2)
36640 | set11nTries(i->rates, 3)
36641 | (i->dur_update ? AR_DurUpdateEna : 0)
36642 | SM(0, AR_BurstDur);
36643
36644 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
36645 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
36646 | set11nRate(i->rates, 1)
36647 | set11nRate(i->rates, 2)
36648 | set11nRate(i->rates, 3);
36649 } else {
36650 - ACCESS_ONCE(ads->ctl13) = 0;
36651 - ACCESS_ONCE(ads->ctl14) = 0;
36652 + ACCESS_ONCE_RW(ads->ctl13) = 0;
36653 + ACCESS_ONCE_RW(ads->ctl14) = 0;
36654 }
36655
36656 ads->ctl20 = 0;
36657 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36658
36659 ctl17 = SM(i->keytype, AR_EncrType);
36660 if (!i->is_first) {
36661 - ACCESS_ONCE(ads->ctl11) = 0;
36662 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36663 - ACCESS_ONCE(ads->ctl15) = 0;
36664 - ACCESS_ONCE(ads->ctl16) = 0;
36665 - ACCESS_ONCE(ads->ctl17) = ctl17;
36666 - ACCESS_ONCE(ads->ctl18) = 0;
36667 - ACCESS_ONCE(ads->ctl19) = 0;
36668 + ACCESS_ONCE_RW(ads->ctl11) = 0;
36669 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36670 + ACCESS_ONCE_RW(ads->ctl15) = 0;
36671 + ACCESS_ONCE_RW(ads->ctl16) = 0;
36672 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36673 + ACCESS_ONCE_RW(ads->ctl18) = 0;
36674 + ACCESS_ONCE_RW(ads->ctl19) = 0;
36675 return;
36676 }
36677
36678 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36679 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36680 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36681 | SM(i->txpower, AR_XmitPower)
36682 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36683 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36684 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
36685 ctl12 |= SM(val, AR_PAPRDChainMask);
36686
36687 - ACCESS_ONCE(ads->ctl12) = ctl12;
36688 - ACCESS_ONCE(ads->ctl17) = ctl17;
36689 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
36690 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36691
36692 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36693 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36694 | set11nPktDurRTSCTS(i->rates, 1);
36695
36696 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36697 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36698 | set11nPktDurRTSCTS(i->rates, 3);
36699
36700 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
36701 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
36702 | set11nRateFlags(i->rates, 1)
36703 | set11nRateFlags(i->rates, 2)
36704 | set11nRateFlags(i->rates, 3)
36705 | SM(i->rtscts_rate, AR_RTSCTSRate);
36706
36707 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
36708 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
36709 }
36710
36711 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
36712 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
36713 index dbc1b7a..addccc0 100644
36714 --- a/drivers/net/wireless/ath/ath9k/hw.h
36715 +++ b/drivers/net/wireless/ath/ath9k/hw.h
36716 @@ -657,7 +657,7 @@ struct ath_hw_private_ops {
36717
36718 /* ANI */
36719 void (*ani_cache_ini_regs)(struct ath_hw *ah);
36720 -};
36721 +} __no_const;
36722
36723 /**
36724 * struct ath_hw_ops - callbacks used by hardware code and driver code
36725 @@ -687,7 +687,7 @@ struct ath_hw_ops {
36726 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
36727 struct ath_hw_antcomb_conf *antconf);
36728 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
36729 -};
36730 +} __no_const;
36731
36732 struct ath_nf_limits {
36733 s16 max;
36734 @@ -707,7 +707,7 @@ enum ath_cal_list {
36735 #define AH_FASTCC 0x4
36736
36737 struct ath_hw {
36738 - struct ath_ops reg_ops;
36739 + ath_ops_no_const reg_ops;
36740
36741 struct ieee80211_hw *hw;
36742 struct ath_common common;
36743 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
36744 index af00e2c..ab04d34 100644
36745 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
36746 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
36747 @@ -545,7 +545,7 @@ struct phy_func_ptr {
36748 void (*carrsuppr)(struct brcms_phy *);
36749 s32 (*rxsigpwr)(struct brcms_phy *, s32);
36750 void (*detach)(struct brcms_phy *);
36751 -};
36752 +} __no_const;
36753
36754 struct brcms_phy {
36755 struct brcms_phy_pub pubpi_ro;
36756 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
36757 index e252acb..6ad1e65 100644
36758 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
36759 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
36760 @@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
36761 */
36762 if (il3945_mod_params.disable_hw_scan) {
36763 D_INFO("Disabling hw_scan\n");
36764 - il3945_mac_ops.hw_scan = NULL;
36765 + pax_open_kernel();
36766 + *(void **)&il3945_mac_ops.hw_scan = NULL;
36767 + pax_close_kernel();
36768 }
36769
36770 D_INFO("*** LOAD DRIVER ***\n");
36771 diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
36772 index 1a98fa3..51e6661 100644
36773 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
36774 +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
36775 @@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
36776 {
36777 struct iwl_priv *priv = file->private_data;
36778 char buf[64];
36779 - int buf_size;
36780 + size_t buf_size;
36781 u32 offset, len;
36782
36783 memset(buf, 0, sizeof(buf));
36784 @@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
36785 struct iwl_priv *priv = file->private_data;
36786
36787 char buf[8];
36788 - int buf_size;
36789 + size_t buf_size;
36790 u32 reset_flag;
36791
36792 memset(buf, 0, sizeof(buf));
36793 @@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
36794 {
36795 struct iwl_priv *priv = file->private_data;
36796 char buf[8];
36797 - int buf_size;
36798 + size_t buf_size;
36799 int ht40;
36800
36801 memset(buf, 0, sizeof(buf));
36802 @@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
36803 {
36804 struct iwl_priv *priv = file->private_data;
36805 char buf[8];
36806 - int buf_size;
36807 + size_t buf_size;
36808 int value;
36809
36810 memset(buf, 0, sizeof(buf));
36811 @@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
36812 {
36813 struct iwl_priv *priv = file->private_data;
36814 char buf[8];
36815 - int buf_size;
36816 + size_t buf_size;
36817 int clear;
36818
36819 memset(buf, 0, sizeof(buf));
36820 @@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
36821 {
36822 struct iwl_priv *priv = file->private_data;
36823 char buf[8];
36824 - int buf_size;
36825 + size_t buf_size;
36826 int trace;
36827
36828 memset(buf, 0, sizeof(buf));
36829 @@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
36830 {
36831 struct iwl_priv *priv = file->private_data;
36832 char buf[8];
36833 - int buf_size;
36834 + size_t buf_size;
36835 int missed;
36836
36837 memset(buf, 0, sizeof(buf));
36838 @@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
36839
36840 struct iwl_priv *priv = file->private_data;
36841 char buf[8];
36842 - int buf_size;
36843 + size_t buf_size;
36844 int plcp;
36845
36846 memset(buf, 0, sizeof(buf));
36847 @@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
36848
36849 struct iwl_priv *priv = file->private_data;
36850 char buf[8];
36851 - int buf_size;
36852 + size_t buf_size;
36853 int flush;
36854
36855 memset(buf, 0, sizeof(buf));
36856 @@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
36857
36858 struct iwl_priv *priv = file->private_data;
36859 char buf[8];
36860 - int buf_size;
36861 + size_t buf_size;
36862 int rts;
36863
36864 if (!priv->cfg->ht_params)
36865 @@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
36866 {
36867 struct iwl_priv *priv = file->private_data;
36868 char buf[8];
36869 - int buf_size;
36870 + size_t buf_size;
36871
36872 memset(buf, 0, sizeof(buf));
36873 buf_size = min(count, sizeof(buf) - 1);
36874 @@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
36875 struct iwl_priv *priv = file->private_data;
36876 u32 event_log_flag;
36877 char buf[8];
36878 - int buf_size;
36879 + size_t buf_size;
36880
36881 /* check that the interface is up */
36882 if (!iwl_is_ready(priv))
36883 @@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
36884 struct iwl_priv *priv = file->private_data;
36885 char buf[8];
36886 u32 calib_disabled;
36887 - int buf_size;
36888 + size_t buf_size;
36889
36890 memset(buf, 0, sizeof(buf));
36891 buf_size = min(count, sizeof(buf) - 1);
36892 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
36893 index fe0fffd..b4c5724 100644
36894 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
36895 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
36896 @@ -1967,7 +1967,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
36897 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
36898
36899 char buf[8];
36900 - int buf_size;
36901 + size_t buf_size;
36902 u32 reset_flag;
36903
36904 memset(buf, 0, sizeof(buf));
36905 @@ -1988,7 +1988,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
36906 {
36907 struct iwl_trans *trans = file->private_data;
36908 char buf[8];
36909 - int buf_size;
36910 + size_t buf_size;
36911 int csr;
36912
36913 memset(buf, 0, sizeof(buf));
36914 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
36915 index 429ca32..f86236b 100644
36916 --- a/drivers/net/wireless/mac80211_hwsim.c
36917 +++ b/drivers/net/wireless/mac80211_hwsim.c
36918 @@ -1751,9 +1751,11 @@ static int __init init_mac80211_hwsim(void)
36919 return -EINVAL;
36920
36921 if (fake_hw_scan) {
36922 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
36923 - mac80211_hwsim_ops.sw_scan_start = NULL;
36924 - mac80211_hwsim_ops.sw_scan_complete = NULL;
36925 + pax_open_kernel();
36926 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
36927 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
36928 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
36929 + pax_close_kernel();
36930 }
36931
36932 spin_lock_init(&hwsim_radio_lock);
36933 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
36934 index c2d0ab1..fb9afe2 100644
36935 --- a/drivers/net/wireless/mwifiex/main.h
36936 +++ b/drivers/net/wireless/mwifiex/main.h
36937 @@ -603,7 +603,7 @@ struct mwifiex_if_ops {
36938 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
36939 int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
36940 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
36941 -};
36942 +} __no_const;
36943
36944 struct mwifiex_adapter {
36945 u8 iface_type;
36946 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
36947 index bd1f0cb..db85ab0 100644
36948 --- a/drivers/net/wireless/rndis_wlan.c
36949 +++ b/drivers/net/wireless/rndis_wlan.c
36950 @@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
36951
36952 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
36953
36954 - if (rts_threshold < 0 || rts_threshold > 2347)
36955 + if (rts_threshold > 2347)
36956 rts_threshold = 2347;
36957
36958 tmp = cpu_to_le32(rts_threshold);
36959 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
36960 index 0751b35..246ba3e 100644
36961 --- a/drivers/net/wireless/rt2x00/rt2x00.h
36962 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
36963 @@ -398,7 +398,7 @@ struct rt2x00_intf {
36964 * for hardware which doesn't support hardware
36965 * sequence counting.
36966 */
36967 - atomic_t seqno;
36968 + atomic_unchecked_t seqno;
36969 };
36970
36971 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
36972 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
36973 index e488b94..14b6a0c 100644
36974 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
36975 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
36976 @@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
36977 * sequence counter given by mac80211.
36978 */
36979 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
36980 - seqno = atomic_add_return(0x10, &intf->seqno);
36981 + seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
36982 else
36983 - seqno = atomic_read(&intf->seqno);
36984 + seqno = atomic_read_unchecked(&intf->seqno);
36985
36986 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
36987 hdr->seq_ctrl |= cpu_to_le16(seqno);
36988 diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
36989 index fd02060..74ee481 100644
36990 --- a/drivers/net/wireless/ti/wl1251/wl1251.h
36991 +++ b/drivers/net/wireless/ti/wl1251/wl1251.h
36992 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
36993 void (*reset)(struct wl1251 *wl);
36994 void (*enable_irq)(struct wl1251 *wl);
36995 void (*disable_irq)(struct wl1251 *wl);
36996 -};
36997 +} __no_const;
36998
36999 struct wl1251 {
37000 struct ieee80211_hw *hw;
37001 diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
37002 index 68584aa..10fc178 100644
37003 --- a/drivers/net/wireless/ti/wlcore/wlcore.h
37004 +++ b/drivers/net/wireless/ti/wlcore/wlcore.h
37005 @@ -88,7 +88,7 @@ struct wlcore_ops {
37006 struct ieee80211_sta *sta,
37007 struct ieee80211_key_conf *key_conf);
37008 u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
37009 -};
37010 +} __no_const;
37011
37012 enum wlcore_partitions {
37013 PART_DOWN,
37014 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
37015 index d93b2b6..ae50401 100644
37016 --- a/drivers/oprofile/buffer_sync.c
37017 +++ b/drivers/oprofile/buffer_sync.c
37018 @@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
37019 if (cookie == NO_COOKIE)
37020 offset = pc;
37021 if (cookie == INVALID_COOKIE) {
37022 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
37023 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
37024 offset = pc;
37025 }
37026 if (cookie != last_cookie) {
37027 @@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
37028 /* add userspace sample */
37029
37030 if (!mm) {
37031 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
37032 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
37033 return 0;
37034 }
37035
37036 cookie = lookup_dcookie(mm, s->eip, &offset);
37037
37038 if (cookie == INVALID_COOKIE) {
37039 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
37040 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
37041 return 0;
37042 }
37043
37044 @@ -552,7 +552,7 @@ void sync_buffer(int cpu)
37045 /* ignore backtraces if failed to add a sample */
37046 if (state == sb_bt_start) {
37047 state = sb_bt_ignore;
37048 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
37049 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
37050 }
37051 }
37052 release_mm(mm);
37053 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
37054 index c0cc4e7..44d4e54 100644
37055 --- a/drivers/oprofile/event_buffer.c
37056 +++ b/drivers/oprofile/event_buffer.c
37057 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
37058 }
37059
37060 if (buffer_pos == buffer_size) {
37061 - atomic_inc(&oprofile_stats.event_lost_overflow);
37062 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
37063 return;
37064 }
37065
37066 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
37067 index ed2c3ec..deda85a 100644
37068 --- a/drivers/oprofile/oprof.c
37069 +++ b/drivers/oprofile/oprof.c
37070 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
37071 if (oprofile_ops.switch_events())
37072 return;
37073
37074 - atomic_inc(&oprofile_stats.multiplex_counter);
37075 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
37076 start_switch_worker();
37077 }
37078
37079 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
37080 index 917d28e..d62d981 100644
37081 --- a/drivers/oprofile/oprofile_stats.c
37082 +++ b/drivers/oprofile/oprofile_stats.c
37083 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
37084 cpu_buf->sample_invalid_eip = 0;
37085 }
37086
37087 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
37088 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
37089 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
37090 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
37091 - atomic_set(&oprofile_stats.multiplex_counter, 0);
37092 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
37093 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
37094 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
37095 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
37096 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
37097 }
37098
37099
37100 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
37101 index 38b6fc0..b5cbfce 100644
37102 --- a/drivers/oprofile/oprofile_stats.h
37103 +++ b/drivers/oprofile/oprofile_stats.h
37104 @@ -13,11 +13,11 @@
37105 #include <linux/atomic.h>
37106
37107 struct oprofile_stat_struct {
37108 - atomic_t sample_lost_no_mm;
37109 - atomic_t sample_lost_no_mapping;
37110 - atomic_t bt_lost_no_mapping;
37111 - atomic_t event_lost_overflow;
37112 - atomic_t multiplex_counter;
37113 + atomic_unchecked_t sample_lost_no_mm;
37114 + atomic_unchecked_t sample_lost_no_mapping;
37115 + atomic_unchecked_t bt_lost_no_mapping;
37116 + atomic_unchecked_t event_lost_overflow;
37117 + atomic_unchecked_t multiplex_counter;
37118 };
37119
37120 extern struct oprofile_stat_struct oprofile_stats;
37121 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
37122 index 849357c..b83c1e0 100644
37123 --- a/drivers/oprofile/oprofilefs.c
37124 +++ b/drivers/oprofile/oprofilefs.c
37125 @@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
37126
37127
37128 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
37129 - char const *name, atomic_t *val)
37130 + char const *name, atomic_unchecked_t *val)
37131 {
37132 return __oprofilefs_create_file(sb, root, name,
37133 &atomic_ro_fops, 0444, val);
37134 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
37135 index 3f56bc0..707d642 100644
37136 --- a/drivers/parport/procfs.c
37137 +++ b/drivers/parport/procfs.c
37138 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
37139
37140 *ppos += len;
37141
37142 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
37143 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
37144 }
37145
37146 #ifdef CONFIG_PARPORT_1284
37147 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
37148
37149 *ppos += len;
37150
37151 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
37152 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
37153 }
37154 #endif /* IEEE1284.3 support. */
37155
37156 diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
37157 index a1afb5b..e6baac9 100644
37158 --- a/drivers/pci/hotplug/acpiphp.h
37159 +++ b/drivers/pci/hotplug/acpiphp.h
37160 @@ -123,7 +123,7 @@ struct acpiphp_func {
37161 struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */
37162
37163 struct list_head sibling;
37164 - struct notifier_block nb;
37165 + notifier_block_no_const nb;
37166 acpi_handle handle;
37167
37168 u8 function; /* pci function# */
37169 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
37170 index 9fff878..ad0ad53 100644
37171 --- a/drivers/pci/hotplug/cpci_hotplug.h
37172 +++ b/drivers/pci/hotplug/cpci_hotplug.h
37173 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
37174 int (*hardware_test) (struct slot* slot, u32 value);
37175 u8 (*get_power) (struct slot* slot);
37176 int (*set_power) (struct slot* slot, int value);
37177 -};
37178 +} __no_const;
37179
37180 struct cpci_hp_controller {
37181 unsigned int irq;
37182 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
37183 index 76ba8a1..20ca857 100644
37184 --- a/drivers/pci/hotplug/cpqphp_nvram.c
37185 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
37186 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
37187
37188 void compaq_nvram_init (void __iomem *rom_start)
37189 {
37190 +
37191 +#ifndef CONFIG_PAX_KERNEXEC
37192 if (rom_start) {
37193 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
37194 }
37195 +#endif
37196 +
37197 dbg("int15 entry = %p\n", compaq_int15_entry_point);
37198
37199 /* initialize our int15 lock */
37200 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
37201 index 213753b..b4abaac 100644
37202 --- a/drivers/pci/pcie/aspm.c
37203 +++ b/drivers/pci/pcie/aspm.c
37204 @@ -27,9 +27,9 @@
37205 #define MODULE_PARAM_PREFIX "pcie_aspm."
37206
37207 /* Note: those are not register definitions */
37208 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
37209 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
37210 -#define ASPM_STATE_L1 (4) /* L1 state */
37211 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
37212 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
37213 +#define ASPM_STATE_L1 (4U) /* L1 state */
37214 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
37215 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
37216
37217 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
37218 index ec909af..e7517f3 100644
37219 --- a/drivers/pci/probe.c
37220 +++ b/drivers/pci/probe.c
37221 @@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
37222 struct pci_bus_region region;
37223 bool bar_too_big = false, bar_disabled = false;
37224
37225 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
37226 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
37227
37228 /* No printks while decoding is disabled! */
37229 if (!dev->mmio_always_on) {
37230 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
37231 index 9b8505c..f00870a 100644
37232 --- a/drivers/pci/proc.c
37233 +++ b/drivers/pci/proc.c
37234 @@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
37235 static int __init pci_proc_init(void)
37236 {
37237 struct pci_dev *dev = NULL;
37238 +
37239 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
37240 +#ifdef CONFIG_GRKERNSEC_PROC_USER
37241 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
37242 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
37243 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
37244 +#endif
37245 +#else
37246 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
37247 +#endif
37248 proc_create("devices", 0, proc_bus_pci_dir,
37249 &proc_bus_pci_dev_operations);
37250 proc_initialized = 1;
37251 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
37252 index 75dd651..2af4c9a 100644
37253 --- a/drivers/platform/x86/thinkpad_acpi.c
37254 +++ b/drivers/platform/x86/thinkpad_acpi.c
37255 @@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
37256 return 0;
37257 }
37258
37259 -void static hotkey_mask_warn_incomplete_mask(void)
37260 +static void hotkey_mask_warn_incomplete_mask(void)
37261 {
37262 /* log only what the user can fix... */
37263 const u32 wantedmask = hotkey_driver_mask &
37264 @@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
37265 }
37266 }
37267
37268 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37269 - struct tp_nvram_state *newn,
37270 - const u32 event_mask)
37271 -{
37272 -
37273 #define TPACPI_COMPARE_KEY(__scancode, __member) \
37274 do { \
37275 if ((event_mask & (1 << __scancode)) && \
37276 @@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37277 tpacpi_hotkey_send_key(__scancode); \
37278 } while (0)
37279
37280 - void issue_volchange(const unsigned int oldvol,
37281 - const unsigned int newvol)
37282 - {
37283 - unsigned int i = oldvol;
37284 +static void issue_volchange(const unsigned int oldvol,
37285 + const unsigned int newvol,
37286 + const u32 event_mask)
37287 +{
37288 + unsigned int i = oldvol;
37289
37290 - while (i > newvol) {
37291 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
37292 - i--;
37293 - }
37294 - while (i < newvol) {
37295 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37296 - i++;
37297 - }
37298 + while (i > newvol) {
37299 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
37300 + i--;
37301 }
37302 + while (i < newvol) {
37303 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37304 + i++;
37305 + }
37306 +}
37307
37308 - void issue_brightnesschange(const unsigned int oldbrt,
37309 - const unsigned int newbrt)
37310 - {
37311 - unsigned int i = oldbrt;
37312 +static void issue_brightnesschange(const unsigned int oldbrt,
37313 + const unsigned int newbrt,
37314 + const u32 event_mask)
37315 +{
37316 + unsigned int i = oldbrt;
37317
37318 - while (i > newbrt) {
37319 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
37320 - i--;
37321 - }
37322 - while (i < newbrt) {
37323 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37324 - i++;
37325 - }
37326 + while (i > newbrt) {
37327 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
37328 + i--;
37329 + }
37330 + while (i < newbrt) {
37331 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37332 + i++;
37333 }
37334 +}
37335
37336 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37337 + struct tp_nvram_state *newn,
37338 + const u32 event_mask)
37339 +{
37340 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
37341 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
37342 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
37343 @@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37344 oldn->volume_level != newn->volume_level) {
37345 /* recently muted, or repeated mute keypress, or
37346 * multiple presses ending in mute */
37347 - issue_volchange(oldn->volume_level, newn->volume_level);
37348 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37349 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
37350 }
37351 } else {
37352 @@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37353 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37354 }
37355 if (oldn->volume_level != newn->volume_level) {
37356 - issue_volchange(oldn->volume_level, newn->volume_level);
37357 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37358 } else if (oldn->volume_toggle != newn->volume_toggle) {
37359 /* repeated vol up/down keypress at end of scale ? */
37360 if (newn->volume_level == 0)
37361 @@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37362 /* handle brightness */
37363 if (oldn->brightness_level != newn->brightness_level) {
37364 issue_brightnesschange(oldn->brightness_level,
37365 - newn->brightness_level);
37366 + newn->brightness_level,
37367 + event_mask);
37368 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
37369 /* repeated key presses that didn't change state */
37370 if (newn->brightness_level == 0)
37371 @@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37372 && !tp_features.bright_unkfw)
37373 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37374 }
37375 +}
37376
37377 #undef TPACPI_COMPARE_KEY
37378 #undef TPACPI_MAY_SEND_KEY
37379 -}
37380
37381 /*
37382 * Polling driver
37383 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
37384 index 769d265..a3a05ca 100644
37385 --- a/drivers/pnp/pnpbios/bioscalls.c
37386 +++ b/drivers/pnp/pnpbios/bioscalls.c
37387 @@ -58,7 +58,7 @@ do { \
37388 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
37389 } while(0)
37390
37391 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
37392 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
37393 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
37394
37395 /*
37396 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37397
37398 cpu = get_cpu();
37399 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
37400 +
37401 + pax_open_kernel();
37402 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
37403 + pax_close_kernel();
37404
37405 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
37406 spin_lock_irqsave(&pnp_bios_lock, flags);
37407 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37408 :"memory");
37409 spin_unlock_irqrestore(&pnp_bios_lock, flags);
37410
37411 + pax_open_kernel();
37412 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
37413 + pax_close_kernel();
37414 +
37415 put_cpu();
37416
37417 /* If we get here and this is set then the PnP BIOS faulted on us. */
37418 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
37419 return status;
37420 }
37421
37422 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
37423 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
37424 {
37425 int i;
37426
37427 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37428 pnp_bios_callpoint.offset = header->fields.pm16offset;
37429 pnp_bios_callpoint.segment = PNP_CS16;
37430
37431 + pax_open_kernel();
37432 +
37433 for_each_possible_cpu(i) {
37434 struct desc_struct *gdt = get_cpu_gdt_table(i);
37435 if (!gdt)
37436 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37437 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
37438 (unsigned long)__va(header->fields.pm16dseg));
37439 }
37440 +
37441 + pax_close_kernel();
37442 }
37443 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
37444 index b0ecacb..7c9da2e 100644
37445 --- a/drivers/pnp/resource.c
37446 +++ b/drivers/pnp/resource.c
37447 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
37448 return 1;
37449
37450 /* check if the resource is valid */
37451 - if (*irq < 0 || *irq > 15)
37452 + if (*irq > 15)
37453 return 0;
37454
37455 /* check if the resource is reserved */
37456 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
37457 return 1;
37458
37459 /* check if the resource is valid */
37460 - if (*dma < 0 || *dma == 4 || *dma > 7)
37461 + if (*dma == 4 || *dma > 7)
37462 return 0;
37463
37464 /* check if the resource is reserved */
37465 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
37466 index 5860d4d..d90d268 100644
37467 --- a/drivers/power/bq27x00_battery.c
37468 +++ b/drivers/power/bq27x00_battery.c
37469 @@ -80,7 +80,7 @@
37470 struct bq27x00_device_info;
37471 struct bq27x00_access_methods {
37472 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
37473 -};
37474 +} __no_const;
37475
37476 enum bq27x00_chip { BQ27000, BQ27500, BQ27425};
37477
37478 diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
37479 index 94762e6..d35ff48 100644
37480 --- a/drivers/power/da9030_battery.c
37481 +++ b/drivers/power/da9030_battery.c
37482 @@ -110,7 +110,7 @@ struct da9030_charger {
37483 int mV;
37484 bool is_on;
37485
37486 - struct notifier_block nb;
37487 + notifier_block_no_const nb;
37488
37489 /* platform callbacks for battery low and critical events */
37490 void (*battery_low)(void);
37491 diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
37492 index d9d034d..3c9a5e1 100644
37493 --- a/drivers/power/da9052-battery.c
37494 +++ b/drivers/power/da9052-battery.c
37495 @@ -170,7 +170,7 @@ static u32 const vc_tbl[3][68][2] = {
37496 struct da9052_battery {
37497 struct da9052 *da9052;
37498 struct power_supply psy;
37499 - struct notifier_block nb;
37500 + notifier_block_no_const nb;
37501 int charger_type;
37502 int status;
37503 int health;
37504 diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
37505 index 1229119..f53b75d 100644
37506 --- a/drivers/power/isp1704_charger.c
37507 +++ b/drivers/power/isp1704_charger.c
37508 @@ -57,7 +57,7 @@ struct isp1704_charger {
37509 struct device *dev;
37510 struct power_supply psy;
37511 struct usb_phy *phy;
37512 - struct notifier_block nb;
37513 + notifier_block_no_const nb;
37514 struct work_struct work;
37515
37516 /* properties */
37517 diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
37518 index 7df7c5f..bd48c47 100644
37519 --- a/drivers/power/pda_power.c
37520 +++ b/drivers/power/pda_power.c
37521 @@ -37,7 +37,11 @@ static int polling;
37522
37523 #ifdef CONFIG_USB_OTG_UTILS
37524 static struct usb_phy *transceiver;
37525 -static struct notifier_block otg_nb;
37526 +static int otg_handle_notification(struct notifier_block *nb,
37527 + unsigned long event, void *unused);
37528 +static struct notifier_block otg_nb = {
37529 + .notifier_call = otg_handle_notification
37530 +};
37531 #endif
37532
37533 static struct regulator *ac_draw;
37534 @@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
37535
37536 #ifdef CONFIG_USB_OTG_UTILS
37537 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
37538 - otg_nb.notifier_call = otg_handle_notification;
37539 ret = usb_register_notifier(transceiver, &otg_nb);
37540 if (ret) {
37541 dev_err(dev, "failure to register otg notifier\n");
37542 diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
37543 index f9e70cf..2a21d8b 100644
37544 --- a/drivers/power/twl4030_charger.c
37545 +++ b/drivers/power/twl4030_charger.c
37546 @@ -84,7 +84,7 @@ struct twl4030_bci {
37547 struct power_supply ac;
37548 struct power_supply usb;
37549 struct usb_phy *transceiver;
37550 - struct notifier_block usb_nb;
37551 + notifier_block_no_const usb_nb;
37552 struct work_struct work;
37553 int irq_chg;
37554 int irq_bci;
37555 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
37556 index 8d53174..04c65de 100644
37557 --- a/drivers/regulator/max8660.c
37558 +++ b/drivers/regulator/max8660.c
37559 @@ -333,8 +333,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
37560 max8660->shadow_regs[MAX8660_OVER1] = 5;
37561 } else {
37562 /* Otherwise devices can be toggled via software */
37563 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
37564 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
37565 + pax_open_kernel();
37566 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
37567 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
37568 + pax_close_kernel();
37569 }
37570
37571 /*
37572 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
37573 index 1fa6381..f58834e 100644
37574 --- a/drivers/regulator/mc13892-regulator.c
37575 +++ b/drivers/regulator/mc13892-regulator.c
37576 @@ -540,10 +540,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
37577 }
37578 mc13xxx_unlock(mc13892);
37579
37580 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37581 + pax_open_kernel();
37582 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37583 = mc13892_vcam_set_mode;
37584 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37585 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37586 = mc13892_vcam_get_mode;
37587 + pax_close_kernel();
37588
37589 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
37590 ARRAY_SIZE(mc13892_regulators));
37591 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
37592 index cace6d3..f623fda 100644
37593 --- a/drivers/rtc/rtc-dev.c
37594 +++ b/drivers/rtc/rtc-dev.c
37595 @@ -14,6 +14,7 @@
37596 #include <linux/module.h>
37597 #include <linux/rtc.h>
37598 #include <linux/sched.h>
37599 +#include <linux/grsecurity.h>
37600 #include "rtc-core.h"
37601
37602 static dev_t rtc_devt;
37603 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
37604 if (copy_from_user(&tm, uarg, sizeof(tm)))
37605 return -EFAULT;
37606
37607 + gr_log_timechange();
37608 +
37609 return rtc_set_time(rtc, &tm);
37610
37611 case RTC_PIE_ON:
37612 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
37613 index 9e933a8..4f969f7 100644
37614 --- a/drivers/scsi/aacraid/aacraid.h
37615 +++ b/drivers/scsi/aacraid/aacraid.h
37616 @@ -503,7 +503,7 @@ struct adapter_ops
37617 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
37618 /* Administrative operations */
37619 int (*adapter_comm)(struct aac_dev * dev, int comm);
37620 -};
37621 +} __no_const;
37622
37623 /*
37624 * Define which interrupt handler needs to be installed
37625 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
37626 index 4ad7e36..d004679 100644
37627 --- a/drivers/scsi/bfa/bfa.h
37628 +++ b/drivers/scsi/bfa/bfa.h
37629 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
37630 u32 *end);
37631 int cpe_vec_q0;
37632 int rme_vec_q0;
37633 -};
37634 +} __no_const;
37635 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
37636
37637 struct bfa_faa_cbfn_s {
37638 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
37639 index 27b5609..e08d9c4 100644
37640 --- a/drivers/scsi/bfa/bfa_fcpim.c
37641 +++ b/drivers/scsi/bfa/bfa_fcpim.c
37642 @@ -3731,7 +3731,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
37643
37644 bfa_iotag_attach(fcp);
37645
37646 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
37647 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
37648 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
37649 (fcp->num_itns * sizeof(struct bfa_itn_s));
37650 memset(fcp->itn_arr, 0,
37651 @@ -3799,7 +3799,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
37652 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
37653 {
37654 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
37655 - struct bfa_itn_s *itn;
37656 + bfa_itn_s_no_const *itn;
37657
37658 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
37659 itn->isr = isr;
37660 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
37661 index e693af6..4faba24 100644
37662 --- a/drivers/scsi/bfa/bfa_fcpim.h
37663 +++ b/drivers/scsi/bfa/bfa_fcpim.h
37664 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
37665 struct bfa_itn_s {
37666 bfa_isr_func_t isr;
37667 };
37668 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
37669
37670 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
37671 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
37672 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
37673 struct list_head iotag_tio_free_q; /* free IO resources */
37674 struct list_head iotag_unused_q; /* unused IO resources*/
37675 struct bfa_iotag_s *iotag_arr;
37676 - struct bfa_itn_s *itn_arr;
37677 + bfa_itn_s_no_const *itn_arr;
37678 int max_ioim_reqs;
37679 int num_ioim_reqs;
37680 int num_fwtio_reqs;
37681 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
37682 index 23a90e7..9cf04ee 100644
37683 --- a/drivers/scsi/bfa/bfa_ioc.h
37684 +++ b/drivers/scsi/bfa/bfa_ioc.h
37685 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
37686 bfa_ioc_disable_cbfn_t disable_cbfn;
37687 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
37688 bfa_ioc_reset_cbfn_t reset_cbfn;
37689 -};
37690 +} __no_const;
37691
37692 /*
37693 * IOC event notification mechanism.
37694 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
37695 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
37696 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
37697 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
37698 -};
37699 +} __no_const;
37700
37701 /*
37702 * Queue element to wait for room in request queue. FIFO order is
37703 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
37704 index 593085a..47aa999 100644
37705 --- a/drivers/scsi/hosts.c
37706 +++ b/drivers/scsi/hosts.c
37707 @@ -42,7 +42,7 @@
37708 #include "scsi_logging.h"
37709
37710
37711 -static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
37712 +static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
37713
37714
37715 static void scsi_host_cls_release(struct device *dev)
37716 @@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
37717 * subtract one because we increment first then return, but we need to
37718 * know what the next host number was before increment
37719 */
37720 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
37721 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
37722 shost->dma_channel = 0xff;
37723
37724 /* These three are default values which can be overridden */
37725 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
37726 index 4217e49..9c77e3e 100644
37727 --- a/drivers/scsi/hpsa.c
37728 +++ b/drivers/scsi/hpsa.c
37729 @@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
37730 unsigned long flags;
37731
37732 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37733 - return h->access.command_completed(h, q);
37734 + return h->access->command_completed(h, q);
37735
37736 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
37737 a = rq->head[rq->current_entry];
37738 @@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
37739 while (!list_empty(&h->reqQ)) {
37740 c = list_entry(h->reqQ.next, struct CommandList, list);
37741 /* can't do anything if fifo is full */
37742 - if ((h->access.fifo_full(h))) {
37743 + if ((h->access->fifo_full(h))) {
37744 dev_warn(&h->pdev->dev, "fifo full\n");
37745 break;
37746 }
37747 @@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
37748
37749 /* Tell the controller execute command */
37750 spin_unlock_irqrestore(&h->lock, flags);
37751 - h->access.submit_command(h, c);
37752 + h->access->submit_command(h, c);
37753 spin_lock_irqsave(&h->lock, flags);
37754 }
37755 spin_unlock_irqrestore(&h->lock, flags);
37756 @@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
37757
37758 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
37759 {
37760 - return h->access.command_completed(h, q);
37761 + return h->access->command_completed(h, q);
37762 }
37763
37764 static inline bool interrupt_pending(struct ctlr_info *h)
37765 {
37766 - return h->access.intr_pending(h);
37767 + return h->access->intr_pending(h);
37768 }
37769
37770 static inline long interrupt_not_for_us(struct ctlr_info *h)
37771 {
37772 - return (h->access.intr_pending(h) == 0) ||
37773 + return (h->access->intr_pending(h) == 0) ||
37774 (h->interrupts_enabled == 0);
37775 }
37776
37777 @@ -4318,7 +4318,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
37778 if (prod_index < 0)
37779 return -ENODEV;
37780 h->product_name = products[prod_index].product_name;
37781 - h->access = *(products[prod_index].access);
37782 + h->access = products[prod_index].access;
37783
37784 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
37785 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
37786 @@ -4600,7 +4600,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
37787
37788 assert_spin_locked(&lockup_detector_lock);
37789 remove_ctlr_from_lockup_detector_list(h);
37790 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
37791 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
37792 spin_lock_irqsave(&h->lock, flags);
37793 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
37794 spin_unlock_irqrestore(&h->lock, flags);
37795 @@ -4778,7 +4778,7 @@ reinit_after_soft_reset:
37796 }
37797
37798 /* make sure the board interrupts are off */
37799 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
37800 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
37801
37802 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
37803 goto clean2;
37804 @@ -4812,7 +4812,7 @@ reinit_after_soft_reset:
37805 * fake ones to scoop up any residual completions.
37806 */
37807 spin_lock_irqsave(&h->lock, flags);
37808 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
37809 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
37810 spin_unlock_irqrestore(&h->lock, flags);
37811 free_irqs(h);
37812 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
37813 @@ -4831,9 +4831,9 @@ reinit_after_soft_reset:
37814 dev_info(&h->pdev->dev, "Board READY.\n");
37815 dev_info(&h->pdev->dev,
37816 "Waiting for stale completions to drain.\n");
37817 - h->access.set_intr_mask(h, HPSA_INTR_ON);
37818 + h->access->set_intr_mask(h, HPSA_INTR_ON);
37819 msleep(10000);
37820 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
37821 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
37822
37823 rc = controller_reset_failed(h->cfgtable);
37824 if (rc)
37825 @@ -4854,7 +4854,7 @@ reinit_after_soft_reset:
37826 }
37827
37828 /* Turn the interrupts on so we can service requests */
37829 - h->access.set_intr_mask(h, HPSA_INTR_ON);
37830 + h->access->set_intr_mask(h, HPSA_INTR_ON);
37831
37832 hpsa_hba_inquiry(h);
37833 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
37834 @@ -4906,7 +4906,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
37835 * To write all data in the battery backed cache to disks
37836 */
37837 hpsa_flush_cache(h);
37838 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
37839 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
37840 hpsa_free_irqs_and_disable_msix(h);
37841 }
37842
37843 @@ -5075,7 +5075,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
37844 return;
37845 }
37846 /* Change the access methods to the performant access methods */
37847 - h->access = SA5_performant_access;
37848 + h->access = &SA5_performant_access;
37849 h->transMethod = CFGTBL_Trans_Performant;
37850 }
37851
37852 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
37853 index 9816479..c5d4e97 100644
37854 --- a/drivers/scsi/hpsa.h
37855 +++ b/drivers/scsi/hpsa.h
37856 @@ -79,7 +79,7 @@ struct ctlr_info {
37857 unsigned int msix_vector;
37858 unsigned int msi_vector;
37859 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
37860 - struct access_method access;
37861 + struct access_method *access;
37862
37863 /* queue and queue Info */
37864 struct list_head reqQ;
37865 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
37866 index f2df059..a3a9930 100644
37867 --- a/drivers/scsi/ips.h
37868 +++ b/drivers/scsi/ips.h
37869 @@ -1027,7 +1027,7 @@ typedef struct {
37870 int (*intr)(struct ips_ha *);
37871 void (*enableint)(struct ips_ha *);
37872 uint32_t (*statupd)(struct ips_ha *);
37873 -} ips_hw_func_t;
37874 +} __no_const ips_hw_func_t;
37875
37876 typedef struct ips_ha {
37877 uint8_t ha_id[IPS_MAX_CHANNELS+1];
37878 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
37879 index c772d8d..35c362c 100644
37880 --- a/drivers/scsi/libfc/fc_exch.c
37881 +++ b/drivers/scsi/libfc/fc_exch.c
37882 @@ -100,12 +100,12 @@ struct fc_exch_mgr {
37883 u16 pool_max_index;
37884
37885 struct {
37886 - atomic_t no_free_exch;
37887 - atomic_t no_free_exch_xid;
37888 - atomic_t xid_not_found;
37889 - atomic_t xid_busy;
37890 - atomic_t seq_not_found;
37891 - atomic_t non_bls_resp;
37892 + atomic_unchecked_t no_free_exch;
37893 + atomic_unchecked_t no_free_exch_xid;
37894 + atomic_unchecked_t xid_not_found;
37895 + atomic_unchecked_t xid_busy;
37896 + atomic_unchecked_t seq_not_found;
37897 + atomic_unchecked_t non_bls_resp;
37898 } stats;
37899 };
37900
37901 @@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
37902 /* allocate memory for exchange */
37903 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
37904 if (!ep) {
37905 - atomic_inc(&mp->stats.no_free_exch);
37906 + atomic_inc_unchecked(&mp->stats.no_free_exch);
37907 goto out;
37908 }
37909 memset(ep, 0, sizeof(*ep));
37910 @@ -786,7 +786,7 @@ out:
37911 return ep;
37912 err:
37913 spin_unlock_bh(&pool->lock);
37914 - atomic_inc(&mp->stats.no_free_exch_xid);
37915 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
37916 mempool_free(ep, mp->ep_pool);
37917 return NULL;
37918 }
37919 @@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37920 xid = ntohs(fh->fh_ox_id); /* we originated exch */
37921 ep = fc_exch_find(mp, xid);
37922 if (!ep) {
37923 - atomic_inc(&mp->stats.xid_not_found);
37924 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37925 reject = FC_RJT_OX_ID;
37926 goto out;
37927 }
37928 @@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37929 ep = fc_exch_find(mp, xid);
37930 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
37931 if (ep) {
37932 - atomic_inc(&mp->stats.xid_busy);
37933 + atomic_inc_unchecked(&mp->stats.xid_busy);
37934 reject = FC_RJT_RX_ID;
37935 goto rel;
37936 }
37937 @@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37938 }
37939 xid = ep->xid; /* get our XID */
37940 } else if (!ep) {
37941 - atomic_inc(&mp->stats.xid_not_found);
37942 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37943 reject = FC_RJT_RX_ID; /* XID not found */
37944 goto out;
37945 }
37946 @@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37947 } else {
37948 sp = &ep->seq;
37949 if (sp->id != fh->fh_seq_id) {
37950 - atomic_inc(&mp->stats.seq_not_found);
37951 + atomic_inc_unchecked(&mp->stats.seq_not_found);
37952 if (f_ctl & FC_FC_END_SEQ) {
37953 /*
37954 * Update sequence_id based on incoming last
37955 @@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37956
37957 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
37958 if (!ep) {
37959 - atomic_inc(&mp->stats.xid_not_found);
37960 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37961 goto out;
37962 }
37963 if (ep->esb_stat & ESB_ST_COMPLETE) {
37964 - atomic_inc(&mp->stats.xid_not_found);
37965 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37966 goto rel;
37967 }
37968 if (ep->rxid == FC_XID_UNKNOWN)
37969 ep->rxid = ntohs(fh->fh_rx_id);
37970 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
37971 - atomic_inc(&mp->stats.xid_not_found);
37972 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37973 goto rel;
37974 }
37975 if (ep->did != ntoh24(fh->fh_s_id) &&
37976 ep->did != FC_FID_FLOGI) {
37977 - atomic_inc(&mp->stats.xid_not_found);
37978 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37979 goto rel;
37980 }
37981 sof = fr_sof(fp);
37982 @@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37983 sp->ssb_stat |= SSB_ST_RESP;
37984 sp->id = fh->fh_seq_id;
37985 } else if (sp->id != fh->fh_seq_id) {
37986 - atomic_inc(&mp->stats.seq_not_found);
37987 + atomic_inc_unchecked(&mp->stats.seq_not_found);
37988 goto rel;
37989 }
37990
37991 @@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37992 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
37993
37994 if (!sp)
37995 - atomic_inc(&mp->stats.xid_not_found);
37996 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37997 else
37998 - atomic_inc(&mp->stats.non_bls_resp);
37999 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
38000
38001 fc_frame_free(fp);
38002 }
38003 @@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
38004
38005 list_for_each_entry(ema, &lport->ema_list, ema_list) {
38006 mp = ema->mp;
38007 - st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
38008 + st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
38009 st->fc_no_free_exch_xid +=
38010 - atomic_read(&mp->stats.no_free_exch_xid);
38011 - st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
38012 - st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
38013 - st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
38014 - st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
38015 + atomic_read_unchecked(&mp->stats.no_free_exch_xid);
38016 + st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
38017 + st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
38018 + st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
38019 + st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
38020 }
38021 }
38022 EXPORT_SYMBOL(fc_exch_update_stats);
38023 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
38024 index bdb81cd..d3c7c2c 100644
38025 --- a/drivers/scsi/libsas/sas_ata.c
38026 +++ b/drivers/scsi/libsas/sas_ata.c
38027 @@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
38028 .postreset = ata_std_postreset,
38029 .error_handler = ata_std_error_handler,
38030 .post_internal_cmd = sas_ata_post_internal,
38031 - .qc_defer = ata_std_qc_defer,
38032 + .qc_defer = ata_std_qc_defer,
38033 .qc_prep = ata_noop_qc_prep,
38034 .qc_issue = sas_ata_qc_issue,
38035 .qc_fill_rtf = sas_ata_qc_fill_rtf,
38036 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
38037 index 69b5993..1ac9dce 100644
38038 --- a/drivers/scsi/lpfc/lpfc.h
38039 +++ b/drivers/scsi/lpfc/lpfc.h
38040 @@ -424,7 +424,7 @@ struct lpfc_vport {
38041 struct dentry *debug_nodelist;
38042 struct dentry *vport_debugfs_root;
38043 struct lpfc_debugfs_trc *disc_trc;
38044 - atomic_t disc_trc_cnt;
38045 + atomic_unchecked_t disc_trc_cnt;
38046 #endif
38047 uint8_t stat_data_enabled;
38048 uint8_t stat_data_blocked;
38049 @@ -840,8 +840,8 @@ struct lpfc_hba {
38050 struct timer_list fabric_block_timer;
38051 unsigned long bit_flags;
38052 #define FABRIC_COMANDS_BLOCKED 0
38053 - atomic_t num_rsrc_err;
38054 - atomic_t num_cmd_success;
38055 + atomic_unchecked_t num_rsrc_err;
38056 + atomic_unchecked_t num_cmd_success;
38057 unsigned long last_rsrc_error_time;
38058 unsigned long last_ramp_down_time;
38059 unsigned long last_ramp_up_time;
38060 @@ -877,7 +877,7 @@ struct lpfc_hba {
38061
38062 struct dentry *debug_slow_ring_trc;
38063 struct lpfc_debugfs_trc *slow_ring_trc;
38064 - atomic_t slow_ring_trc_cnt;
38065 + atomic_unchecked_t slow_ring_trc_cnt;
38066 /* iDiag debugfs sub-directory */
38067 struct dentry *idiag_root;
38068 struct dentry *idiag_pci_cfg;
38069 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
38070 index f63f5ff..de29189 100644
38071 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
38072 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
38073 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
38074
38075 #include <linux/debugfs.h>
38076
38077 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
38078 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
38079 static unsigned long lpfc_debugfs_start_time = 0L;
38080
38081 /* iDiag */
38082 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
38083 lpfc_debugfs_enable = 0;
38084
38085 len = 0;
38086 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
38087 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
38088 (lpfc_debugfs_max_disc_trc - 1);
38089 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
38090 dtp = vport->disc_trc + i;
38091 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
38092 lpfc_debugfs_enable = 0;
38093
38094 len = 0;
38095 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
38096 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
38097 (lpfc_debugfs_max_slow_ring_trc - 1);
38098 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
38099 dtp = phba->slow_ring_trc + i;
38100 @@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
38101 !vport || !vport->disc_trc)
38102 return;
38103
38104 - index = atomic_inc_return(&vport->disc_trc_cnt) &
38105 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
38106 (lpfc_debugfs_max_disc_trc - 1);
38107 dtp = vport->disc_trc + index;
38108 dtp->fmt = fmt;
38109 dtp->data1 = data1;
38110 dtp->data2 = data2;
38111 dtp->data3 = data3;
38112 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
38113 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
38114 dtp->jif = jiffies;
38115 #endif
38116 return;
38117 @@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
38118 !phba || !phba->slow_ring_trc)
38119 return;
38120
38121 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
38122 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
38123 (lpfc_debugfs_max_slow_ring_trc - 1);
38124 dtp = phba->slow_ring_trc + index;
38125 dtp->fmt = fmt;
38126 dtp->data1 = data1;
38127 dtp->data2 = data2;
38128 dtp->data3 = data3;
38129 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
38130 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
38131 dtp->jif = jiffies;
38132 #endif
38133 return;
38134 @@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
38135 "slow_ring buffer\n");
38136 goto debug_failed;
38137 }
38138 - atomic_set(&phba->slow_ring_trc_cnt, 0);
38139 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
38140 memset(phba->slow_ring_trc, 0,
38141 (sizeof(struct lpfc_debugfs_trc) *
38142 lpfc_debugfs_max_slow_ring_trc));
38143 @@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
38144 "buffer\n");
38145 goto debug_failed;
38146 }
38147 - atomic_set(&vport->disc_trc_cnt, 0);
38148 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
38149
38150 snprintf(name, sizeof(name), "discovery_trace");
38151 vport->debug_disc_trc =
38152 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
38153 index 7dc4218..3436f08 100644
38154 --- a/drivers/scsi/lpfc/lpfc_init.c
38155 +++ b/drivers/scsi/lpfc/lpfc_init.c
38156 @@ -10589,8 +10589,10 @@ lpfc_init(void)
38157 "misc_register returned with status %d", error);
38158
38159 if (lpfc_enable_npiv) {
38160 - lpfc_transport_functions.vport_create = lpfc_vport_create;
38161 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
38162 + pax_open_kernel();
38163 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
38164 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
38165 + pax_close_kernel();
38166 }
38167 lpfc_transport_template =
38168 fc_attach_transport(&lpfc_transport_functions);
38169 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
38170 index 7f45ac9..cf62eda 100644
38171 --- a/drivers/scsi/lpfc/lpfc_scsi.c
38172 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
38173 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
38174 uint32_t evt_posted;
38175
38176 spin_lock_irqsave(&phba->hbalock, flags);
38177 - atomic_inc(&phba->num_rsrc_err);
38178 + atomic_inc_unchecked(&phba->num_rsrc_err);
38179 phba->last_rsrc_error_time = jiffies;
38180
38181 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
38182 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
38183 unsigned long flags;
38184 struct lpfc_hba *phba = vport->phba;
38185 uint32_t evt_posted;
38186 - atomic_inc(&phba->num_cmd_success);
38187 + atomic_inc_unchecked(&phba->num_cmd_success);
38188
38189 if (vport->cfg_lun_queue_depth <= queue_depth)
38190 return;
38191 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
38192 unsigned long num_rsrc_err, num_cmd_success;
38193 int i;
38194
38195 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
38196 - num_cmd_success = atomic_read(&phba->num_cmd_success);
38197 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
38198 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
38199
38200 /*
38201 * The error and success command counters are global per
38202 @@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
38203 }
38204 }
38205 lpfc_destroy_vport_work_array(phba, vports);
38206 - atomic_set(&phba->num_rsrc_err, 0);
38207 - atomic_set(&phba->num_cmd_success, 0);
38208 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
38209 + atomic_set_unchecked(&phba->num_cmd_success, 0);
38210 }
38211
38212 /**
38213 @@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
38214 }
38215 }
38216 lpfc_destroy_vport_work_array(phba, vports);
38217 - atomic_set(&phba->num_rsrc_err, 0);
38218 - atomic_set(&phba->num_cmd_success, 0);
38219 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
38220 + atomic_set_unchecked(&phba->num_cmd_success, 0);
38221 }
38222
38223 /**
38224 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
38225 index af763ea..41904f7 100644
38226 --- a/drivers/scsi/pmcraid.c
38227 +++ b/drivers/scsi/pmcraid.c
38228 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
38229 res->scsi_dev = scsi_dev;
38230 scsi_dev->hostdata = res;
38231 res->change_detected = 0;
38232 - atomic_set(&res->read_failures, 0);
38233 - atomic_set(&res->write_failures, 0);
38234 + atomic_set_unchecked(&res->read_failures, 0);
38235 + atomic_set_unchecked(&res->write_failures, 0);
38236 rc = 0;
38237 }
38238 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
38239 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
38240
38241 /* If this was a SCSI read/write command keep count of errors */
38242 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
38243 - atomic_inc(&res->read_failures);
38244 + atomic_inc_unchecked(&res->read_failures);
38245 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
38246 - atomic_inc(&res->write_failures);
38247 + atomic_inc_unchecked(&res->write_failures);
38248
38249 if (!RES_IS_GSCSI(res->cfg_entry) &&
38250 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
38251 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
38252 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
38253 * hrrq_id assigned here in queuecommand
38254 */
38255 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
38256 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
38257 pinstance->num_hrrq;
38258 cmd->cmd_done = pmcraid_io_done;
38259
38260 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
38261 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
38262 * hrrq_id assigned here in queuecommand
38263 */
38264 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
38265 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
38266 pinstance->num_hrrq;
38267
38268 if (request_size) {
38269 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
38270
38271 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
38272 /* add resources only after host is added into system */
38273 - if (!atomic_read(&pinstance->expose_resources))
38274 + if (!atomic_read_unchecked(&pinstance->expose_resources))
38275 return;
38276
38277 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
38278 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
38279 init_waitqueue_head(&pinstance->reset_wait_q);
38280
38281 atomic_set(&pinstance->outstanding_cmds, 0);
38282 - atomic_set(&pinstance->last_message_id, 0);
38283 - atomic_set(&pinstance->expose_resources, 0);
38284 + atomic_set_unchecked(&pinstance->last_message_id, 0);
38285 + atomic_set_unchecked(&pinstance->expose_resources, 0);
38286
38287 INIT_LIST_HEAD(&pinstance->free_res_q);
38288 INIT_LIST_HEAD(&pinstance->used_res_q);
38289 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
38290 /* Schedule worker thread to handle CCN and take care of adding and
38291 * removing devices to OS
38292 */
38293 - atomic_set(&pinstance->expose_resources, 1);
38294 + atomic_set_unchecked(&pinstance->expose_resources, 1);
38295 schedule_work(&pinstance->worker_q);
38296 return rc;
38297
38298 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
38299 index e1d150f..6c6df44 100644
38300 --- a/drivers/scsi/pmcraid.h
38301 +++ b/drivers/scsi/pmcraid.h
38302 @@ -748,7 +748,7 @@ struct pmcraid_instance {
38303 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
38304
38305 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
38306 - atomic_t last_message_id;
38307 + atomic_unchecked_t last_message_id;
38308
38309 /* configuration table */
38310 struct pmcraid_config_table *cfg_table;
38311 @@ -777,7 +777,7 @@ struct pmcraid_instance {
38312 atomic_t outstanding_cmds;
38313
38314 /* should add/delete resources to mid-layer now ?*/
38315 - atomic_t expose_resources;
38316 + atomic_unchecked_t expose_resources;
38317
38318
38319
38320 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
38321 struct pmcraid_config_table_entry_ext cfg_entry_ext;
38322 };
38323 struct scsi_device *scsi_dev; /* Link scsi_device structure */
38324 - atomic_t read_failures; /* count of failed READ commands */
38325 - atomic_t write_failures; /* count of failed WRITE commands */
38326 + atomic_unchecked_t read_failures; /* count of failed READ commands */
38327 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
38328
38329 /* To indicate add/delete/modify during CCN */
38330 u8 change_detected;
38331 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
38332 index 1c28215..86991a1 100644
38333 --- a/drivers/scsi/qla2xxx/qla_attr.c
38334 +++ b/drivers/scsi/qla2xxx/qla_attr.c
38335 @@ -1970,7 +1970,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
38336 return 0;
38337 }
38338
38339 -struct fc_function_template qla2xxx_transport_functions = {
38340 +fc_function_template_no_const qla2xxx_transport_functions = {
38341
38342 .show_host_node_name = 1,
38343 .show_host_port_name = 1,
38344 @@ -2017,7 +2017,7 @@ struct fc_function_template qla2xxx_transport_functions = {
38345 .bsg_timeout = qla24xx_bsg_timeout,
38346 };
38347
38348 -struct fc_function_template qla2xxx_transport_vport_functions = {
38349 +fc_function_template_no_const qla2xxx_transport_vport_functions = {
38350
38351 .show_host_node_name = 1,
38352 .show_host_port_name = 1,
38353 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
38354 index a9725bf..18eeb73 100644
38355 --- a/drivers/scsi/qla2xxx/qla_def.h
38356 +++ b/drivers/scsi/qla2xxx/qla_def.h
38357 @@ -2385,7 +2385,7 @@ struct isp_operations {
38358 int (*start_scsi) (srb_t *);
38359 int (*abort_isp) (struct scsi_qla_host *);
38360 int (*iospace_config)(struct qla_hw_data*);
38361 -};
38362 +} __no_const;
38363
38364 /* MSI-X Support *************************************************************/
38365
38366 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
38367 index 6acb397..d86e3e0 100644
38368 --- a/drivers/scsi/qla2xxx/qla_gbl.h
38369 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
38370 @@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
38371 struct device_attribute;
38372 extern struct device_attribute *qla2x00_host_attrs[];
38373 struct fc_function_template;
38374 -extern struct fc_function_template qla2xxx_transport_functions;
38375 -extern struct fc_function_template qla2xxx_transport_vport_functions;
38376 +extern fc_function_template_no_const qla2xxx_transport_functions;
38377 +extern fc_function_template_no_const qla2xxx_transport_vport_functions;
38378 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
38379 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
38380 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
38381 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
38382 index 329d553..f20d31d 100644
38383 --- a/drivers/scsi/qla4xxx/ql4_def.h
38384 +++ b/drivers/scsi/qla4xxx/ql4_def.h
38385 @@ -273,7 +273,7 @@ struct ddb_entry {
38386 * (4000 only) */
38387 atomic_t relogin_timer; /* Max Time to wait for
38388 * relogin to complete */
38389 - atomic_t relogin_retry_count; /* Num of times relogin has been
38390 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
38391 * retried */
38392 uint32_t default_time2wait; /* Default Min time between
38393 * relogins (+aens) */
38394 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
38395 index fbc546e..c7d1b48 100644
38396 --- a/drivers/scsi/qla4xxx/ql4_os.c
38397 +++ b/drivers/scsi/qla4xxx/ql4_os.c
38398 @@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
38399 */
38400 if (!iscsi_is_session_online(cls_sess)) {
38401 /* Reset retry relogin timer */
38402 - atomic_inc(&ddb_entry->relogin_retry_count);
38403 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
38404 DEBUG2(ql4_printk(KERN_INFO, ha,
38405 "%s: index[%d] relogin timed out-retrying"
38406 " relogin (%d), retry (%d)\n", __func__,
38407 ddb_entry->fw_ddb_index,
38408 - atomic_read(&ddb_entry->relogin_retry_count),
38409 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
38410 ddb_entry->default_time2wait + 4));
38411 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
38412 atomic_set(&ddb_entry->retry_relogin_timer,
38413 @@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
38414
38415 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
38416 atomic_set(&ddb_entry->relogin_timer, 0);
38417 - atomic_set(&ddb_entry->relogin_retry_count, 0);
38418 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
38419 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
38420 ddb_entry->default_relogin_timeout =
38421 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
38422 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
38423 index 2c0d0ec..4e8681a 100644
38424 --- a/drivers/scsi/scsi.c
38425 +++ b/drivers/scsi/scsi.c
38426 @@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
38427 unsigned long timeout;
38428 int rtn = 0;
38429
38430 - atomic_inc(&cmd->device->iorequest_cnt);
38431 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
38432
38433 /* check if the device is still usable */
38434 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
38435 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
38436 index 9032e91..7a805d0 100644
38437 --- a/drivers/scsi/scsi_lib.c
38438 +++ b/drivers/scsi/scsi_lib.c
38439 @@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
38440 shost = sdev->host;
38441 scsi_init_cmd_errh(cmd);
38442 cmd->result = DID_NO_CONNECT << 16;
38443 - atomic_inc(&cmd->device->iorequest_cnt);
38444 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
38445
38446 /*
38447 * SCSI request completion path will do scsi_device_unbusy(),
38448 @@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
38449
38450 INIT_LIST_HEAD(&cmd->eh_entry);
38451
38452 - atomic_inc(&cmd->device->iodone_cnt);
38453 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
38454 if (cmd->result)
38455 - atomic_inc(&cmd->device->ioerr_cnt);
38456 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
38457
38458 disposition = scsi_decide_disposition(cmd);
38459 if (disposition != SUCCESS &&
38460 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
38461 index ce5224c..8c6d071 100644
38462 --- a/drivers/scsi/scsi_sysfs.c
38463 +++ b/drivers/scsi/scsi_sysfs.c
38464 @@ -661,7 +661,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
38465 char *buf) \
38466 { \
38467 struct scsi_device *sdev = to_scsi_device(dev); \
38468 - unsigned long long count = atomic_read(&sdev->field); \
38469 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
38470 return snprintf(buf, 20, "0x%llx\n", count); \
38471 } \
38472 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
38473 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
38474 index 84a1fdf..693b0d6 100644
38475 --- a/drivers/scsi/scsi_tgt_lib.c
38476 +++ b/drivers/scsi/scsi_tgt_lib.c
38477 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
38478 int err;
38479
38480 dprintk("%lx %u\n", uaddr, len);
38481 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
38482 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
38483 if (err) {
38484 /*
38485 * TODO: need to fixup sg_tablesize, max_segment_size,
38486 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
38487 index e894ca7..de9d7660 100644
38488 --- a/drivers/scsi/scsi_transport_fc.c
38489 +++ b/drivers/scsi/scsi_transport_fc.c
38490 @@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
38491 * Netlink Infrastructure
38492 */
38493
38494 -static atomic_t fc_event_seq;
38495 +static atomic_unchecked_t fc_event_seq;
38496
38497 /**
38498 * fc_get_event_number - Obtain the next sequential FC event number
38499 @@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
38500 u32
38501 fc_get_event_number(void)
38502 {
38503 - return atomic_add_return(1, &fc_event_seq);
38504 + return atomic_add_return_unchecked(1, &fc_event_seq);
38505 }
38506 EXPORT_SYMBOL(fc_get_event_number);
38507
38508 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
38509 {
38510 int error;
38511
38512 - atomic_set(&fc_event_seq, 0);
38513 + atomic_set_unchecked(&fc_event_seq, 0);
38514
38515 error = transport_class_register(&fc_host_class);
38516 if (error)
38517 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
38518 char *cp;
38519
38520 *val = simple_strtoul(buf, &cp, 0);
38521 - if ((*cp && (*cp != '\n')) || (*val < 0))
38522 + if (*cp && (*cp != '\n'))
38523 return -EINVAL;
38524 /*
38525 * Check for overflow; dev_loss_tmo is u32
38526 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
38527 index 31969f2..2b348f0 100644
38528 --- a/drivers/scsi/scsi_transport_iscsi.c
38529 +++ b/drivers/scsi/scsi_transport_iscsi.c
38530 @@ -79,7 +79,7 @@ struct iscsi_internal {
38531 struct transport_container session_cont;
38532 };
38533
38534 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
38535 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
38536 static struct workqueue_struct *iscsi_eh_timer_workq;
38537
38538 static DEFINE_IDA(iscsi_sess_ida);
38539 @@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
38540 int err;
38541
38542 ihost = shost->shost_data;
38543 - session->sid = atomic_add_return(1, &iscsi_session_nr);
38544 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
38545
38546 if (target_id == ISCSI_MAX_TARGET) {
38547 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
38548 @@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
38549 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
38550 ISCSI_TRANSPORT_VERSION);
38551
38552 - atomic_set(&iscsi_session_nr, 0);
38553 + atomic_set_unchecked(&iscsi_session_nr, 0);
38554
38555 err = class_register(&iscsi_transport_class);
38556 if (err)
38557 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
38558 index 21a045e..ec89e03 100644
38559 --- a/drivers/scsi/scsi_transport_srp.c
38560 +++ b/drivers/scsi/scsi_transport_srp.c
38561 @@ -33,7 +33,7 @@
38562 #include "scsi_transport_srp_internal.h"
38563
38564 struct srp_host_attrs {
38565 - atomic_t next_port_id;
38566 + atomic_unchecked_t next_port_id;
38567 };
38568 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
38569
38570 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
38571 struct Scsi_Host *shost = dev_to_shost(dev);
38572 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
38573
38574 - atomic_set(&srp_host->next_port_id, 0);
38575 + atomic_set_unchecked(&srp_host->next_port_id, 0);
38576 return 0;
38577 }
38578
38579 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
38580 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
38581 rport->roles = ids->roles;
38582
38583 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
38584 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
38585 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
38586
38587 transport_setup_device(&rport->dev);
38588 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
38589 index 352bc77..c049b14 100644
38590 --- a/drivers/scsi/sd.c
38591 +++ b/drivers/scsi/sd.c
38592 @@ -2899,7 +2899,7 @@ static int sd_probe(struct device *dev)
38593 sdkp->disk = gd;
38594 sdkp->index = index;
38595 atomic_set(&sdkp->openers, 0);
38596 - atomic_set(&sdkp->device->ioerr_cnt, 0);
38597 + atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
38598
38599 if (!sdp->request_queue->rq_timeout) {
38600 if (sdp->type != TYPE_MOD)
38601 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
38602 index be2c9a6..275525c 100644
38603 --- a/drivers/scsi/sg.c
38604 +++ b/drivers/scsi/sg.c
38605 @@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
38606 sdp->disk->disk_name,
38607 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
38608 NULL,
38609 - (char *)arg);
38610 + (char __user *)arg);
38611 case BLKTRACESTART:
38612 return blk_trace_startstop(sdp->device->request_queue, 1);
38613 case BLKTRACESTOP:
38614 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
38615 index 84c2861..ece0a31 100644
38616 --- a/drivers/spi/spi.c
38617 +++ b/drivers/spi/spi.c
38618 @@ -1453,7 +1453,7 @@ int spi_bus_unlock(struct spi_master *master)
38619 EXPORT_SYMBOL_GPL(spi_bus_unlock);
38620
38621 /* portable code must never pass more than 32 bytes */
38622 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
38623 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
38624
38625 static u8 *buf;
38626
38627 diff --git a/drivers/staging/csr/sdio_mmc.c b/drivers/staging/csr/sdio_mmc.c
38628 index af3e40b..29de4f5 100644
38629 --- a/drivers/staging/csr/sdio_mmc.c
38630 +++ b/drivers/staging/csr/sdio_mmc.c
38631 @@ -898,7 +898,7 @@ struct uf_sdio_mmc_pm_notifier
38632 struct list_head list;
38633
38634 CsrSdioFunction *sdio_ctx;
38635 - struct notifier_block pm_notifier;
38636 + notifier_block_no_const pm_notifier;
38637 };
38638
38639 /* PM notifier list head */
38640 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
38641 index 34afc16..ffe44dd 100644
38642 --- a/drivers/staging/octeon/ethernet-rx.c
38643 +++ b/drivers/staging/octeon/ethernet-rx.c
38644 @@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
38645 /* Increment RX stats for virtual ports */
38646 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
38647 #ifdef CONFIG_64BIT
38648 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
38649 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
38650 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
38651 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
38652 #else
38653 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
38654 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
38655 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
38656 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
38657 #endif
38658 }
38659 netif_receive_skb(skb);
38660 @@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
38661 dev->name);
38662 */
38663 #ifdef CONFIG_64BIT
38664 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
38665 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38666 #else
38667 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
38668 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
38669 #endif
38670 dev_kfree_skb_irq(skb);
38671 }
38672 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
38673 index 683bedc..86dba9a 100644
38674 --- a/drivers/staging/octeon/ethernet.c
38675 +++ b/drivers/staging/octeon/ethernet.c
38676 @@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
38677 * since the RX tasklet also increments it.
38678 */
38679 #ifdef CONFIG_64BIT
38680 - atomic64_add(rx_status.dropped_packets,
38681 - (atomic64_t *)&priv->stats.rx_dropped);
38682 + atomic64_add_unchecked(rx_status.dropped_packets,
38683 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38684 #else
38685 - atomic_add(rx_status.dropped_packets,
38686 - (atomic_t *)&priv->stats.rx_dropped);
38687 + atomic_add_unchecked(rx_status.dropped_packets,
38688 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
38689 #endif
38690 }
38691
38692 diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
38693 index a2b7e03..aaf3630 100644
38694 --- a/drivers/staging/ramster/tmem.c
38695 +++ b/drivers/staging/ramster/tmem.c
38696 @@ -50,25 +50,25 @@
38697 * A tmem host implementation must use this function to register callbacks
38698 * for memory allocation.
38699 */
38700 -static struct tmem_hostops tmem_hostops;
38701 +static struct tmem_hostops *tmem_hostops;
38702
38703 static void tmem_objnode_tree_init(void);
38704
38705 void tmem_register_hostops(struct tmem_hostops *m)
38706 {
38707 tmem_objnode_tree_init();
38708 - tmem_hostops = *m;
38709 + tmem_hostops = m;
38710 }
38711
38712 /*
38713 * A tmem host implementation must use this function to register
38714 * callbacks for a page-accessible memory (PAM) implementation.
38715 */
38716 -static struct tmem_pamops tmem_pamops;
38717 +static struct tmem_pamops *tmem_pamops;
38718
38719 void tmem_register_pamops(struct tmem_pamops *m)
38720 {
38721 - tmem_pamops = *m;
38722 + tmem_pamops = m;
38723 }
38724
38725 /*
38726 @@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
38727 obj->pampd_count = 0;
38728 #ifdef CONFIG_RAMSTER
38729 if (tmem_pamops.new_obj != NULL)
38730 - (*tmem_pamops.new_obj)(obj);
38731 + (tmem_pamops->new_obj)(obj);
38732 #endif
38733 SET_SENTINEL(obj, OBJ);
38734
38735 @@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
38736 rbnode = rb_next(rbnode);
38737 tmem_pampd_destroy_all_in_obj(obj, true);
38738 tmem_obj_free(obj, hb);
38739 - (*tmem_hostops.obj_free)(obj, pool);
38740 + (tmem_hostops->obj_free)(obj, pool);
38741 }
38742 spin_unlock(&hb->lock);
38743 }
38744 @@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
38745 ASSERT_SENTINEL(obj, OBJ);
38746 BUG_ON(obj->pool == NULL);
38747 ASSERT_SENTINEL(obj->pool, POOL);
38748 - objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
38749 + objnode = (tmem_hostops->objnode_alloc)(obj->pool);
38750 if (unlikely(objnode == NULL))
38751 goto out;
38752 objnode->obj = obj;
38753 @@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
38754 ASSERT_SENTINEL(pool, POOL);
38755 objnode->obj->objnode_count--;
38756 objnode->obj = NULL;
38757 - (*tmem_hostops.objnode_free)(objnode, pool);
38758 + (tmem_hostops->objnode_free)(objnode, pool);
38759 }
38760
38761 /*
38762 @@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
38763 void *old_pampd = *(void **)slot;
38764 *(void **)slot = new_pampd;
38765 if (!no_free)
38766 - (*tmem_pamops.free)(old_pampd, obj->pool,
38767 + (tmem_pamops->free)(old_pampd, obj->pool,
38768 NULL, 0, false);
38769 ret = new_pampd;
38770 }
38771 @@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
38772 if (objnode->slots[i]) {
38773 if (ht == 1) {
38774 obj->pampd_count--;
38775 - (*tmem_pamops.free)(objnode->slots[i],
38776 + (tmem_pamops->free)(objnode->slots[i],
38777 obj->pool, NULL, 0, true);
38778 objnode->slots[i] = NULL;
38779 continue;
38780 @@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
38781 return;
38782 if (obj->objnode_tree_height == 0) {
38783 obj->pampd_count--;
38784 - (*tmem_pamops.free)(obj->objnode_tree_root,
38785 + (tmem_pamops->free)(obj->objnode_tree_root,
38786 obj->pool, NULL, 0, true);
38787 } else {
38788 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
38789 @@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
38790 obj->objnode_tree_root = NULL;
38791 #ifdef CONFIG_RAMSTER
38792 if (tmem_pamops.free_obj != NULL)
38793 - (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
38794 + (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
38795 #endif
38796 }
38797
38798 @@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
38799 /* if found, is a dup put, flush the old one */
38800 pampd_del = tmem_pampd_delete_from_obj(obj, index);
38801 BUG_ON(pampd_del != pampd);
38802 - (*tmem_pamops.free)(pampd, pool, oidp, index, true);
38803 + (tmem_pamops->free)(pampd, pool, oidp, index, true);
38804 if (obj->pampd_count == 0) {
38805 objnew = obj;
38806 objfound = NULL;
38807 @@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
38808 pampd = NULL;
38809 }
38810 } else {
38811 - obj = objnew = (*tmem_hostops.obj_alloc)(pool);
38812 + obj = objnew = (tmem_hostops->obj_alloc)(pool);
38813 if (unlikely(obj == NULL)) {
38814 ret = -ENOMEM;
38815 goto out;
38816 @@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
38817 if (unlikely(ret == -ENOMEM))
38818 /* may have partially built objnode tree ("stump") */
38819 goto delete_and_free;
38820 - (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
38821 + (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
38822 goto out;
38823
38824 delete_and_free:
38825 (void)tmem_pampd_delete_from_obj(obj, index);
38826 if (pampd)
38827 - (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
38828 + (tmem_pamops->free)(pampd, pool, NULL, 0, true);
38829 if (objnew) {
38830 tmem_obj_free(objnew, hb);
38831 - (*tmem_hostops.obj_free)(objnew, pool);
38832 + (tmem_hostops->obj_free)(objnew, pool);
38833 }
38834 out:
38835 spin_unlock(&hb->lock);
38836 @@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
38837 if (pampd != NULL) {
38838 BUG_ON(obj == NULL);
38839 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
38840 - (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
38841 + (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
38842 } else if (delete) {
38843 BUG_ON(obj == NULL);
38844 (void)tmem_pampd_delete_from_obj(obj, index);
38845 @@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
38846 int ret = 0;
38847
38848 if (!is_ephemeral(pool))
38849 - new_pampd = (*tmem_pamops.repatriate_preload)(
38850 + new_pampd = (tmem_pamops->repatriate_preload)(
38851 old_pampd, pool, oidp, index, &intransit);
38852 if (intransit)
38853 ret = -EAGAIN;
38854 @@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
38855 /* must release the hb->lock else repatriate can't sleep */
38856 spin_unlock(&hb->lock);
38857 if (!intransit)
38858 - ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
38859 + ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
38860 oidp, index, free, data);
38861 if (ret == -EAGAIN) {
38862 /* rare I think, but should cond_resched()??? */
38863 @@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
38864 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
38865 /* if we bug here, pamops wasn't properly set up for ramster */
38866 BUG_ON(tmem_pamops.replace_in_obj == NULL);
38867 - ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
38868 + ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
38869 out:
38870 spin_unlock(&hb->lock);
38871 return ret;
38872 @@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
38873 if (free) {
38874 if (obj->pampd_count == 0) {
38875 tmem_obj_free(obj, hb);
38876 - (*tmem_hostops.obj_free)(obj, pool);
38877 + (tmem_hostops->obj_free)(obj, pool);
38878 obj = NULL;
38879 }
38880 }
38881 if (free)
38882 - ret = (*tmem_pamops.get_data_and_free)(
38883 + ret = (tmem_pamops->get_data_and_free)(
38884 data, sizep, raw, pampd, pool, oidp, index);
38885 else
38886 - ret = (*tmem_pamops.get_data)(
38887 + ret = (tmem_pamops->get_data)(
38888 data, sizep, raw, pampd, pool, oidp, index);
38889 if (ret < 0)
38890 goto out;
38891 @@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
38892 pampd = tmem_pampd_delete_from_obj(obj, index);
38893 if (pampd == NULL)
38894 goto out;
38895 - (*tmem_pamops.free)(pampd, pool, oidp, index, true);
38896 + (tmem_pamops->free)(pampd, pool, oidp, index, true);
38897 if (obj->pampd_count == 0) {
38898 tmem_obj_free(obj, hb);
38899 - (*tmem_hostops.obj_free)(obj, pool);
38900 + (tmem_hostops->obj_free)(obj, pool);
38901 }
38902 ret = 0;
38903
38904 @@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
38905 goto out;
38906 tmem_pampd_destroy_all_in_obj(obj, false);
38907 tmem_obj_free(obj, hb);
38908 - (*tmem_hostops.obj_free)(obj, pool);
38909 + (tmem_hostops->obj_free)(obj, pool);
38910 ret = 0;
38911
38912 out:
38913 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
38914 index dc23395..cf7e9b1 100644
38915 --- a/drivers/staging/rtl8712/rtl871x_io.h
38916 +++ b/drivers/staging/rtl8712/rtl871x_io.h
38917 @@ -108,7 +108,7 @@ struct _io_ops {
38918 u8 *pmem);
38919 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
38920 u8 *pmem);
38921 -};
38922 +} __no_const;
38923
38924 struct io_req {
38925 struct list_head list;
38926 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
38927 index 180c963..1f18377 100644
38928 --- a/drivers/staging/sbe-2t3e3/netdev.c
38929 +++ b/drivers/staging/sbe-2t3e3/netdev.c
38930 @@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38931 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
38932
38933 if (rlen)
38934 - if (copy_to_user(data, &resp, rlen))
38935 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
38936 return -EFAULT;
38937
38938 return 0;
38939 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
38940 index 5d89c0f..9261317 100644
38941 --- a/drivers/staging/usbip/usbip_common.h
38942 +++ b/drivers/staging/usbip/usbip_common.h
38943 @@ -289,7 +289,7 @@ struct usbip_device {
38944 void (*shutdown)(struct usbip_device *);
38945 void (*reset)(struct usbip_device *);
38946 void (*unusable)(struct usbip_device *);
38947 - } eh_ops;
38948 + } __no_const eh_ops;
38949 };
38950
38951 #define kthread_get_run(threadfn, data, namefmt, ...) \
38952 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
38953 index c66b8b3..a4a035b 100644
38954 --- a/drivers/staging/usbip/vhci.h
38955 +++ b/drivers/staging/usbip/vhci.h
38956 @@ -83,7 +83,7 @@ struct vhci_hcd {
38957 unsigned resuming:1;
38958 unsigned long re_timeout;
38959
38960 - atomic_t seqnum;
38961 + atomic_unchecked_t seqnum;
38962
38963 /*
38964 * NOTE:
38965 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
38966 index 620d1be..1cd6711 100644
38967 --- a/drivers/staging/usbip/vhci_hcd.c
38968 +++ b/drivers/staging/usbip/vhci_hcd.c
38969 @@ -471,7 +471,7 @@ static void vhci_tx_urb(struct urb *urb)
38970 return;
38971 }
38972
38973 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
38974 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38975 if (priv->seqnum == 0xffff)
38976 dev_info(&urb->dev->dev, "seqnum max\n");
38977
38978 @@ -723,7 +723,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
38979 return -ENOMEM;
38980 }
38981
38982 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
38983 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38984 if (unlink->seqnum == 0xffff)
38985 pr_info("seqnum max\n");
38986
38987 @@ -924,7 +924,7 @@ static int vhci_start(struct usb_hcd *hcd)
38988 vdev->rhport = rhport;
38989 }
38990
38991 - atomic_set(&vhci->seqnum, 0);
38992 + atomic_set_unchecked(&vhci->seqnum, 0);
38993 spin_lock_init(&vhci->lock);
38994
38995 hcd->power_budget = 0; /* no limit */
38996 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
38997 index f0eaf04..5a82e06 100644
38998 --- a/drivers/staging/usbip/vhci_rx.c
38999 +++ b/drivers/staging/usbip/vhci_rx.c
39000 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
39001 if (!urb) {
39002 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
39003 pr_info("max seqnum %d\n",
39004 - atomic_read(&the_controller->seqnum));
39005 + atomic_read_unchecked(&the_controller->seqnum));
39006 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
39007 return;
39008 }
39009 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
39010 index 67b1b88..6392fe9 100644
39011 --- a/drivers/staging/vt6655/hostap.c
39012 +++ b/drivers/staging/vt6655/hostap.c
39013 @@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
39014 *
39015 */
39016
39017 +static net_device_ops_no_const apdev_netdev_ops;
39018 +
39019 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39020 {
39021 PSDevice apdev_priv;
39022 struct net_device *dev = pDevice->dev;
39023 int ret;
39024 - const struct net_device_ops apdev_netdev_ops = {
39025 - .ndo_start_xmit = pDevice->tx_80211,
39026 - };
39027
39028 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
39029
39030 @@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39031 *apdev_priv = *pDevice;
39032 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
39033
39034 + /* only half broken now */
39035 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
39036 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
39037
39038 pDevice->apdev->type = ARPHRD_IEEE80211;
39039 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
39040 index 0a73d40..6fda560 100644
39041 --- a/drivers/staging/vt6656/hostap.c
39042 +++ b/drivers/staging/vt6656/hostap.c
39043 @@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
39044 *
39045 */
39046
39047 +static net_device_ops_no_const apdev_netdev_ops;
39048 +
39049 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39050 {
39051 PSDevice apdev_priv;
39052 struct net_device *dev = pDevice->dev;
39053 int ret;
39054 - const struct net_device_ops apdev_netdev_ops = {
39055 - .ndo_start_xmit = pDevice->tx_80211,
39056 - };
39057
39058 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
39059
39060 @@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39061 *apdev_priv = *pDevice;
39062 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
39063
39064 + /* only half broken now */
39065 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
39066 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
39067
39068 pDevice->apdev->type = ARPHRD_IEEE80211;
39069 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
39070 index f180c3d..4b9ecfc 100644
39071 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
39072 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
39073 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
39074
39075 struct usbctlx_completor {
39076 int (*complete) (struct usbctlx_completor *);
39077 -};
39078 +} __no_const;
39079
39080 static int
39081 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
39082 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
39083 index 56c8e60..1920c63 100644
39084 --- a/drivers/staging/zcache/tmem.c
39085 +++ b/drivers/staging/zcache/tmem.c
39086 @@ -39,7 +39,7 @@
39087 * A tmem host implementation must use this function to register callbacks
39088 * for memory allocation.
39089 */
39090 -static struct tmem_hostops tmem_hostops;
39091 +static tmem_hostops_no_const tmem_hostops;
39092
39093 static void tmem_objnode_tree_init(void);
39094
39095 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
39096 * A tmem host implementation must use this function to register
39097 * callbacks for a page-accessible memory (PAM) implementation
39098 */
39099 -static struct tmem_pamops tmem_pamops;
39100 +static tmem_pamops_no_const tmem_pamops;
39101
39102 void tmem_register_pamops(struct tmem_pamops *m)
39103 {
39104 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
39105 index 0d4aa82..f7832d4 100644
39106 --- a/drivers/staging/zcache/tmem.h
39107 +++ b/drivers/staging/zcache/tmem.h
39108 @@ -180,6 +180,7 @@ struct tmem_pamops {
39109 void (*new_obj)(struct tmem_obj *);
39110 int (*replace_in_obj)(void *, struct tmem_obj *);
39111 };
39112 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
39113 extern void tmem_register_pamops(struct tmem_pamops *m);
39114
39115 /* memory allocation methods provided by the host implementation */
39116 @@ -189,6 +190,7 @@ struct tmem_hostops {
39117 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
39118 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
39119 };
39120 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
39121 extern void tmem_register_hostops(struct tmem_hostops *m);
39122
39123 /* core tmem accessor functions */
39124 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
39125 index dcecbfb..4fad7d5 100644
39126 --- a/drivers/target/target_core_transport.c
39127 +++ b/drivers/target/target_core_transport.c
39128 @@ -1087,7 +1087,7 @@ struct se_device *transport_add_device_to_core_hba(
39129 spin_lock_init(&dev->se_port_lock);
39130 spin_lock_init(&dev->se_tmr_lock);
39131 spin_lock_init(&dev->qf_cmd_lock);
39132 - atomic_set(&dev->dev_ordered_id, 0);
39133 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
39134
39135 se_dev_set_default_attribs(dev, dev_limits);
39136
39137 @@ -1277,7 +1277,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
39138 * Used to determine when ORDERED commands should go from
39139 * Dormant to Active status.
39140 */
39141 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
39142 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
39143 smp_mb__after_atomic_inc();
39144 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
39145 cmd->se_ordered_id, cmd->sam_task_attr,
39146 diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
39147 index 0a6a0bc..5501b06 100644
39148 --- a/drivers/tty/cyclades.c
39149 +++ b/drivers/tty/cyclades.c
39150 @@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
39151 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
39152 info->port.count);
39153 #endif
39154 - info->port.count++;
39155 + atomic_inc(&info->port.count);
39156 #ifdef CY_DEBUG_COUNT
39157 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
39158 - current->pid, info->port.count);
39159 + current->pid, atomic_read(&info->port.count));
39160 #endif
39161
39162 /*
39163 @@ -3989,7 +3989,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
39164 for (j = 0; j < cy_card[i].nports; j++) {
39165 info = &cy_card[i].ports[j];
39166
39167 - if (info->port.count) {
39168 + if (atomic_read(&info->port.count)) {
39169 /* XXX is the ldisc num worth this? */
39170 struct tty_struct *tty;
39171 struct tty_ldisc *ld;
39172 diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
39173 index 13ee53b..418d164 100644
39174 --- a/drivers/tty/hvc/hvc_console.c
39175 +++ b/drivers/tty/hvc/hvc_console.c
39176 @@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
39177
39178 spin_lock_irqsave(&hp->port.lock, flags);
39179 /* Check and then increment for fast path open. */
39180 - if (hp->port.count++ > 0) {
39181 + if (atomic_inc_return(&hp->port.count) > 1) {
39182 spin_unlock_irqrestore(&hp->port.lock, flags);
39183 hvc_kick();
39184 return 0;
39185 @@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
39186
39187 spin_lock_irqsave(&hp->port.lock, flags);
39188
39189 - if (--hp->port.count == 0) {
39190 + if (atomic_dec_return(&hp->port.count) == 0) {
39191 spin_unlock_irqrestore(&hp->port.lock, flags);
39192 /* We are done with the tty pointer now. */
39193 tty_port_tty_set(&hp->port, NULL);
39194 @@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
39195 */
39196 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
39197 } else {
39198 - if (hp->port.count < 0)
39199 + if (atomic_read(&hp->port.count) < 0)
39200 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
39201 - hp->vtermno, hp->port.count);
39202 + hp->vtermno, atomic_read(&hp->port.count));
39203 spin_unlock_irqrestore(&hp->port.lock, flags);
39204 }
39205 }
39206 @@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
39207 * open->hangup case this can be called after the final close so prevent
39208 * that from happening for now.
39209 */
39210 - if (hp->port.count <= 0) {
39211 + if (atomic_read(&hp->port.count) <= 0) {
39212 spin_unlock_irqrestore(&hp->port.lock, flags);
39213 return;
39214 }
39215
39216 - hp->port.count = 0;
39217 + atomic_set(&hp->port.count, 0);
39218 spin_unlock_irqrestore(&hp->port.lock, flags);
39219 tty_port_tty_set(&hp->port, NULL);
39220
39221 @@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
39222 return -EPIPE;
39223
39224 /* FIXME what's this (unprotected) check for? */
39225 - if (hp->port.count <= 0)
39226 + if (atomic_read(&hp->port.count) <= 0)
39227 return -EIO;
39228
39229 spin_lock_irqsave(&hp->lock, flags);
39230 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
39231 index cab5c7a..4cc66ea 100644
39232 --- a/drivers/tty/hvc/hvcs.c
39233 +++ b/drivers/tty/hvc/hvcs.c
39234 @@ -83,6 +83,7 @@
39235 #include <asm/hvcserver.h>
39236 #include <asm/uaccess.h>
39237 #include <asm/vio.h>
39238 +#include <asm/local.h>
39239
39240 /*
39241 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
39242 @@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
39243
39244 spin_lock_irqsave(&hvcsd->lock, flags);
39245
39246 - if (hvcsd->port.count > 0) {
39247 + if (atomic_read(&hvcsd->port.count) > 0) {
39248 spin_unlock_irqrestore(&hvcsd->lock, flags);
39249 printk(KERN_INFO "HVCS: vterm state unchanged. "
39250 "The hvcs device node is still in use.\n");
39251 @@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
39252 }
39253 }
39254
39255 - hvcsd->port.count = 0;
39256 + atomic_set(&hvcsd->port.count, 0);
39257 hvcsd->port.tty = tty;
39258 tty->driver_data = hvcsd;
39259
39260 @@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
39261 unsigned long flags;
39262
39263 spin_lock_irqsave(&hvcsd->lock, flags);
39264 - hvcsd->port.count++;
39265 + atomic_inc(&hvcsd->port.count);
39266 hvcsd->todo_mask |= HVCS_SCHED_READ;
39267 spin_unlock_irqrestore(&hvcsd->lock, flags);
39268
39269 @@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
39270 hvcsd = tty->driver_data;
39271
39272 spin_lock_irqsave(&hvcsd->lock, flags);
39273 - if (--hvcsd->port.count == 0) {
39274 + if (atomic_dec_and_test(&hvcsd->port.count)) {
39275
39276 vio_disable_interrupts(hvcsd->vdev);
39277
39278 @@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
39279
39280 free_irq(irq, hvcsd);
39281 return;
39282 - } else if (hvcsd->port.count < 0) {
39283 + } else if (atomic_read(&hvcsd->port.count) < 0) {
39284 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
39285 " is missmanaged.\n",
39286 - hvcsd->vdev->unit_address, hvcsd->port.count);
39287 + hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
39288 }
39289
39290 spin_unlock_irqrestore(&hvcsd->lock, flags);
39291 @@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
39292
39293 spin_lock_irqsave(&hvcsd->lock, flags);
39294 /* Preserve this so that we know how many kref refs to put */
39295 - temp_open_count = hvcsd->port.count;
39296 + temp_open_count = atomic_read(&hvcsd->port.count);
39297
39298 /*
39299 * Don't kref put inside the spinlock because the destruction
39300 @@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
39301 tty->driver_data = NULL;
39302 hvcsd->port.tty = NULL;
39303
39304 - hvcsd->port.count = 0;
39305 + atomic_set(&hvcsd->port.count, 0);
39306
39307 /* This will drop any buffered data on the floor which is OK in a hangup
39308 * scenario. */
39309 @@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
39310 * the middle of a write operation? This is a crummy place to do this
39311 * but we want to keep it all in the spinlock.
39312 */
39313 - if (hvcsd->port.count <= 0) {
39314 + if (atomic_read(&hvcsd->port.count) <= 0) {
39315 spin_unlock_irqrestore(&hvcsd->lock, flags);
39316 return -ENODEV;
39317 }
39318 @@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
39319 {
39320 struct hvcs_struct *hvcsd = tty->driver_data;
39321
39322 - if (!hvcsd || hvcsd->port.count <= 0)
39323 + if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
39324 return 0;
39325
39326 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
39327 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
39328 index 160f0ad..588b853 100644
39329 --- a/drivers/tty/ipwireless/tty.c
39330 +++ b/drivers/tty/ipwireless/tty.c
39331 @@ -29,6 +29,7 @@
39332 #include <linux/tty_driver.h>
39333 #include <linux/tty_flip.h>
39334 #include <linux/uaccess.h>
39335 +#include <asm/local.h>
39336
39337 #include "tty.h"
39338 #include "network.h"
39339 @@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
39340 mutex_unlock(&tty->ipw_tty_mutex);
39341 return -ENODEV;
39342 }
39343 - if (tty->port.count == 0)
39344 + if (atomic_read(&tty->port.count) == 0)
39345 tty->tx_bytes_queued = 0;
39346
39347 - tty->port.count++;
39348 + atomic_inc(&tty->port.count);
39349
39350 tty->port.tty = linux_tty;
39351 linux_tty->driver_data = tty;
39352 @@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
39353
39354 static void do_ipw_close(struct ipw_tty *tty)
39355 {
39356 - tty->port.count--;
39357 -
39358 - if (tty->port.count == 0) {
39359 + if (atomic_dec_return(&tty->port.count) == 0) {
39360 struct tty_struct *linux_tty = tty->port.tty;
39361
39362 if (linux_tty != NULL) {
39363 @@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
39364 return;
39365
39366 mutex_lock(&tty->ipw_tty_mutex);
39367 - if (tty->port.count == 0) {
39368 + if (atomic_read(&tty->port.count) == 0) {
39369 mutex_unlock(&tty->ipw_tty_mutex);
39370 return;
39371 }
39372 @@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
39373 return;
39374 }
39375
39376 - if (!tty->port.count) {
39377 + if (!atomic_read(&tty->port.count)) {
39378 mutex_unlock(&tty->ipw_tty_mutex);
39379 return;
39380 }
39381 @@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
39382 return -ENODEV;
39383
39384 mutex_lock(&tty->ipw_tty_mutex);
39385 - if (!tty->port.count) {
39386 + if (!atomic_read(&tty->port.count)) {
39387 mutex_unlock(&tty->ipw_tty_mutex);
39388 return -EINVAL;
39389 }
39390 @@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
39391 if (!tty)
39392 return -ENODEV;
39393
39394 - if (!tty->port.count)
39395 + if (!atomic_read(&tty->port.count))
39396 return -EINVAL;
39397
39398 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
39399 @@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
39400 if (!tty)
39401 return 0;
39402
39403 - if (!tty->port.count)
39404 + if (!atomic_read(&tty->port.count))
39405 return 0;
39406
39407 return tty->tx_bytes_queued;
39408 @@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
39409 if (!tty)
39410 return -ENODEV;
39411
39412 - if (!tty->port.count)
39413 + if (!atomic_read(&tty->port.count))
39414 return -EINVAL;
39415
39416 return get_control_lines(tty);
39417 @@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
39418 if (!tty)
39419 return -ENODEV;
39420
39421 - if (!tty->port.count)
39422 + if (!atomic_read(&tty->port.count))
39423 return -EINVAL;
39424
39425 return set_control_lines(tty, set, clear);
39426 @@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
39427 if (!tty)
39428 return -ENODEV;
39429
39430 - if (!tty->port.count)
39431 + if (!atomic_read(&tty->port.count))
39432 return -EINVAL;
39433
39434 /* FIXME: Exactly how is the tty object locked here .. */
39435 @@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
39436 * are gone */
39437 mutex_lock(&ttyj->ipw_tty_mutex);
39438 }
39439 - while (ttyj->port.count)
39440 + while (atomic_read(&ttyj->port.count))
39441 do_ipw_close(ttyj);
39442 ipwireless_disassociate_network_ttys(network,
39443 ttyj->channel_idx);
39444 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
39445 index 56e616b..9d9f10a 100644
39446 --- a/drivers/tty/moxa.c
39447 +++ b/drivers/tty/moxa.c
39448 @@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
39449 }
39450
39451 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
39452 - ch->port.count++;
39453 + atomic_inc(&ch->port.count);
39454 tty->driver_data = ch;
39455 tty_port_tty_set(&ch->port, tty);
39456 mutex_lock(&ch->port.mutex);
39457 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
39458 index 1e8e8ce..a9efc93 100644
39459 --- a/drivers/tty/n_gsm.c
39460 +++ b/drivers/tty/n_gsm.c
39461 @@ -1638,7 +1638,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
39462 kref_init(&dlci->ref);
39463 mutex_init(&dlci->mutex);
39464 dlci->fifo = &dlci->_fifo;
39465 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
39466 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
39467 kfree(dlci);
39468 return NULL;
39469 }
39470 @@ -2925,7 +2925,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
39471 struct gsm_dlci *dlci = tty->driver_data;
39472 struct tty_port *port = &dlci->port;
39473
39474 - port->count++;
39475 + atomic_inc(&port->count);
39476 dlci_get(dlci);
39477 dlci_get(dlci->gsm->dlci[0]);
39478 mux_get(dlci->gsm);
39479 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
39480 index 8c0b7b4..e88f052 100644
39481 --- a/drivers/tty/n_tty.c
39482 +++ b/drivers/tty/n_tty.c
39483 @@ -2142,6 +2142,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
39484 {
39485 *ops = tty_ldisc_N_TTY;
39486 ops->owner = NULL;
39487 - ops->refcount = ops->flags = 0;
39488 + atomic_set(&ops->refcount, 0);
39489 + ops->flags = 0;
39490 }
39491 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
39492 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
39493 index a82b399..331a220 100644
39494 --- a/drivers/tty/pty.c
39495 +++ b/drivers/tty/pty.c
39496 @@ -728,8 +728,10 @@ static void __init unix98_pty_init(void)
39497 panic("Couldn't register Unix98 pts driver");
39498
39499 /* Now create the /dev/ptmx special device */
39500 + pax_open_kernel();
39501 tty_default_fops(&ptmx_fops);
39502 - ptmx_fops.open = ptmx_open;
39503 + *(void **)&ptmx_fops.open = ptmx_open;
39504 + pax_close_kernel();
39505
39506 cdev_init(&ptmx_cdev, &ptmx_fops);
39507 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
39508 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
39509 index 9700d34..df7520c 100644
39510 --- a/drivers/tty/rocket.c
39511 +++ b/drivers/tty/rocket.c
39512 @@ -924,7 +924,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
39513 tty->driver_data = info;
39514 tty_port_tty_set(port, tty);
39515
39516 - if (port->count++ == 0) {
39517 + if (atomic_inc_return(&port->count) == 1) {
39518 atomic_inc(&rp_num_ports_open);
39519
39520 #ifdef ROCKET_DEBUG_OPEN
39521 @@ -933,7 +933,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
39522 #endif
39523 }
39524 #ifdef ROCKET_DEBUG_OPEN
39525 - printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
39526 + printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
39527 #endif
39528
39529 /*
39530 @@ -1528,7 +1528,7 @@ static void rp_hangup(struct tty_struct *tty)
39531 spin_unlock_irqrestore(&info->port.lock, flags);
39532 return;
39533 }
39534 - if (info->port.count)
39535 + if (atomic_read(&info->port.count))
39536 atomic_dec(&rp_num_ports_open);
39537 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
39538 spin_unlock_irqrestore(&info->port.lock, flags);
39539 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
39540 index 1002054..dd644a8 100644
39541 --- a/drivers/tty/serial/kgdboc.c
39542 +++ b/drivers/tty/serial/kgdboc.c
39543 @@ -24,8 +24,9 @@
39544 #define MAX_CONFIG_LEN 40
39545
39546 static struct kgdb_io kgdboc_io_ops;
39547 +static struct kgdb_io kgdboc_io_ops_console;
39548
39549 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
39550 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
39551 static int configured = -1;
39552
39553 static char config[MAX_CONFIG_LEN];
39554 @@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
39555 kgdboc_unregister_kbd();
39556 if (configured == 1)
39557 kgdb_unregister_io_module(&kgdboc_io_ops);
39558 + else if (configured == 2)
39559 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
39560 }
39561
39562 static int configure_kgdboc(void)
39563 @@ -160,13 +163,13 @@ static int configure_kgdboc(void)
39564 int err;
39565 char *cptr = config;
39566 struct console *cons;
39567 + int is_console = 0;
39568
39569 err = kgdboc_option_setup(config);
39570 if (err || !strlen(config) || isspace(config[0]))
39571 goto noconfig;
39572
39573 err = -ENODEV;
39574 - kgdboc_io_ops.is_console = 0;
39575 kgdb_tty_driver = NULL;
39576
39577 kgdboc_use_kms = 0;
39578 @@ -187,7 +190,7 @@ static int configure_kgdboc(void)
39579 int idx;
39580 if (cons->device && cons->device(cons, &idx) == p &&
39581 idx == tty_line) {
39582 - kgdboc_io_ops.is_console = 1;
39583 + is_console = 1;
39584 break;
39585 }
39586 cons = cons->next;
39587 @@ -197,7 +200,13 @@ static int configure_kgdboc(void)
39588 kgdb_tty_line = tty_line;
39589
39590 do_register:
39591 - err = kgdb_register_io_module(&kgdboc_io_ops);
39592 + if (is_console) {
39593 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
39594 + configured = 2;
39595 + } else {
39596 + err = kgdb_register_io_module(&kgdboc_io_ops);
39597 + configured = 1;
39598 + }
39599 if (err)
39600 goto noconfig;
39601
39602 @@ -205,8 +214,6 @@ do_register:
39603 if (err)
39604 goto nmi_con_failed;
39605
39606 - configured = 1;
39607 -
39608 return 0;
39609
39610 nmi_con_failed:
39611 @@ -223,7 +230,7 @@ noconfig:
39612 static int __init init_kgdboc(void)
39613 {
39614 /* Already configured? */
39615 - if (configured == 1)
39616 + if (configured >= 1)
39617 return 0;
39618
39619 return configure_kgdboc();
39620 @@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
39621 if (config[len - 1] == '\n')
39622 config[len - 1] = '\0';
39623
39624 - if (configured == 1)
39625 + if (configured >= 1)
39626 cleanup_kgdboc();
39627
39628 /* Go and configure with the new params. */
39629 @@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
39630 .post_exception = kgdboc_post_exp_handler,
39631 };
39632
39633 +static struct kgdb_io kgdboc_io_ops_console = {
39634 + .name = "kgdboc",
39635 + .read_char = kgdboc_get_char,
39636 + .write_char = kgdboc_put_char,
39637 + .pre_exception = kgdboc_pre_exp_handler,
39638 + .post_exception = kgdboc_post_exp_handler,
39639 + .is_console = 1
39640 +};
39641 +
39642 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
39643 /* This is only available if kgdboc is a built in for early debugging */
39644 static int __init kgdboc_early_init(char *opt)
39645 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
39646 index 7f04717..0f3794f 100644
39647 --- a/drivers/tty/serial/samsung.c
39648 +++ b/drivers/tty/serial/samsung.c
39649 @@ -445,11 +445,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
39650 }
39651 }
39652
39653 +static int s3c64xx_serial_startup(struct uart_port *port);
39654 static int s3c24xx_serial_startup(struct uart_port *port)
39655 {
39656 struct s3c24xx_uart_port *ourport = to_ourport(port);
39657 int ret;
39658
39659 + /* Startup sequence is different for s3c64xx and higher SoC's */
39660 + if (s3c24xx_serial_has_interrupt_mask(port))
39661 + return s3c64xx_serial_startup(port);
39662 +
39663 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
39664 port->mapbase, port->membase);
39665
39666 @@ -1115,10 +1120,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
39667 /* setup info for port */
39668 port->dev = &platdev->dev;
39669
39670 - /* Startup sequence is different for s3c64xx and higher SoC's */
39671 - if (s3c24xx_serial_has_interrupt_mask(port))
39672 - s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
39673 -
39674 port->uartclk = 1;
39675
39676 if (cfg->uart_flags & UPF_CONS_FLOW) {
39677 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
39678 index 0fcfd98..8244fce 100644
39679 --- a/drivers/tty/serial/serial_core.c
39680 +++ b/drivers/tty/serial/serial_core.c
39681 @@ -1408,7 +1408,7 @@ static void uart_hangup(struct tty_struct *tty)
39682 uart_flush_buffer(tty);
39683 uart_shutdown(tty, state);
39684 spin_lock_irqsave(&port->lock, flags);
39685 - port->count = 0;
39686 + atomic_set(&port->count, 0);
39687 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
39688 spin_unlock_irqrestore(&port->lock, flags);
39689 tty_port_tty_set(port, NULL);
39690 @@ -1504,7 +1504,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
39691 goto end;
39692 }
39693
39694 - port->count++;
39695 + atomic_inc(&port->count);
39696 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
39697 retval = -ENXIO;
39698 goto err_dec_count;
39699 @@ -1531,7 +1531,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
39700 /*
39701 * Make sure the device is in D0 state.
39702 */
39703 - if (port->count == 1)
39704 + if (atomic_read(&port->count) == 1)
39705 uart_change_pm(state, 0);
39706
39707 /*
39708 @@ -1549,7 +1549,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
39709 end:
39710 return retval;
39711 err_dec_count:
39712 - port->count--;
39713 + atomic_dec(&port->count);
39714 mutex_unlock(&port->mutex);
39715 goto end;
39716 }
39717 diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
39718 index 70e3a52..5742052 100644
39719 --- a/drivers/tty/synclink.c
39720 +++ b/drivers/tty/synclink.c
39721 @@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
39722
39723 if (debug_level >= DEBUG_LEVEL_INFO)
39724 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
39725 - __FILE__,__LINE__, info->device_name, info->port.count);
39726 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
39727
39728 if (tty_port_close_start(&info->port, tty, filp) == 0)
39729 goto cleanup;
39730 @@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
39731 cleanup:
39732 if (debug_level >= DEBUG_LEVEL_INFO)
39733 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
39734 - tty->driver->name, info->port.count);
39735 + tty->driver->name, atomic_read(&info->port.count));
39736
39737 } /* end of mgsl_close() */
39738
39739 @@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
39740
39741 mgsl_flush_buffer(tty);
39742 shutdown(info);
39743 -
39744 - info->port.count = 0;
39745 +
39746 + atomic_set(&info->port.count, 0);
39747 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
39748 info->port.tty = NULL;
39749
39750 @@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
39751
39752 if (debug_level >= DEBUG_LEVEL_INFO)
39753 printk("%s(%d):block_til_ready before block on %s count=%d\n",
39754 - __FILE__,__LINE__, tty->driver->name, port->count );
39755 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
39756
39757 spin_lock_irqsave(&info->irq_spinlock, flags);
39758 if (!tty_hung_up_p(filp)) {
39759 extra_count = true;
39760 - port->count--;
39761 + atomic_dec(&port->count);
39762 }
39763 spin_unlock_irqrestore(&info->irq_spinlock, flags);
39764 port->blocked_open++;
39765 @@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
39766
39767 if (debug_level >= DEBUG_LEVEL_INFO)
39768 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
39769 - __FILE__,__LINE__, tty->driver->name, port->count );
39770 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
39771
39772 tty_unlock(tty);
39773 schedule();
39774 @@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
39775
39776 /* FIXME: Racy on hangup during close wait */
39777 if (extra_count)
39778 - port->count++;
39779 + atomic_inc(&port->count);
39780 port->blocked_open--;
39781
39782 if (debug_level >= DEBUG_LEVEL_INFO)
39783 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
39784 - __FILE__,__LINE__, tty->driver->name, port->count );
39785 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
39786
39787 if (!retval)
39788 port->flags |= ASYNC_NORMAL_ACTIVE;
39789 @@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
39790
39791 if (debug_level >= DEBUG_LEVEL_INFO)
39792 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
39793 - __FILE__,__LINE__,tty->driver->name, info->port.count);
39794 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
39795
39796 /* If port is closing, signal caller to try again */
39797 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
39798 @@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
39799 spin_unlock_irqrestore(&info->netlock, flags);
39800 goto cleanup;
39801 }
39802 - info->port.count++;
39803 + atomic_inc(&info->port.count);
39804 spin_unlock_irqrestore(&info->netlock, flags);
39805
39806 - if (info->port.count == 1) {
39807 + if (atomic_read(&info->port.count) == 1) {
39808 /* 1st open on this device, init hardware */
39809 retval = startup(info);
39810 if (retval < 0)
39811 @@ -3451,8 +3451,8 @@ cleanup:
39812 if (retval) {
39813 if (tty->count == 1)
39814 info->port.tty = NULL; /* tty layer will release tty struct */
39815 - if(info->port.count)
39816 - info->port.count--;
39817 + if (atomic_read(&info->port.count))
39818 + atomic_dec(&info->port.count);
39819 }
39820
39821 return retval;
39822 @@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39823 unsigned short new_crctype;
39824
39825 /* return error if TTY interface open */
39826 - if (info->port.count)
39827 + if (atomic_read(&info->port.count))
39828 return -EBUSY;
39829
39830 switch (encoding)
39831 @@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
39832
39833 /* arbitrate between network and tty opens */
39834 spin_lock_irqsave(&info->netlock, flags);
39835 - if (info->port.count != 0 || info->netcount != 0) {
39836 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39837 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39838 spin_unlock_irqrestore(&info->netlock, flags);
39839 return -EBUSY;
39840 @@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39841 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
39842
39843 /* return error if TTY interface open */
39844 - if (info->port.count)
39845 + if (atomic_read(&info->port.count))
39846 return -EBUSY;
39847
39848 if (cmd != SIOCWANDEV)
39849 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
39850 index b38e954..ce45b38 100644
39851 --- a/drivers/tty/synclink_gt.c
39852 +++ b/drivers/tty/synclink_gt.c
39853 @@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
39854 tty->driver_data = info;
39855 info->port.tty = tty;
39856
39857 - DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
39858 + DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
39859
39860 /* If port is closing, signal caller to try again */
39861 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
39862 @@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
39863 mutex_unlock(&info->port.mutex);
39864 goto cleanup;
39865 }
39866 - info->port.count++;
39867 + atomic_inc(&info->port.count);
39868 spin_unlock_irqrestore(&info->netlock, flags);
39869
39870 - if (info->port.count == 1) {
39871 + if (atomic_read(&info->port.count) == 1) {
39872 /* 1st open on this device, init hardware */
39873 retval = startup(info);
39874 if (retval < 0) {
39875 @@ -716,8 +716,8 @@ cleanup:
39876 if (retval) {
39877 if (tty->count == 1)
39878 info->port.tty = NULL; /* tty layer will release tty struct */
39879 - if(info->port.count)
39880 - info->port.count--;
39881 + if(atomic_read(&info->port.count))
39882 + atomic_dec(&info->port.count);
39883 }
39884
39885 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
39886 @@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
39887
39888 if (sanity_check(info, tty->name, "close"))
39889 return;
39890 - DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
39891 + DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
39892
39893 if (tty_port_close_start(&info->port, tty, filp) == 0)
39894 goto cleanup;
39895 @@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
39896 tty_port_close_end(&info->port, tty);
39897 info->port.tty = NULL;
39898 cleanup:
39899 - DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
39900 + DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
39901 }
39902
39903 static void hangup(struct tty_struct *tty)
39904 @@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
39905 shutdown(info);
39906
39907 spin_lock_irqsave(&info->port.lock, flags);
39908 - info->port.count = 0;
39909 + atomic_set(&info->port.count, 0);
39910 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
39911 info->port.tty = NULL;
39912 spin_unlock_irqrestore(&info->port.lock, flags);
39913 @@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39914 unsigned short new_crctype;
39915
39916 /* return error if TTY interface open */
39917 - if (info->port.count)
39918 + if (atomic_read(&info->port.count))
39919 return -EBUSY;
39920
39921 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
39922 @@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
39923
39924 /* arbitrate between network and tty opens */
39925 spin_lock_irqsave(&info->netlock, flags);
39926 - if (info->port.count != 0 || info->netcount != 0) {
39927 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39928 DBGINFO(("%s hdlc_open busy\n", dev->name));
39929 spin_unlock_irqrestore(&info->netlock, flags);
39930 return -EBUSY;
39931 @@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39932 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
39933
39934 /* return error if TTY interface open */
39935 - if (info->port.count)
39936 + if (atomic_read(&info->port.count))
39937 return -EBUSY;
39938
39939 if (cmd != SIOCWANDEV)
39940 @@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
39941 if (port == NULL)
39942 continue;
39943 spin_lock(&port->lock);
39944 - if ((port->port.count || port->netcount) &&
39945 + if ((atomic_read(&port->port.count) || port->netcount) &&
39946 port->pending_bh && !port->bh_running &&
39947 !port->bh_requested) {
39948 DBGISR(("%s bh queued\n", port->device_name));
39949 @@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
39950 spin_lock_irqsave(&info->lock, flags);
39951 if (!tty_hung_up_p(filp)) {
39952 extra_count = true;
39953 - port->count--;
39954 + atomic_dec(&port->count);
39955 }
39956 spin_unlock_irqrestore(&info->lock, flags);
39957 port->blocked_open++;
39958 @@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
39959 remove_wait_queue(&port->open_wait, &wait);
39960
39961 if (extra_count)
39962 - port->count++;
39963 + atomic_inc(&port->count);
39964 port->blocked_open--;
39965
39966 if (!retval)
39967 diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
39968 index f17d9f3..27a041b 100644
39969 --- a/drivers/tty/synclinkmp.c
39970 +++ b/drivers/tty/synclinkmp.c
39971 @@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
39972
39973 if (debug_level >= DEBUG_LEVEL_INFO)
39974 printk("%s(%d):%s open(), old ref count = %d\n",
39975 - __FILE__,__LINE__,tty->driver->name, info->port.count);
39976 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
39977
39978 /* If port is closing, signal caller to try again */
39979 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
39980 @@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
39981 spin_unlock_irqrestore(&info->netlock, flags);
39982 goto cleanup;
39983 }
39984 - info->port.count++;
39985 + atomic_inc(&info->port.count);
39986 spin_unlock_irqrestore(&info->netlock, flags);
39987
39988 - if (info->port.count == 1) {
39989 + if (atomic_read(&info->port.count) == 1) {
39990 /* 1st open on this device, init hardware */
39991 retval = startup(info);
39992 if (retval < 0)
39993 @@ -797,8 +797,8 @@ cleanup:
39994 if (retval) {
39995 if (tty->count == 1)
39996 info->port.tty = NULL; /* tty layer will release tty struct */
39997 - if(info->port.count)
39998 - info->port.count--;
39999 + if(atomic_read(&info->port.count))
40000 + atomic_dec(&info->port.count);
40001 }
40002
40003 return retval;
40004 @@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40005
40006 if (debug_level >= DEBUG_LEVEL_INFO)
40007 printk("%s(%d):%s close() entry, count=%d\n",
40008 - __FILE__,__LINE__, info->device_name, info->port.count);
40009 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
40010
40011 if (tty_port_close_start(&info->port, tty, filp) == 0)
40012 goto cleanup;
40013 @@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40014 cleanup:
40015 if (debug_level >= DEBUG_LEVEL_INFO)
40016 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
40017 - tty->driver->name, info->port.count);
40018 + tty->driver->name, atomic_read(&info->port.count));
40019 }
40020
40021 /* Called by tty_hangup() when a hangup is signaled.
40022 @@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
40023 shutdown(info);
40024
40025 spin_lock_irqsave(&info->port.lock, flags);
40026 - info->port.count = 0;
40027 + atomic_set(&info->port.count, 0);
40028 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
40029 info->port.tty = NULL;
40030 spin_unlock_irqrestore(&info->port.lock, flags);
40031 @@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40032 unsigned short new_crctype;
40033
40034 /* return error if TTY interface open */
40035 - if (info->port.count)
40036 + if (atomic_read(&info->port.count))
40037 return -EBUSY;
40038
40039 switch (encoding)
40040 @@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
40041
40042 /* arbitrate between network and tty opens */
40043 spin_lock_irqsave(&info->netlock, flags);
40044 - if (info->port.count != 0 || info->netcount != 0) {
40045 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40046 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40047 spin_unlock_irqrestore(&info->netlock, flags);
40048 return -EBUSY;
40049 @@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40050 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
40051
40052 /* return error if TTY interface open */
40053 - if (info->port.count)
40054 + if (atomic_read(&info->port.count))
40055 return -EBUSY;
40056
40057 if (cmd != SIOCWANDEV)
40058 @@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
40059 * do not request bottom half processing if the
40060 * device is not open in a normal mode.
40061 */
40062 - if ( port && (port->port.count || port->netcount) &&
40063 + if ( port && (atomic_read(&port->port.count) || port->netcount) &&
40064 port->pending_bh && !port->bh_running &&
40065 !port->bh_requested ) {
40066 if ( debug_level >= DEBUG_LEVEL_ISR )
40067 @@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40068
40069 if (debug_level >= DEBUG_LEVEL_INFO)
40070 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
40071 - __FILE__,__LINE__, tty->driver->name, port->count );
40072 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40073
40074 spin_lock_irqsave(&info->lock, flags);
40075 if (!tty_hung_up_p(filp)) {
40076 extra_count = true;
40077 - port->count--;
40078 + atomic_dec(&port->count);
40079 }
40080 spin_unlock_irqrestore(&info->lock, flags);
40081 port->blocked_open++;
40082 @@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40083
40084 if (debug_level >= DEBUG_LEVEL_INFO)
40085 printk("%s(%d):%s block_til_ready() count=%d\n",
40086 - __FILE__,__LINE__, tty->driver->name, port->count );
40087 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40088
40089 tty_unlock(tty);
40090 schedule();
40091 @@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40092 remove_wait_queue(&port->open_wait, &wait);
40093
40094 if (extra_count)
40095 - port->count++;
40096 + atomic_inc(&port->count);
40097 port->blocked_open--;
40098
40099 if (debug_level >= DEBUG_LEVEL_INFO)
40100 printk("%s(%d):%s block_til_ready() after, count=%d\n",
40101 - __FILE__,__LINE__, tty->driver->name, port->count );
40102 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40103
40104 if (!retval)
40105 port->flags |= ASYNC_NORMAL_ACTIVE;
40106 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
40107 index 16ee6ce..bfcac57 100644
40108 --- a/drivers/tty/sysrq.c
40109 +++ b/drivers/tty/sysrq.c
40110 @@ -866,7 +866,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
40111 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
40112 size_t count, loff_t *ppos)
40113 {
40114 - if (count) {
40115 + if (count && capable(CAP_SYS_ADMIN)) {
40116 char c;
40117
40118 if (get_user(c, buf))
40119 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
40120 index 2ea176b..2877bc8 100644
40121 --- a/drivers/tty/tty_io.c
40122 +++ b/drivers/tty/tty_io.c
40123 @@ -3395,7 +3395,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
40124
40125 void tty_default_fops(struct file_operations *fops)
40126 {
40127 - *fops = tty_fops;
40128 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
40129 }
40130
40131 /*
40132 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
40133 index 0f2a2c5..471e228 100644
40134 --- a/drivers/tty/tty_ldisc.c
40135 +++ b/drivers/tty/tty_ldisc.c
40136 @@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
40137 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
40138 struct tty_ldisc_ops *ldo = ld->ops;
40139
40140 - ldo->refcount--;
40141 + atomic_dec(&ldo->refcount);
40142 module_put(ldo->owner);
40143 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40144
40145 @@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
40146 spin_lock_irqsave(&tty_ldisc_lock, flags);
40147 tty_ldiscs[disc] = new_ldisc;
40148 new_ldisc->num = disc;
40149 - new_ldisc->refcount = 0;
40150 + atomic_set(&new_ldisc->refcount, 0);
40151 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40152
40153 return ret;
40154 @@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
40155 return -EINVAL;
40156
40157 spin_lock_irqsave(&tty_ldisc_lock, flags);
40158 - if (tty_ldiscs[disc]->refcount)
40159 + if (atomic_read(&tty_ldiscs[disc]->refcount))
40160 ret = -EBUSY;
40161 else
40162 tty_ldiscs[disc] = NULL;
40163 @@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
40164 if (ldops) {
40165 ret = ERR_PTR(-EAGAIN);
40166 if (try_module_get(ldops->owner)) {
40167 - ldops->refcount++;
40168 + atomic_inc(&ldops->refcount);
40169 ret = ldops;
40170 }
40171 }
40172 @@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
40173 unsigned long flags;
40174
40175 spin_lock_irqsave(&tty_ldisc_lock, flags);
40176 - ldops->refcount--;
40177 + atomic_dec(&ldops->refcount);
40178 module_put(ldops->owner);
40179 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40180 }
40181 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
40182 index d7bdd8d..feaef30 100644
40183 --- a/drivers/tty/tty_port.c
40184 +++ b/drivers/tty/tty_port.c
40185 @@ -202,7 +202,7 @@ void tty_port_hangup(struct tty_port *port)
40186 unsigned long flags;
40187
40188 spin_lock_irqsave(&port->lock, flags);
40189 - port->count = 0;
40190 + atomic_set(&port->count, 0);
40191 port->flags &= ~ASYNC_NORMAL_ACTIVE;
40192 if (port->tty) {
40193 set_bit(TTY_IO_ERROR, &port->tty->flags);
40194 @@ -328,7 +328,7 @@ int tty_port_block_til_ready(struct tty_port *port,
40195 /* The port lock protects the port counts */
40196 spin_lock_irqsave(&port->lock, flags);
40197 if (!tty_hung_up_p(filp))
40198 - port->count--;
40199 + atomic_dec(&port->count);
40200 port->blocked_open++;
40201 spin_unlock_irqrestore(&port->lock, flags);
40202
40203 @@ -370,7 +370,7 @@ int tty_port_block_til_ready(struct tty_port *port,
40204 we must not mess that up further */
40205 spin_lock_irqsave(&port->lock, flags);
40206 if (!tty_hung_up_p(filp))
40207 - port->count++;
40208 + atomic_inc(&port->count);
40209 port->blocked_open--;
40210 if (retval == 0)
40211 port->flags |= ASYNC_NORMAL_ACTIVE;
40212 @@ -390,19 +390,19 @@ int tty_port_close_start(struct tty_port *port,
40213 return 0;
40214 }
40215
40216 - if (tty->count == 1 && port->count != 1) {
40217 + if (tty->count == 1 && atomic_read(&port->count) != 1) {
40218 printk(KERN_WARNING
40219 "tty_port_close_start: tty->count = 1 port count = %d.\n",
40220 - port->count);
40221 - port->count = 1;
40222 + atomic_read(&port->count));
40223 + atomic_set(&port->count, 1);
40224 }
40225 - if (--port->count < 0) {
40226 + if (atomic_dec_return(&port->count) < 0) {
40227 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
40228 - port->count);
40229 - port->count = 0;
40230 + atomic_read(&port->count));
40231 + atomic_set(&port->count, 0);
40232 }
40233
40234 - if (port->count) {
40235 + if (atomic_read(&port->count)) {
40236 spin_unlock_irqrestore(&port->lock, flags);
40237 if (port->ops->drop)
40238 port->ops->drop(port);
40239 @@ -500,7 +500,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
40240 {
40241 spin_lock_irq(&port->lock);
40242 if (!tty_hung_up_p(filp))
40243 - ++port->count;
40244 + atomic_inc(&port->count);
40245 spin_unlock_irq(&port->lock);
40246 tty_port_tty_set(port, tty);
40247
40248 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
40249 index 681765b..d3ccdf2 100644
40250 --- a/drivers/tty/vt/keyboard.c
40251 +++ b/drivers/tty/vt/keyboard.c
40252 @@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
40253 kbd->kbdmode == VC_OFF) &&
40254 value != KVAL(K_SAK))
40255 return; /* SAK is allowed even in raw mode */
40256 +
40257 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40258 + {
40259 + void *func = fn_handler[value];
40260 + if (func == fn_show_state || func == fn_show_ptregs ||
40261 + func == fn_show_mem)
40262 + return;
40263 + }
40264 +#endif
40265 +
40266 fn_handler[value](vc);
40267 }
40268
40269 @@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
40270 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
40271 return -EFAULT;
40272
40273 - if (!capable(CAP_SYS_TTY_CONFIG))
40274 - perm = 0;
40275 -
40276 switch (cmd) {
40277 case KDGKBENT:
40278 /* Ensure another thread doesn't free it under us */
40279 @@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
40280 spin_unlock_irqrestore(&kbd_event_lock, flags);
40281 return put_user(val, &user_kbe->kb_value);
40282 case KDSKBENT:
40283 + if (!capable(CAP_SYS_TTY_CONFIG))
40284 + perm = 0;
40285 +
40286 if (!perm)
40287 return -EPERM;
40288 if (!i && v == K_NOSUCHMAP) {
40289 @@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
40290 int i, j, k;
40291 int ret;
40292
40293 - if (!capable(CAP_SYS_TTY_CONFIG))
40294 - perm = 0;
40295 -
40296 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
40297 if (!kbs) {
40298 ret = -ENOMEM;
40299 @@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
40300 kfree(kbs);
40301 return ((p && *p) ? -EOVERFLOW : 0);
40302 case KDSKBSENT:
40303 + if (!capable(CAP_SYS_TTY_CONFIG))
40304 + perm = 0;
40305 +
40306 if (!perm) {
40307 ret = -EPERM;
40308 goto reterr;
40309 diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
40310 index fa7268a..cb08d48 100644
40311 --- a/drivers/tty/vt/vc_screen.c
40312 +++ b/drivers/tty/vt/vc_screen.c
40313 @@ -51,7 +51,7 @@
40314 #define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 256 : PAGE_SIZE)
40315
40316 struct vcs_poll_data {
40317 - struct notifier_block notifier;
40318 + notifier_block_no_const notifier;
40319 unsigned int cons_num;
40320 bool seen_last_update;
40321 wait_queue_head_t waitq;
40322 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
40323 index 5110f36..8dc0a74 100644
40324 --- a/drivers/uio/uio.c
40325 +++ b/drivers/uio/uio.c
40326 @@ -25,6 +25,7 @@
40327 #include <linux/kobject.h>
40328 #include <linux/cdev.h>
40329 #include <linux/uio_driver.h>
40330 +#include <asm/local.h>
40331
40332 #define UIO_MAX_DEVICES (1U << MINORBITS)
40333
40334 @@ -32,10 +33,10 @@ struct uio_device {
40335 struct module *owner;
40336 struct device *dev;
40337 int minor;
40338 - atomic_t event;
40339 + atomic_unchecked_t event;
40340 struct fasync_struct *async_queue;
40341 wait_queue_head_t wait;
40342 - int vma_count;
40343 + local_t vma_count;
40344 struct uio_info *info;
40345 struct kobject *map_dir;
40346 struct kobject *portio_dir;
40347 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
40348 struct device_attribute *attr, char *buf)
40349 {
40350 struct uio_device *idev = dev_get_drvdata(dev);
40351 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
40352 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
40353 }
40354
40355 static struct device_attribute uio_class_attributes[] = {
40356 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
40357 {
40358 struct uio_device *idev = info->uio_dev;
40359
40360 - atomic_inc(&idev->event);
40361 + atomic_inc_unchecked(&idev->event);
40362 wake_up_interruptible(&idev->wait);
40363 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
40364 }
40365 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
40366 }
40367
40368 listener->dev = idev;
40369 - listener->event_count = atomic_read(&idev->event);
40370 + listener->event_count = atomic_read_unchecked(&idev->event);
40371 filep->private_data = listener;
40372
40373 if (idev->info->open) {
40374 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
40375 return -EIO;
40376
40377 poll_wait(filep, &idev->wait, wait);
40378 - if (listener->event_count != atomic_read(&idev->event))
40379 + if (listener->event_count != atomic_read_unchecked(&idev->event))
40380 return POLLIN | POLLRDNORM;
40381 return 0;
40382 }
40383 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
40384 do {
40385 set_current_state(TASK_INTERRUPTIBLE);
40386
40387 - event_count = atomic_read(&idev->event);
40388 + event_count = atomic_read_unchecked(&idev->event);
40389 if (event_count != listener->event_count) {
40390 if (copy_to_user(buf, &event_count, count))
40391 retval = -EFAULT;
40392 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
40393 static void uio_vma_open(struct vm_area_struct *vma)
40394 {
40395 struct uio_device *idev = vma->vm_private_data;
40396 - idev->vma_count++;
40397 + local_inc(&idev->vma_count);
40398 }
40399
40400 static void uio_vma_close(struct vm_area_struct *vma)
40401 {
40402 struct uio_device *idev = vma->vm_private_data;
40403 - idev->vma_count--;
40404 + local_dec(&idev->vma_count);
40405 }
40406
40407 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40408 @@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
40409 idev->owner = owner;
40410 idev->info = info;
40411 init_waitqueue_head(&idev->wait);
40412 - atomic_set(&idev->event, 0);
40413 + atomic_set_unchecked(&idev->event, 0);
40414
40415 ret = uio_get_minor(idev);
40416 if (ret)
40417 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
40418 index b7eb86a..36d28af 100644
40419 --- a/drivers/usb/atm/cxacru.c
40420 +++ b/drivers/usb/atm/cxacru.c
40421 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
40422 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
40423 if (ret < 2)
40424 return -EINVAL;
40425 - if (index < 0 || index > 0x7f)
40426 + if (index > 0x7f)
40427 return -EINVAL;
40428 pos += tmp;
40429
40430 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
40431 index 35f10bf..6a38a0b 100644
40432 --- a/drivers/usb/atm/usbatm.c
40433 +++ b/drivers/usb/atm/usbatm.c
40434 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40435 if (printk_ratelimit())
40436 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
40437 __func__, vpi, vci);
40438 - atomic_inc(&vcc->stats->rx_err);
40439 + atomic_inc_unchecked(&vcc->stats->rx_err);
40440 return;
40441 }
40442
40443 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40444 if (length > ATM_MAX_AAL5_PDU) {
40445 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
40446 __func__, length, vcc);
40447 - atomic_inc(&vcc->stats->rx_err);
40448 + atomic_inc_unchecked(&vcc->stats->rx_err);
40449 goto out;
40450 }
40451
40452 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40453 if (sarb->len < pdu_length) {
40454 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
40455 __func__, pdu_length, sarb->len, vcc);
40456 - atomic_inc(&vcc->stats->rx_err);
40457 + atomic_inc_unchecked(&vcc->stats->rx_err);
40458 goto out;
40459 }
40460
40461 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
40462 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
40463 __func__, vcc);
40464 - atomic_inc(&vcc->stats->rx_err);
40465 + atomic_inc_unchecked(&vcc->stats->rx_err);
40466 goto out;
40467 }
40468
40469 @@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40470 if (printk_ratelimit())
40471 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
40472 __func__, length);
40473 - atomic_inc(&vcc->stats->rx_drop);
40474 + atomic_inc_unchecked(&vcc->stats->rx_drop);
40475 goto out;
40476 }
40477
40478 @@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40479
40480 vcc->push(vcc, skb);
40481
40482 - atomic_inc(&vcc->stats->rx);
40483 + atomic_inc_unchecked(&vcc->stats->rx);
40484 out:
40485 skb_trim(sarb, 0);
40486 }
40487 @@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
40488 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
40489
40490 usbatm_pop(vcc, skb);
40491 - atomic_inc(&vcc->stats->tx);
40492 + atomic_inc_unchecked(&vcc->stats->tx);
40493
40494 skb = skb_dequeue(&instance->sndqueue);
40495 }
40496 @@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
40497 if (!left--)
40498 return sprintf(page,
40499 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
40500 - atomic_read(&atm_dev->stats.aal5.tx),
40501 - atomic_read(&atm_dev->stats.aal5.tx_err),
40502 - atomic_read(&atm_dev->stats.aal5.rx),
40503 - atomic_read(&atm_dev->stats.aal5.rx_err),
40504 - atomic_read(&atm_dev->stats.aal5.rx_drop));
40505 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
40506 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
40507 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
40508 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
40509 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
40510
40511 if (!left--) {
40512 if (instance->disconnected)
40513 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
40514 index f460de3..95ba1f6 100644
40515 --- a/drivers/usb/core/devices.c
40516 +++ b/drivers/usb/core/devices.c
40517 @@ -126,7 +126,7 @@ static const char format_endpt[] =
40518 * time it gets called.
40519 */
40520 static struct device_connect_event {
40521 - atomic_t count;
40522 + atomic_unchecked_t count;
40523 wait_queue_head_t wait;
40524 } device_event = {
40525 .count = ATOMIC_INIT(1),
40526 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
40527
40528 void usbfs_conn_disc_event(void)
40529 {
40530 - atomic_add(2, &device_event.count);
40531 + atomic_add_unchecked(2, &device_event.count);
40532 wake_up(&device_event.wait);
40533 }
40534
40535 @@ -647,7 +647,7 @@ static unsigned int usb_device_poll(struct file *file,
40536
40537 poll_wait(file, &device_event.wait, wait);
40538
40539 - event_count = atomic_read(&device_event.count);
40540 + event_count = atomic_read_unchecked(&device_event.count);
40541 if (file->f_version != event_count) {
40542 file->f_version = event_count;
40543 return POLLIN | POLLRDNORM;
40544 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
40545 index f034716..aed0368 100644
40546 --- a/drivers/usb/core/hcd.c
40547 +++ b/drivers/usb/core/hcd.c
40548 @@ -1478,7 +1478,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
40549 */
40550 usb_get_urb(urb);
40551 atomic_inc(&urb->use_count);
40552 - atomic_inc(&urb->dev->urbnum);
40553 + atomic_inc_unchecked(&urb->dev->urbnum);
40554 usbmon_urb_submit(&hcd->self, urb);
40555
40556 /* NOTE requirements on root-hub callers (usbfs and the hub
40557 @@ -1505,7 +1505,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
40558 urb->hcpriv = NULL;
40559 INIT_LIST_HEAD(&urb->urb_list);
40560 atomic_dec(&urb->use_count);
40561 - atomic_dec(&urb->dev->urbnum);
40562 + atomic_dec_unchecked(&urb->dev->urbnum);
40563 if (atomic_read(&urb->reject))
40564 wake_up(&usb_kill_urb_queue);
40565 usb_put_urb(urb);
40566 diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
40567 index 818e4a0..0fc9589 100644
40568 --- a/drivers/usb/core/sysfs.c
40569 +++ b/drivers/usb/core/sysfs.c
40570 @@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
40571 struct usb_device *udev;
40572
40573 udev = to_usb_device(dev);
40574 - return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
40575 + return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
40576 }
40577 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
40578
40579 diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
40580 index cd8fb44..17fbe0c 100644
40581 --- a/drivers/usb/core/usb.c
40582 +++ b/drivers/usb/core/usb.c
40583 @@ -397,7 +397,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
40584 set_dev_node(&dev->dev, dev_to_node(bus->controller));
40585 dev->state = USB_STATE_ATTACHED;
40586 dev->lpm_disable_count = 1;
40587 - atomic_set(&dev->urbnum, 0);
40588 + atomic_set_unchecked(&dev->urbnum, 0);
40589
40590 INIT_LIST_HEAD(&dev->ep0.urb_list);
40591 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
40592 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
40593 index 4bfa78a..902bfbd 100644
40594 --- a/drivers/usb/early/ehci-dbgp.c
40595 +++ b/drivers/usb/early/ehci-dbgp.c
40596 @@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
40597
40598 #ifdef CONFIG_KGDB
40599 static struct kgdb_io kgdbdbgp_io_ops;
40600 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
40601 +static struct kgdb_io kgdbdbgp_io_ops_console;
40602 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
40603 #else
40604 #define dbgp_kgdb_mode (0)
40605 #endif
40606 @@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
40607 .write_char = kgdbdbgp_write_char,
40608 };
40609
40610 +static struct kgdb_io kgdbdbgp_io_ops_console = {
40611 + .name = "kgdbdbgp",
40612 + .read_char = kgdbdbgp_read_char,
40613 + .write_char = kgdbdbgp_write_char,
40614 + .is_console = 1
40615 +};
40616 +
40617 static int kgdbdbgp_wait_time;
40618
40619 static int __init kgdbdbgp_parse_config(char *str)
40620 @@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
40621 ptr++;
40622 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
40623 }
40624 - kgdb_register_io_module(&kgdbdbgp_io_ops);
40625 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
40626 + if (early_dbgp_console.index != -1)
40627 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
40628 + else
40629 + kgdb_register_io_module(&kgdbdbgp_io_ops);
40630
40631 return 0;
40632 }
40633 diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
40634 index f173952..83d6ec0 100644
40635 --- a/drivers/usb/gadget/u_serial.c
40636 +++ b/drivers/usb/gadget/u_serial.c
40637 @@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
40638 spin_lock_irq(&port->port_lock);
40639
40640 /* already open? Great. */
40641 - if (port->port.count) {
40642 + if (atomic_read(&port->port.count)) {
40643 status = 0;
40644 - port->port.count++;
40645 + atomic_inc(&port->port.count);
40646
40647 /* currently opening/closing? wait ... */
40648 } else if (port->openclose) {
40649 @@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
40650 tty->driver_data = port;
40651 port->port.tty = tty;
40652
40653 - port->port.count = 1;
40654 + atomic_set(&port->port.count, 1);
40655 port->openclose = false;
40656
40657 /* if connected, start the I/O stream */
40658 @@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
40659
40660 spin_lock_irq(&port->port_lock);
40661
40662 - if (port->port.count != 1) {
40663 - if (port->port.count == 0)
40664 + if (atomic_read(&port->port.count) != 1) {
40665 + if (atomic_read(&port->port.count) == 0)
40666 WARN_ON(1);
40667 else
40668 - --port->port.count;
40669 + atomic_dec(&port->port.count);
40670 goto exit;
40671 }
40672
40673 @@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
40674 * and sleep if necessary
40675 */
40676 port->openclose = true;
40677 - port->port.count = 0;
40678 + atomic_set(&port->port.count, 0);
40679
40680 gser = port->port_usb;
40681 if (gser && gser->disconnect)
40682 @@ -1157,7 +1157,7 @@ static int gs_closed(struct gs_port *port)
40683 int cond;
40684
40685 spin_lock_irq(&port->port_lock);
40686 - cond = (port->port.count == 0) && !port->openclose;
40687 + cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
40688 spin_unlock_irq(&port->port_lock);
40689 return cond;
40690 }
40691 @@ -1270,7 +1270,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
40692 /* if it's already open, start I/O ... and notify the serial
40693 * protocol about open/close status (connect/disconnect).
40694 */
40695 - if (port->port.count) {
40696 + if (atomic_read(&port->port.count)) {
40697 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
40698 gs_start_io(port);
40699 if (gser->connect)
40700 @@ -1317,7 +1317,7 @@ void gserial_disconnect(struct gserial *gser)
40701
40702 port->port_usb = NULL;
40703 gser->ioport = NULL;
40704 - if (port->port.count > 0 || port->openclose) {
40705 + if (atomic_read(&port->port.count) > 0 || port->openclose) {
40706 wake_up_interruptible(&port->drain_wait);
40707 if (port->port.tty)
40708 tty_hangup(port->port.tty);
40709 @@ -1333,7 +1333,7 @@ void gserial_disconnect(struct gserial *gser)
40710
40711 /* finally, free any unused/unusable I/O buffers */
40712 spin_lock_irqsave(&port->port_lock, flags);
40713 - if (port->port.count == 0 && !port->openclose)
40714 + if (atomic_read(&port->port.count) == 0 && !port->openclose)
40715 gs_buf_free(&port->port_write_buf);
40716 gs_free_requests(gser->out, &port->read_pool, NULL);
40717 gs_free_requests(gser->out, &port->read_queue, NULL);
40718 diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
40719 index 24d3921..0c652ed 100644
40720 --- a/drivers/usb/musb/musb_dma.h
40721 +++ b/drivers/usb/musb/musb_dma.h
40722 @@ -172,7 +172,7 @@ struct dma_controller {
40723 int (*is_compatible)(struct dma_channel *channel,
40724 u16 maxpacket,
40725 void *buf, u32 length);
40726 -};
40727 +} __no_const;
40728
40729 /* called after channel_program(), may indicate a fault */
40730 extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
40731 diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
40732 index 5f3bcd3..bfca43f 100644
40733 --- a/drivers/usb/serial/console.c
40734 +++ b/drivers/usb/serial/console.c
40735 @@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
40736
40737 info->port = port;
40738
40739 - ++port->port.count;
40740 + atomic_inc(&port->port.count);
40741 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
40742 if (serial->type->set_termios) {
40743 /*
40744 @@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
40745 }
40746 /* Now that any required fake tty operations are completed restore
40747 * the tty port count */
40748 - --port->port.count;
40749 + atomic_dec(&port->port.count);
40750 /* The console is special in terms of closing the device so
40751 * indicate this port is now acting as a system console. */
40752 port->port.console = 1;
40753 @@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
40754 free_tty:
40755 kfree(tty);
40756 reset_open_count:
40757 - port->port.count = 0;
40758 + atomic_set(&port->port.count, 0);
40759 usb_autopm_put_interface(serial->interface);
40760 error_get_interface:
40761 usb_serial_put(serial);
40762 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
40763 index d6bea3e..60b250e 100644
40764 --- a/drivers/usb/wusbcore/wa-hc.h
40765 +++ b/drivers/usb/wusbcore/wa-hc.h
40766 @@ -192,7 +192,7 @@ struct wahc {
40767 struct list_head xfer_delayed_list;
40768 spinlock_t xfer_list_lock;
40769 struct work_struct xfer_work;
40770 - atomic_t xfer_id_count;
40771 + atomic_unchecked_t xfer_id_count;
40772 };
40773
40774
40775 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
40776 INIT_LIST_HEAD(&wa->xfer_delayed_list);
40777 spin_lock_init(&wa->xfer_list_lock);
40778 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
40779 - atomic_set(&wa->xfer_id_count, 1);
40780 + atomic_set_unchecked(&wa->xfer_id_count, 1);
40781 }
40782
40783 /**
40784 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
40785 index 57c01ab..8a05959 100644
40786 --- a/drivers/usb/wusbcore/wa-xfer.c
40787 +++ b/drivers/usb/wusbcore/wa-xfer.c
40788 @@ -296,7 +296,7 @@ out:
40789 */
40790 static void wa_xfer_id_init(struct wa_xfer *xfer)
40791 {
40792 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
40793 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
40794 }
40795
40796 /*
40797 diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
40798 index 56097c6..4f38643 100644
40799 --- a/drivers/vfio/vfio.c
40800 +++ b/drivers/vfio/vfio.c
40801 @@ -71,7 +71,7 @@ struct vfio_group {
40802 struct list_head device_list;
40803 struct mutex device_lock;
40804 struct device *dev;
40805 - struct notifier_block nb;
40806 + notifier_block_no_const nb;
40807 struct list_head vfio_next;
40808 struct list_head container_next;
40809 };
40810 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
40811 index dedaf81..b0f11ab 100644
40812 --- a/drivers/vhost/vhost.c
40813 +++ b/drivers/vhost/vhost.c
40814 @@ -634,7 +634,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
40815 return 0;
40816 }
40817
40818 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
40819 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
40820 {
40821 struct file *eventfp, *filep = NULL;
40822 bool pollstart = false, pollstop = false;
40823 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
40824 index 0fefa84..7a9d581 100644
40825 --- a/drivers/video/aty/aty128fb.c
40826 +++ b/drivers/video/aty/aty128fb.c
40827 @@ -149,7 +149,7 @@ enum {
40828 };
40829
40830 /* Must match above enum */
40831 -static char * const r128_family[] __devinitconst = {
40832 +static const char * const r128_family[] __devinitconst = {
40833 "AGP",
40834 "PCI",
40835 "PRO AGP",
40836 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
40837 index 5c3960d..15cf8fc 100644
40838 --- a/drivers/video/fbcmap.c
40839 +++ b/drivers/video/fbcmap.c
40840 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
40841 rc = -ENODEV;
40842 goto out;
40843 }
40844 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
40845 - !info->fbops->fb_setcmap)) {
40846 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
40847 rc = -EINVAL;
40848 goto out1;
40849 }
40850 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
40851 index 3ff0105..7589d98 100644
40852 --- a/drivers/video/fbmem.c
40853 +++ b/drivers/video/fbmem.c
40854 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
40855 image->dx += image->width + 8;
40856 }
40857 } else if (rotate == FB_ROTATE_UD) {
40858 - for (x = 0; x < num && image->dx >= 0; x++) {
40859 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
40860 info->fbops->fb_imageblit(info, image);
40861 image->dx -= image->width + 8;
40862 }
40863 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
40864 image->dy += image->height + 8;
40865 }
40866 } else if (rotate == FB_ROTATE_CCW) {
40867 - for (x = 0; x < num && image->dy >= 0; x++) {
40868 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
40869 info->fbops->fb_imageblit(info, image);
40870 image->dy -= image->height + 8;
40871 }
40872 @@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
40873 return -EFAULT;
40874 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
40875 return -EINVAL;
40876 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
40877 + if (con2fb.framebuffer >= FB_MAX)
40878 return -EINVAL;
40879 if (!registered_fb[con2fb.framebuffer])
40880 request_module("fb%d", con2fb.framebuffer);
40881 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
40882 index 7672d2e..b56437f 100644
40883 --- a/drivers/video/i810/i810_accel.c
40884 +++ b/drivers/video/i810/i810_accel.c
40885 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
40886 }
40887 }
40888 printk("ringbuffer lockup!!!\n");
40889 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
40890 i810_report_error(mmio);
40891 par->dev_flags |= LOCKUP;
40892 info->pixmap.scan_align = 1;
40893 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
40894 index 3c14e43..eafa544 100644
40895 --- a/drivers/video/logo/logo_linux_clut224.ppm
40896 +++ b/drivers/video/logo/logo_linux_clut224.ppm
40897 @@ -1,1604 +1,1123 @@
40898 P3
40899 -# Standard 224-color Linux logo
40900 80 80
40901 255
40902 - 0 0 0 0 0 0 0 0 0 0 0 0
40903 - 0 0 0 0 0 0 0 0 0 0 0 0
40904 - 0 0 0 0 0 0 0 0 0 0 0 0
40905 - 0 0 0 0 0 0 0 0 0 0 0 0
40906 - 0 0 0 0 0 0 0 0 0 0 0 0
40907 - 0 0 0 0 0 0 0 0 0 0 0 0
40908 - 0 0 0 0 0 0 0 0 0 0 0 0
40909 - 0 0 0 0 0 0 0 0 0 0 0 0
40910 - 0 0 0 0 0 0 0 0 0 0 0 0
40911 - 6 6 6 6 6 6 10 10 10 10 10 10
40912 - 10 10 10 6 6 6 6 6 6 6 6 6
40913 - 0 0 0 0 0 0 0 0 0 0 0 0
40914 - 0 0 0 0 0 0 0 0 0 0 0 0
40915 - 0 0 0 0 0 0 0 0 0 0 0 0
40916 - 0 0 0 0 0 0 0 0 0 0 0 0
40917 - 0 0 0 0 0 0 0 0 0 0 0 0
40918 - 0 0 0 0 0 0 0 0 0 0 0 0
40919 - 0 0 0 0 0 0 0 0 0 0 0 0
40920 - 0 0 0 0 0 0 0 0 0 0 0 0
40921 - 0 0 0 0 0 0 0 0 0 0 0 0
40922 - 0 0 0 0 0 0 0 0 0 0 0 0
40923 - 0 0 0 0 0 0 0 0 0 0 0 0
40924 - 0 0 0 0 0 0 0 0 0 0 0 0
40925 - 0 0 0 0 0 0 0 0 0 0 0 0
40926 - 0 0 0 0 0 0 0 0 0 0 0 0
40927 - 0 0 0 0 0 0 0 0 0 0 0 0
40928 - 0 0 0 0 0 0 0 0 0 0 0 0
40929 - 0 0 0 0 0 0 0 0 0 0 0 0
40930 - 0 0 0 6 6 6 10 10 10 14 14 14
40931 - 22 22 22 26 26 26 30 30 30 34 34 34
40932 - 30 30 30 30 30 30 26 26 26 18 18 18
40933 - 14 14 14 10 10 10 6 6 6 0 0 0
40934 - 0 0 0 0 0 0 0 0 0 0 0 0
40935 - 0 0 0 0 0 0 0 0 0 0 0 0
40936 - 0 0 0 0 0 0 0 0 0 0 0 0
40937 - 0 0 0 0 0 0 0 0 0 0 0 0
40938 - 0 0 0 0 0 0 0 0 0 0 0 0
40939 - 0 0 0 0 0 0 0 0 0 0 0 0
40940 - 0 0 0 0 0 0 0 0 0 0 0 0
40941 - 0 0 0 0 0 0 0 0 0 0 0 0
40942 - 0 0 0 0 0 0 0 0 0 0 0 0
40943 - 0 0 0 0 0 1 0 0 1 0 0 0
40944 - 0 0 0 0 0 0 0 0 0 0 0 0
40945 - 0 0 0 0 0 0 0 0 0 0 0 0
40946 - 0 0 0 0 0 0 0 0 0 0 0 0
40947 - 0 0 0 0 0 0 0 0 0 0 0 0
40948 - 0 0 0 0 0 0 0 0 0 0 0 0
40949 - 0 0 0 0 0 0 0 0 0 0 0 0
40950 - 6 6 6 14 14 14 26 26 26 42 42 42
40951 - 54 54 54 66 66 66 78 78 78 78 78 78
40952 - 78 78 78 74 74 74 66 66 66 54 54 54
40953 - 42 42 42 26 26 26 18 18 18 10 10 10
40954 - 6 6 6 0 0 0 0 0 0 0 0 0
40955 - 0 0 0 0 0 0 0 0 0 0 0 0
40956 - 0 0 0 0 0 0 0 0 0 0 0 0
40957 - 0 0 0 0 0 0 0 0 0 0 0 0
40958 - 0 0 0 0 0 0 0 0 0 0 0 0
40959 - 0 0 0 0 0 0 0 0 0 0 0 0
40960 - 0 0 0 0 0 0 0 0 0 0 0 0
40961 - 0 0 0 0 0 0 0 0 0 0 0 0
40962 - 0 0 0 0 0 0 0 0 0 0 0 0
40963 - 0 0 1 0 0 0 0 0 0 0 0 0
40964 - 0 0 0 0 0 0 0 0 0 0 0 0
40965 - 0 0 0 0 0 0 0 0 0 0 0 0
40966 - 0 0 0 0 0 0 0 0 0 0 0 0
40967 - 0 0 0 0 0 0 0 0 0 0 0 0
40968 - 0 0 0 0 0 0 0 0 0 0 0 0
40969 - 0 0 0 0 0 0 0 0 0 10 10 10
40970 - 22 22 22 42 42 42 66 66 66 86 86 86
40971 - 66 66 66 38 38 38 38 38 38 22 22 22
40972 - 26 26 26 34 34 34 54 54 54 66 66 66
40973 - 86 86 86 70 70 70 46 46 46 26 26 26
40974 - 14 14 14 6 6 6 0 0 0 0 0 0
40975 - 0 0 0 0 0 0 0 0 0 0 0 0
40976 - 0 0 0 0 0 0 0 0 0 0 0 0
40977 - 0 0 0 0 0 0 0 0 0 0 0 0
40978 - 0 0 0 0 0 0 0 0 0 0 0 0
40979 - 0 0 0 0 0 0 0 0 0 0 0 0
40980 - 0 0 0 0 0 0 0 0 0 0 0 0
40981 - 0 0 0 0 0 0 0 0 0 0 0 0
40982 - 0 0 0 0 0 0 0 0 0 0 0 0
40983 - 0 0 1 0 0 1 0 0 1 0 0 0
40984 - 0 0 0 0 0 0 0 0 0 0 0 0
40985 - 0 0 0 0 0 0 0 0 0 0 0 0
40986 - 0 0 0 0 0 0 0 0 0 0 0 0
40987 - 0 0 0 0 0 0 0 0 0 0 0 0
40988 - 0 0 0 0 0 0 0 0 0 0 0 0
40989 - 0 0 0 0 0 0 10 10 10 26 26 26
40990 - 50 50 50 82 82 82 58 58 58 6 6 6
40991 - 2 2 6 2 2 6 2 2 6 2 2 6
40992 - 2 2 6 2 2 6 2 2 6 2 2 6
40993 - 6 6 6 54 54 54 86 86 86 66 66 66
40994 - 38 38 38 18 18 18 6 6 6 0 0 0
40995 - 0 0 0 0 0 0 0 0 0 0 0 0
40996 - 0 0 0 0 0 0 0 0 0 0 0 0
40997 - 0 0 0 0 0 0 0 0 0 0 0 0
40998 - 0 0 0 0 0 0 0 0 0 0 0 0
40999 - 0 0 0 0 0 0 0 0 0 0 0 0
41000 - 0 0 0 0 0 0 0 0 0 0 0 0
41001 - 0 0 0 0 0 0 0 0 0 0 0 0
41002 - 0 0 0 0 0 0 0 0 0 0 0 0
41003 - 0 0 0 0 0 0 0 0 0 0 0 0
41004 - 0 0 0 0 0 0 0 0 0 0 0 0
41005 - 0 0 0 0 0 0 0 0 0 0 0 0
41006 - 0 0 0 0 0 0 0 0 0 0 0 0
41007 - 0 0 0 0 0 0 0 0 0 0 0 0
41008 - 0 0 0 0 0 0 0 0 0 0 0 0
41009 - 0 0 0 6 6 6 22 22 22 50 50 50
41010 - 78 78 78 34 34 34 2 2 6 2 2 6
41011 - 2 2 6 2 2 6 2 2 6 2 2 6
41012 - 2 2 6 2 2 6 2 2 6 2 2 6
41013 - 2 2 6 2 2 6 6 6 6 70 70 70
41014 - 78 78 78 46 46 46 22 22 22 6 6 6
41015 - 0 0 0 0 0 0 0 0 0 0 0 0
41016 - 0 0 0 0 0 0 0 0 0 0 0 0
41017 - 0 0 0 0 0 0 0 0 0 0 0 0
41018 - 0 0 0 0 0 0 0 0 0 0 0 0
41019 - 0 0 0 0 0 0 0 0 0 0 0 0
41020 - 0 0 0 0 0 0 0 0 0 0 0 0
41021 - 0 0 0 0 0 0 0 0 0 0 0 0
41022 - 0 0 0 0 0 0 0 0 0 0 0 0
41023 - 0 0 1 0 0 1 0 0 1 0 0 0
41024 - 0 0 0 0 0 0 0 0 0 0 0 0
41025 - 0 0 0 0 0 0 0 0 0 0 0 0
41026 - 0 0 0 0 0 0 0 0 0 0 0 0
41027 - 0 0 0 0 0 0 0 0 0 0 0 0
41028 - 0 0 0 0 0 0 0 0 0 0 0 0
41029 - 6 6 6 18 18 18 42 42 42 82 82 82
41030 - 26 26 26 2 2 6 2 2 6 2 2 6
41031 - 2 2 6 2 2 6 2 2 6 2 2 6
41032 - 2 2 6 2 2 6 2 2 6 14 14 14
41033 - 46 46 46 34 34 34 6 6 6 2 2 6
41034 - 42 42 42 78 78 78 42 42 42 18 18 18
41035 - 6 6 6 0 0 0 0 0 0 0 0 0
41036 - 0 0 0 0 0 0 0 0 0 0 0 0
41037 - 0 0 0 0 0 0 0 0 0 0 0 0
41038 - 0 0 0 0 0 0 0 0 0 0 0 0
41039 - 0 0 0 0 0 0 0 0 0 0 0 0
41040 - 0 0 0 0 0 0 0 0 0 0 0 0
41041 - 0 0 0 0 0 0 0 0 0 0 0 0
41042 - 0 0 0 0 0 0 0 0 0 0 0 0
41043 - 0 0 1 0 0 0 0 0 1 0 0 0
41044 - 0 0 0 0 0 0 0 0 0 0 0 0
41045 - 0 0 0 0 0 0 0 0 0 0 0 0
41046 - 0 0 0 0 0 0 0 0 0 0 0 0
41047 - 0 0 0 0 0 0 0 0 0 0 0 0
41048 - 0 0 0 0 0 0 0 0 0 0 0 0
41049 - 10 10 10 30 30 30 66 66 66 58 58 58
41050 - 2 2 6 2 2 6 2 2 6 2 2 6
41051 - 2 2 6 2 2 6 2 2 6 2 2 6
41052 - 2 2 6 2 2 6 2 2 6 26 26 26
41053 - 86 86 86 101 101 101 46 46 46 10 10 10
41054 - 2 2 6 58 58 58 70 70 70 34 34 34
41055 - 10 10 10 0 0 0 0 0 0 0 0 0
41056 - 0 0 0 0 0 0 0 0 0 0 0 0
41057 - 0 0 0 0 0 0 0 0 0 0 0 0
41058 - 0 0 0 0 0 0 0 0 0 0 0 0
41059 - 0 0 0 0 0 0 0 0 0 0 0 0
41060 - 0 0 0 0 0 0 0 0 0 0 0 0
41061 - 0 0 0 0 0 0 0 0 0 0 0 0
41062 - 0 0 0 0 0 0 0 0 0 0 0 0
41063 - 0 0 1 0 0 1 0 0 1 0 0 0
41064 - 0 0 0 0 0 0 0 0 0 0 0 0
41065 - 0 0 0 0 0 0 0 0 0 0 0 0
41066 - 0 0 0 0 0 0 0 0 0 0 0 0
41067 - 0 0 0 0 0 0 0 0 0 0 0 0
41068 - 0 0 0 0 0 0 0 0 0 0 0 0
41069 - 14 14 14 42 42 42 86 86 86 10 10 10
41070 - 2 2 6 2 2 6 2 2 6 2 2 6
41071 - 2 2 6 2 2 6 2 2 6 2 2 6
41072 - 2 2 6 2 2 6 2 2 6 30 30 30
41073 - 94 94 94 94 94 94 58 58 58 26 26 26
41074 - 2 2 6 6 6 6 78 78 78 54 54 54
41075 - 22 22 22 6 6 6 0 0 0 0 0 0
41076 - 0 0 0 0 0 0 0 0 0 0 0 0
41077 - 0 0 0 0 0 0 0 0 0 0 0 0
41078 - 0 0 0 0 0 0 0 0 0 0 0 0
41079 - 0 0 0 0 0 0 0 0 0 0 0 0
41080 - 0 0 0 0 0 0 0 0 0 0 0 0
41081 - 0 0 0 0 0 0 0 0 0 0 0 0
41082 - 0 0 0 0 0 0 0 0 0 0 0 0
41083 - 0 0 0 0 0 0 0 0 0 0 0 0
41084 - 0 0 0 0 0 0 0 0 0 0 0 0
41085 - 0 0 0 0 0 0 0 0 0 0 0 0
41086 - 0 0 0 0 0 0 0 0 0 0 0 0
41087 - 0 0 0 0 0 0 0 0 0 0 0 0
41088 - 0 0 0 0 0 0 0 0 0 6 6 6
41089 - 22 22 22 62 62 62 62 62 62 2 2 6
41090 - 2 2 6 2 2 6 2 2 6 2 2 6
41091 - 2 2 6 2 2 6 2 2 6 2 2 6
41092 - 2 2 6 2 2 6 2 2 6 26 26 26
41093 - 54 54 54 38 38 38 18 18 18 10 10 10
41094 - 2 2 6 2 2 6 34 34 34 82 82 82
41095 - 38 38 38 14 14 14 0 0 0 0 0 0
41096 - 0 0 0 0 0 0 0 0 0 0 0 0
41097 - 0 0 0 0 0 0 0 0 0 0 0 0
41098 - 0 0 0 0 0 0 0 0 0 0 0 0
41099 - 0 0 0 0 0 0 0 0 0 0 0 0
41100 - 0 0 0 0 0 0 0 0 0 0 0 0
41101 - 0 0 0 0 0 0 0 0 0 0 0 0
41102 - 0 0 0 0 0 0 0 0 0 0 0 0
41103 - 0 0 0 0 0 1 0 0 1 0 0 0
41104 - 0 0 0 0 0 0 0 0 0 0 0 0
41105 - 0 0 0 0 0 0 0 0 0 0 0 0
41106 - 0 0 0 0 0 0 0 0 0 0 0 0
41107 - 0 0 0 0 0 0 0 0 0 0 0 0
41108 - 0 0 0 0 0 0 0 0 0 6 6 6
41109 - 30 30 30 78 78 78 30 30 30 2 2 6
41110 - 2 2 6 2 2 6 2 2 6 2 2 6
41111 - 2 2 6 2 2 6 2 2 6 2 2 6
41112 - 2 2 6 2 2 6 2 2 6 10 10 10
41113 - 10 10 10 2 2 6 2 2 6 2 2 6
41114 - 2 2 6 2 2 6 2 2 6 78 78 78
41115 - 50 50 50 18 18 18 6 6 6 0 0 0
41116 - 0 0 0 0 0 0 0 0 0 0 0 0
41117 - 0 0 0 0 0 0 0 0 0 0 0 0
41118 - 0 0 0 0 0 0 0 0 0 0 0 0
41119 - 0 0 0 0 0 0 0 0 0 0 0 0
41120 - 0 0 0 0 0 0 0 0 0 0 0 0
41121 - 0 0 0 0 0 0 0 0 0 0 0 0
41122 - 0 0 0 0 0 0 0 0 0 0 0 0
41123 - 0 0 1 0 0 0 0 0 0 0 0 0
41124 - 0 0 0 0 0 0 0 0 0 0 0 0
41125 - 0 0 0 0 0 0 0 0 0 0 0 0
41126 - 0 0 0 0 0 0 0 0 0 0 0 0
41127 - 0 0 0 0 0 0 0 0 0 0 0 0
41128 - 0 0 0 0 0 0 0 0 0 10 10 10
41129 - 38 38 38 86 86 86 14 14 14 2 2 6
41130 - 2 2 6 2 2 6 2 2 6 2 2 6
41131 - 2 2 6 2 2 6 2 2 6 2 2 6
41132 - 2 2 6 2 2 6 2 2 6 2 2 6
41133 - 2 2 6 2 2 6 2 2 6 2 2 6
41134 - 2 2 6 2 2 6 2 2 6 54 54 54
41135 - 66 66 66 26 26 26 6 6 6 0 0 0
41136 - 0 0 0 0 0 0 0 0 0 0 0 0
41137 - 0 0 0 0 0 0 0 0 0 0 0 0
41138 - 0 0 0 0 0 0 0 0 0 0 0 0
41139 - 0 0 0 0 0 0 0 0 0 0 0 0
41140 - 0 0 0 0 0 0 0 0 0 0 0 0
41141 - 0 0 0 0 0 0 0 0 0 0 0 0
41142 - 0 0 0 0 0 0 0 0 0 0 0 0
41143 - 0 0 0 0 0 1 0 0 1 0 0 0
41144 - 0 0 0 0 0 0 0 0 0 0 0 0
41145 - 0 0 0 0 0 0 0 0 0 0 0 0
41146 - 0 0 0 0 0 0 0 0 0 0 0 0
41147 - 0 0 0 0 0 0 0 0 0 0 0 0
41148 - 0 0 0 0 0 0 0 0 0 14 14 14
41149 - 42 42 42 82 82 82 2 2 6 2 2 6
41150 - 2 2 6 6 6 6 10 10 10 2 2 6
41151 - 2 2 6 2 2 6 2 2 6 2 2 6
41152 - 2 2 6 2 2 6 2 2 6 6 6 6
41153 - 14 14 14 10 10 10 2 2 6 2 2 6
41154 - 2 2 6 2 2 6 2 2 6 18 18 18
41155 - 82 82 82 34 34 34 10 10 10 0 0 0
41156 - 0 0 0 0 0 0 0 0 0 0 0 0
41157 - 0 0 0 0 0 0 0 0 0 0 0 0
41158 - 0 0 0 0 0 0 0 0 0 0 0 0
41159 - 0 0 0 0 0 0 0 0 0 0 0 0
41160 - 0 0 0 0 0 0 0 0 0 0 0 0
41161 - 0 0 0 0 0 0 0 0 0 0 0 0
41162 - 0 0 0 0 0 0 0 0 0 0 0 0
41163 - 0 0 1 0 0 0 0 0 0 0 0 0
41164 - 0 0 0 0 0 0 0 0 0 0 0 0
41165 - 0 0 0 0 0 0 0 0 0 0 0 0
41166 - 0 0 0 0 0 0 0 0 0 0 0 0
41167 - 0 0 0 0 0 0 0 0 0 0 0 0
41168 - 0 0 0 0 0 0 0 0 0 14 14 14
41169 - 46 46 46 86 86 86 2 2 6 2 2 6
41170 - 6 6 6 6 6 6 22 22 22 34 34 34
41171 - 6 6 6 2 2 6 2 2 6 2 2 6
41172 - 2 2 6 2 2 6 18 18 18 34 34 34
41173 - 10 10 10 50 50 50 22 22 22 2 2 6
41174 - 2 2 6 2 2 6 2 2 6 10 10 10
41175 - 86 86 86 42 42 42 14 14 14 0 0 0
41176 - 0 0 0 0 0 0 0 0 0 0 0 0
41177 - 0 0 0 0 0 0 0 0 0 0 0 0
41178 - 0 0 0 0 0 0 0 0 0 0 0 0
41179 - 0 0 0 0 0 0 0 0 0 0 0 0
41180 - 0 0 0 0 0 0 0 0 0 0 0 0
41181 - 0 0 0 0 0 0 0 0 0 0 0 0
41182 - 0 0 0 0 0 0 0 0 0 0 0 0
41183 - 0 0 1 0 0 1 0 0 1 0 0 0
41184 - 0 0 0 0 0 0 0 0 0 0 0 0
41185 - 0 0 0 0 0 0 0 0 0 0 0 0
41186 - 0 0 0 0 0 0 0 0 0 0 0 0
41187 - 0 0 0 0 0 0 0 0 0 0 0 0
41188 - 0 0 0 0 0 0 0 0 0 14 14 14
41189 - 46 46 46 86 86 86 2 2 6 2 2 6
41190 - 38 38 38 116 116 116 94 94 94 22 22 22
41191 - 22 22 22 2 2 6 2 2 6 2 2 6
41192 - 14 14 14 86 86 86 138 138 138 162 162 162
41193 -154 154 154 38 38 38 26 26 26 6 6 6
41194 - 2 2 6 2 2 6 2 2 6 2 2 6
41195 - 86 86 86 46 46 46 14 14 14 0 0 0
41196 - 0 0 0 0 0 0 0 0 0 0 0 0
41197 - 0 0 0 0 0 0 0 0 0 0 0 0
41198 - 0 0 0 0 0 0 0 0 0 0 0 0
41199 - 0 0 0 0 0 0 0 0 0 0 0 0
41200 - 0 0 0 0 0 0 0 0 0 0 0 0
41201 - 0 0 0 0 0 0 0 0 0 0 0 0
41202 - 0 0 0 0 0 0 0 0 0 0 0 0
41203 - 0 0 0 0 0 0 0 0 0 0 0 0
41204 - 0 0 0 0 0 0 0 0 0 0 0 0
41205 - 0 0 0 0 0 0 0 0 0 0 0 0
41206 - 0 0 0 0 0 0 0 0 0 0 0 0
41207 - 0 0 0 0 0 0 0 0 0 0 0 0
41208 - 0 0 0 0 0 0 0 0 0 14 14 14
41209 - 46 46 46 86 86 86 2 2 6 14 14 14
41210 -134 134 134 198 198 198 195 195 195 116 116 116
41211 - 10 10 10 2 2 6 2 2 6 6 6 6
41212 -101 98 89 187 187 187 210 210 210 218 218 218
41213 -214 214 214 134 134 134 14 14 14 6 6 6
41214 - 2 2 6 2 2 6 2 2 6 2 2 6
41215 - 86 86 86 50 50 50 18 18 18 6 6 6
41216 - 0 0 0 0 0 0 0 0 0 0 0 0
41217 - 0 0 0 0 0 0 0 0 0 0 0 0
41218 - 0 0 0 0 0 0 0 0 0 0 0 0
41219 - 0 0 0 0 0 0 0 0 0 0 0 0
41220 - 0 0 0 0 0 0 0 0 0 0 0 0
41221 - 0 0 0 0 0 0 0 0 0 0 0 0
41222 - 0 0 0 0 0 0 0 0 1 0 0 0
41223 - 0 0 1 0 0 1 0 0 1 0 0 0
41224 - 0 0 0 0 0 0 0 0 0 0 0 0
41225 - 0 0 0 0 0 0 0 0 0 0 0 0
41226 - 0 0 0 0 0 0 0 0 0 0 0 0
41227 - 0 0 0 0 0 0 0 0 0 0 0 0
41228 - 0 0 0 0 0 0 0 0 0 14 14 14
41229 - 46 46 46 86 86 86 2 2 6 54 54 54
41230 -218 218 218 195 195 195 226 226 226 246 246 246
41231 - 58 58 58 2 2 6 2 2 6 30 30 30
41232 -210 210 210 253 253 253 174 174 174 123 123 123
41233 -221 221 221 234 234 234 74 74 74 2 2 6
41234 - 2 2 6 2 2 6 2 2 6 2 2 6
41235 - 70 70 70 58 58 58 22 22 22 6 6 6
41236 - 0 0 0 0 0 0 0 0 0 0 0 0
41237 - 0 0 0 0 0 0 0 0 0 0 0 0
41238 - 0 0 0 0 0 0 0 0 0 0 0 0
41239 - 0 0 0 0 0 0 0 0 0 0 0 0
41240 - 0 0 0 0 0 0 0 0 0 0 0 0
41241 - 0 0 0 0 0 0 0 0 0 0 0 0
41242 - 0 0 0 0 0 0 0 0 0 0 0 0
41243 - 0 0 0 0 0 0 0 0 0 0 0 0
41244 - 0 0 0 0 0 0 0 0 0 0 0 0
41245 - 0 0 0 0 0 0 0 0 0 0 0 0
41246 - 0 0 0 0 0 0 0 0 0 0 0 0
41247 - 0 0 0 0 0 0 0 0 0 0 0 0
41248 - 0 0 0 0 0 0 0 0 0 14 14 14
41249 - 46 46 46 82 82 82 2 2 6 106 106 106
41250 -170 170 170 26 26 26 86 86 86 226 226 226
41251 -123 123 123 10 10 10 14 14 14 46 46 46
41252 -231 231 231 190 190 190 6 6 6 70 70 70
41253 - 90 90 90 238 238 238 158 158 158 2 2 6
41254 - 2 2 6 2 2 6 2 2 6 2 2 6
41255 - 70 70 70 58 58 58 22 22 22 6 6 6
41256 - 0 0 0 0 0 0 0 0 0 0 0 0
41257 - 0 0 0 0 0 0 0 0 0 0 0 0
41258 - 0 0 0 0 0 0 0 0 0 0 0 0
41259 - 0 0 0 0 0 0 0 0 0 0 0 0
41260 - 0 0 0 0 0 0 0 0 0 0 0 0
41261 - 0 0 0 0 0 0 0 0 0 0 0 0
41262 - 0 0 0 0 0 0 0 0 1 0 0 0
41263 - 0 0 1 0 0 1 0 0 1 0 0 0
41264 - 0 0 0 0 0 0 0 0 0 0 0 0
41265 - 0 0 0 0 0 0 0 0 0 0 0 0
41266 - 0 0 0 0 0 0 0 0 0 0 0 0
41267 - 0 0 0 0 0 0 0 0 0 0 0 0
41268 - 0 0 0 0 0 0 0 0 0 14 14 14
41269 - 42 42 42 86 86 86 6 6 6 116 116 116
41270 -106 106 106 6 6 6 70 70 70 149 149 149
41271 -128 128 128 18 18 18 38 38 38 54 54 54
41272 -221 221 221 106 106 106 2 2 6 14 14 14
41273 - 46 46 46 190 190 190 198 198 198 2 2 6
41274 - 2 2 6 2 2 6 2 2 6 2 2 6
41275 - 74 74 74 62 62 62 22 22 22 6 6 6
41276 - 0 0 0 0 0 0 0 0 0 0 0 0
41277 - 0 0 0 0 0 0 0 0 0 0 0 0
41278 - 0 0 0 0 0 0 0 0 0 0 0 0
41279 - 0 0 0 0 0 0 0 0 0 0 0 0
41280 - 0 0 0 0 0 0 0 0 0 0 0 0
41281 - 0 0 0 0 0 0 0 0 0 0 0 0
41282 - 0 0 0 0 0 0 0 0 1 0 0 0
41283 - 0 0 1 0 0 0 0 0 1 0 0 0
41284 - 0 0 0 0 0 0 0 0 0 0 0 0
41285 - 0 0 0 0 0 0 0 0 0 0 0 0
41286 - 0 0 0 0 0 0 0 0 0 0 0 0
41287 - 0 0 0 0 0 0 0 0 0 0 0 0
41288 - 0 0 0 0 0 0 0 0 0 14 14 14
41289 - 42 42 42 94 94 94 14 14 14 101 101 101
41290 -128 128 128 2 2 6 18 18 18 116 116 116
41291 -118 98 46 121 92 8 121 92 8 98 78 10
41292 -162 162 162 106 106 106 2 2 6 2 2 6
41293 - 2 2 6 195 195 195 195 195 195 6 6 6
41294 - 2 2 6 2 2 6 2 2 6 2 2 6
41295 - 74 74 74 62 62 62 22 22 22 6 6 6
41296 - 0 0 0 0 0 0 0 0 0 0 0 0
41297 - 0 0 0 0 0 0 0 0 0 0 0 0
41298 - 0 0 0 0 0 0 0 0 0 0 0 0
41299 - 0 0 0 0 0 0 0 0 0 0 0 0
41300 - 0 0 0 0 0 0 0 0 0 0 0 0
41301 - 0 0 0 0 0 0 0 0 0 0 0 0
41302 - 0 0 0 0 0 0 0 0 1 0 0 1
41303 - 0 0 1 0 0 0 0 0 1 0 0 0
41304 - 0 0 0 0 0 0 0 0 0 0 0 0
41305 - 0 0 0 0 0 0 0 0 0 0 0 0
41306 - 0 0 0 0 0 0 0 0 0 0 0 0
41307 - 0 0 0 0 0 0 0 0 0 0 0 0
41308 - 0 0 0 0 0 0 0 0 0 10 10 10
41309 - 38 38 38 90 90 90 14 14 14 58 58 58
41310 -210 210 210 26 26 26 54 38 6 154 114 10
41311 -226 170 11 236 186 11 225 175 15 184 144 12
41312 -215 174 15 175 146 61 37 26 9 2 2 6
41313 - 70 70 70 246 246 246 138 138 138 2 2 6
41314 - 2 2 6 2 2 6 2 2 6 2 2 6
41315 - 70 70 70 66 66 66 26 26 26 6 6 6
41316 - 0 0 0 0 0 0 0 0 0 0 0 0
41317 - 0 0 0 0 0 0 0 0 0 0 0 0
41318 - 0 0 0 0 0 0 0 0 0 0 0 0
41319 - 0 0 0 0 0 0 0 0 0 0 0 0
41320 - 0 0 0 0 0 0 0 0 0 0 0 0
41321 - 0 0 0 0 0 0 0 0 0 0 0 0
41322 - 0 0 0 0 0 0 0 0 0 0 0 0
41323 - 0 0 0 0 0 0 0 0 0 0 0 0
41324 - 0 0 0 0 0 0 0 0 0 0 0 0
41325 - 0 0 0 0 0 0 0 0 0 0 0 0
41326 - 0 0 0 0 0 0 0 0 0 0 0 0
41327 - 0 0 0 0 0 0 0 0 0 0 0 0
41328 - 0 0 0 0 0 0 0 0 0 10 10 10
41329 - 38 38 38 86 86 86 14 14 14 10 10 10
41330 -195 195 195 188 164 115 192 133 9 225 175 15
41331 -239 182 13 234 190 10 232 195 16 232 200 30
41332 -245 207 45 241 208 19 232 195 16 184 144 12
41333 -218 194 134 211 206 186 42 42 42 2 2 6
41334 - 2 2 6 2 2 6 2 2 6 2 2 6
41335 - 50 50 50 74 74 74 30 30 30 6 6 6
41336 - 0 0 0 0 0 0 0 0 0 0 0 0
41337 - 0 0 0 0 0 0 0 0 0 0 0 0
41338 - 0 0 0 0 0 0 0 0 0 0 0 0
41339 - 0 0 0 0 0 0 0 0 0 0 0 0
41340 - 0 0 0 0 0 0 0 0 0 0 0 0
41341 - 0 0 0 0 0 0 0 0 0 0 0 0
41342 - 0 0 0 0 0 0 0 0 0 0 0 0
41343 - 0 0 0 0 0 0 0 0 0 0 0 0
41344 - 0 0 0 0 0 0 0 0 0 0 0 0
41345 - 0 0 0 0 0 0 0 0 0 0 0 0
41346 - 0 0 0 0 0 0 0 0 0 0 0 0
41347 - 0 0 0 0 0 0 0 0 0 0 0 0
41348 - 0 0 0 0 0 0 0 0 0 10 10 10
41349 - 34 34 34 86 86 86 14 14 14 2 2 6
41350 -121 87 25 192 133 9 219 162 10 239 182 13
41351 -236 186 11 232 195 16 241 208 19 244 214 54
41352 -246 218 60 246 218 38 246 215 20 241 208 19
41353 -241 208 19 226 184 13 121 87 25 2 2 6
41354 - 2 2 6 2 2 6 2 2 6 2 2 6
41355 - 50 50 50 82 82 82 34 34 34 10 10 10
41356 - 0 0 0 0 0 0 0 0 0 0 0 0
41357 - 0 0 0 0 0 0 0 0 0 0 0 0
41358 - 0 0 0 0 0 0 0 0 0 0 0 0
41359 - 0 0 0 0 0 0 0 0 0 0 0 0
41360 - 0 0 0 0 0 0 0 0 0 0 0 0
41361 - 0 0 0 0 0 0 0 0 0 0 0 0
41362 - 0 0 0 0 0 0 0 0 0 0 0 0
41363 - 0 0 0 0 0 0 0 0 0 0 0 0
41364 - 0 0 0 0 0 0 0 0 0 0 0 0
41365 - 0 0 0 0 0 0 0 0 0 0 0 0
41366 - 0 0 0 0 0 0 0 0 0 0 0 0
41367 - 0 0 0 0 0 0 0 0 0 0 0 0
41368 - 0 0 0 0 0 0 0 0 0 10 10 10
41369 - 34 34 34 82 82 82 30 30 30 61 42 6
41370 -180 123 7 206 145 10 230 174 11 239 182 13
41371 -234 190 10 238 202 15 241 208 19 246 218 74
41372 -246 218 38 246 215 20 246 215 20 246 215 20
41373 -226 184 13 215 174 15 184 144 12 6 6 6
41374 - 2 2 6 2 2 6 2 2 6 2 2 6
41375 - 26 26 26 94 94 94 42 42 42 14 14 14
41376 - 0 0 0 0 0 0 0 0 0 0 0 0
41377 - 0 0 0 0 0 0 0 0 0 0 0 0
41378 - 0 0 0 0 0 0 0 0 0 0 0 0
41379 - 0 0 0 0 0 0 0 0 0 0 0 0
41380 - 0 0 0 0 0 0 0 0 0 0 0 0
41381 - 0 0 0 0 0 0 0 0 0 0 0 0
41382 - 0 0 0 0 0 0 0 0 0 0 0 0
41383 - 0 0 0 0 0 0 0 0 0 0 0 0
41384 - 0 0 0 0 0 0 0 0 0 0 0 0
41385 - 0 0 0 0 0 0 0 0 0 0 0 0
41386 - 0 0 0 0 0 0 0 0 0 0 0 0
41387 - 0 0 0 0 0 0 0 0 0 0 0 0
41388 - 0 0 0 0 0 0 0 0 0 10 10 10
41389 - 30 30 30 78 78 78 50 50 50 104 69 6
41390 -192 133 9 216 158 10 236 178 12 236 186 11
41391 -232 195 16 241 208 19 244 214 54 245 215 43
41392 -246 215 20 246 215 20 241 208 19 198 155 10
41393 -200 144 11 216 158 10 156 118 10 2 2 6
41394 - 2 2 6 2 2 6 2 2 6 2 2 6
41395 - 6 6 6 90 90 90 54 54 54 18 18 18
41396 - 6 6 6 0 0 0 0 0 0 0 0 0
41397 - 0 0 0 0 0 0 0 0 0 0 0 0
41398 - 0 0 0 0 0 0 0 0 0 0 0 0
41399 - 0 0 0 0 0 0 0 0 0 0 0 0
41400 - 0 0 0 0 0 0 0 0 0 0 0 0
41401 - 0 0 0 0 0 0 0 0 0 0 0 0
41402 - 0 0 0 0 0 0 0 0 0 0 0 0
41403 - 0 0 0 0 0 0 0 0 0 0 0 0
41404 - 0 0 0 0 0 0 0 0 0 0 0 0
41405 - 0 0 0 0 0 0 0 0 0 0 0 0
41406 - 0 0 0 0 0 0 0 0 0 0 0 0
41407 - 0 0 0 0 0 0 0 0 0 0 0 0
41408 - 0 0 0 0 0 0 0 0 0 10 10 10
41409 - 30 30 30 78 78 78 46 46 46 22 22 22
41410 -137 92 6 210 162 10 239 182 13 238 190 10
41411 -238 202 15 241 208 19 246 215 20 246 215 20
41412 -241 208 19 203 166 17 185 133 11 210 150 10
41413 -216 158 10 210 150 10 102 78 10 2 2 6
41414 - 6 6 6 54 54 54 14 14 14 2 2 6
41415 - 2 2 6 62 62 62 74 74 74 30 30 30
41416 - 10 10 10 0 0 0 0 0 0 0 0 0
41417 - 0 0 0 0 0 0 0 0 0 0 0 0
41418 - 0 0 0 0 0 0 0 0 0 0 0 0
41419 - 0 0 0 0 0 0 0 0 0 0 0 0
41420 - 0 0 0 0 0 0 0 0 0 0 0 0
41421 - 0 0 0 0 0 0 0 0 0 0 0 0
41422 - 0 0 0 0 0 0 0 0 0 0 0 0
41423 - 0 0 0 0 0 0 0 0 0 0 0 0
41424 - 0 0 0 0 0 0 0 0 0 0 0 0
41425 - 0 0 0 0 0 0 0 0 0 0 0 0
41426 - 0 0 0 0 0 0 0 0 0 0 0 0
41427 - 0 0 0 0 0 0 0 0 0 0 0 0
41428 - 0 0 0 0 0 0 0 0 0 10 10 10
41429 - 34 34 34 78 78 78 50 50 50 6 6 6
41430 - 94 70 30 139 102 15 190 146 13 226 184 13
41431 -232 200 30 232 195 16 215 174 15 190 146 13
41432 -168 122 10 192 133 9 210 150 10 213 154 11
41433 -202 150 34 182 157 106 101 98 89 2 2 6
41434 - 2 2 6 78 78 78 116 116 116 58 58 58
41435 - 2 2 6 22 22 22 90 90 90 46 46 46
41436 - 18 18 18 6 6 6 0 0 0 0 0 0
41437 - 0 0 0 0 0 0 0 0 0 0 0 0
41438 - 0 0 0 0 0 0 0 0 0 0 0 0
41439 - 0 0 0 0 0 0 0 0 0 0 0 0
41440 - 0 0 0 0 0 0 0 0 0 0 0 0
41441 - 0 0 0 0 0 0 0 0 0 0 0 0
41442 - 0 0 0 0 0 0 0 0 0 0 0 0
41443 - 0 0 0 0 0 0 0 0 0 0 0 0
41444 - 0 0 0 0 0 0 0 0 0 0 0 0
41445 - 0 0 0 0 0 0 0 0 0 0 0 0
41446 - 0 0 0 0 0 0 0 0 0 0 0 0
41447 - 0 0 0 0 0 0 0 0 0 0 0 0
41448 - 0 0 0 0 0 0 0 0 0 10 10 10
41449 - 38 38 38 86 86 86 50 50 50 6 6 6
41450 -128 128 128 174 154 114 156 107 11 168 122 10
41451 -198 155 10 184 144 12 197 138 11 200 144 11
41452 -206 145 10 206 145 10 197 138 11 188 164 115
41453 -195 195 195 198 198 198 174 174 174 14 14 14
41454 - 2 2 6 22 22 22 116 116 116 116 116 116
41455 - 22 22 22 2 2 6 74 74 74 70 70 70
41456 - 30 30 30 10 10 10 0 0 0 0 0 0
41457 - 0 0 0 0 0 0 0 0 0 0 0 0
41458 - 0 0 0 0 0 0 0 0 0 0 0 0
41459 - 0 0 0 0 0 0 0 0 0 0 0 0
41460 - 0 0 0 0 0 0 0 0 0 0 0 0
41461 - 0 0 0 0 0 0 0 0 0 0 0 0
41462 - 0 0 0 0 0 0 0 0 0 0 0 0
41463 - 0 0 0 0 0 0 0 0 0 0 0 0
41464 - 0 0 0 0 0 0 0 0 0 0 0 0
41465 - 0 0 0 0 0 0 0 0 0 0 0 0
41466 - 0 0 0 0 0 0 0 0 0 0 0 0
41467 - 0 0 0 0 0 0 0 0 0 0 0 0
41468 - 0 0 0 0 0 0 6 6 6 18 18 18
41469 - 50 50 50 101 101 101 26 26 26 10 10 10
41470 -138 138 138 190 190 190 174 154 114 156 107 11
41471 -197 138 11 200 144 11 197 138 11 192 133 9
41472 -180 123 7 190 142 34 190 178 144 187 187 187
41473 -202 202 202 221 221 221 214 214 214 66 66 66
41474 - 2 2 6 2 2 6 50 50 50 62 62 62
41475 - 6 6 6 2 2 6 10 10 10 90 90 90
41476 - 50 50 50 18 18 18 6 6 6 0 0 0
41477 - 0 0 0 0 0 0 0 0 0 0 0 0
41478 - 0 0 0 0 0 0 0 0 0 0 0 0
41479 - 0 0 0 0 0 0 0 0 0 0 0 0
41480 - 0 0 0 0 0 0 0 0 0 0 0 0
41481 - 0 0 0 0 0 0 0 0 0 0 0 0
41482 - 0 0 0 0 0 0 0 0 0 0 0 0
41483 - 0 0 0 0 0 0 0 0 0 0 0 0
41484 - 0 0 0 0 0 0 0 0 0 0 0 0
41485 - 0 0 0 0 0 0 0 0 0 0 0 0
41486 - 0 0 0 0 0 0 0 0 0 0 0 0
41487 - 0 0 0 0 0 0 0 0 0 0 0 0
41488 - 0 0 0 0 0 0 10 10 10 34 34 34
41489 - 74 74 74 74 74 74 2 2 6 6 6 6
41490 -144 144 144 198 198 198 190 190 190 178 166 146
41491 -154 121 60 156 107 11 156 107 11 168 124 44
41492 -174 154 114 187 187 187 190 190 190 210 210 210
41493 -246 246 246 253 253 253 253 253 253 182 182 182
41494 - 6 6 6 2 2 6 2 2 6 2 2 6
41495 - 2 2 6 2 2 6 2 2 6 62 62 62
41496 - 74 74 74 34 34 34 14 14 14 0 0 0
41497 - 0 0 0 0 0 0 0 0 0 0 0 0
41498 - 0 0 0 0 0 0 0 0 0 0 0 0
41499 - 0 0 0 0 0 0 0 0 0 0 0 0
41500 - 0 0 0 0 0 0 0 0 0 0 0 0
41501 - 0 0 0 0 0 0 0 0 0 0 0 0
41502 - 0 0 0 0 0 0 0 0 0 0 0 0
41503 - 0 0 0 0 0 0 0 0 0 0 0 0
41504 - 0 0 0 0 0 0 0 0 0 0 0 0
41505 - 0 0 0 0 0 0 0 0 0 0 0 0
41506 - 0 0 0 0 0 0 0 0 0 0 0 0
41507 - 0 0 0 0 0 0 0 0 0 0 0 0
41508 - 0 0 0 10 10 10 22 22 22 54 54 54
41509 - 94 94 94 18 18 18 2 2 6 46 46 46
41510 -234 234 234 221 221 221 190 190 190 190 190 190
41511 -190 190 190 187 187 187 187 187 187 190 190 190
41512 -190 190 190 195 195 195 214 214 214 242 242 242
41513 -253 253 253 253 253 253 253 253 253 253 253 253
41514 - 82 82 82 2 2 6 2 2 6 2 2 6
41515 - 2 2 6 2 2 6 2 2 6 14 14 14
41516 - 86 86 86 54 54 54 22 22 22 6 6 6
41517 - 0 0 0 0 0 0 0 0 0 0 0 0
41518 - 0 0 0 0 0 0 0 0 0 0 0 0
41519 - 0 0 0 0 0 0 0 0 0 0 0 0
41520 - 0 0 0 0 0 0 0 0 0 0 0 0
41521 - 0 0 0 0 0 0 0 0 0 0 0 0
41522 - 0 0 0 0 0 0 0 0 0 0 0 0
41523 - 0 0 0 0 0 0 0 0 0 0 0 0
41524 - 0 0 0 0 0 0 0 0 0 0 0 0
41525 - 0 0 0 0 0 0 0 0 0 0 0 0
41526 - 0 0 0 0 0 0 0 0 0 0 0 0
41527 - 0 0 0 0 0 0 0 0 0 0 0 0
41528 - 6 6 6 18 18 18 46 46 46 90 90 90
41529 - 46 46 46 18 18 18 6 6 6 182 182 182
41530 -253 253 253 246 246 246 206 206 206 190 190 190
41531 -190 190 190 190 190 190 190 190 190 190 190 190
41532 -206 206 206 231 231 231 250 250 250 253 253 253
41533 -253 253 253 253 253 253 253 253 253 253 253 253
41534 -202 202 202 14 14 14 2 2 6 2 2 6
41535 - 2 2 6 2 2 6 2 2 6 2 2 6
41536 - 42 42 42 86 86 86 42 42 42 18 18 18
41537 - 6 6 6 0 0 0 0 0 0 0 0 0
41538 - 0 0 0 0 0 0 0 0 0 0 0 0
41539 - 0 0 0 0 0 0 0 0 0 0 0 0
41540 - 0 0 0 0 0 0 0 0 0 0 0 0
41541 - 0 0 0 0 0 0 0 0 0 0 0 0
41542 - 0 0 0 0 0 0 0 0 0 0 0 0
41543 - 0 0 0 0 0 0 0 0 0 0 0 0
41544 - 0 0 0 0 0 0 0 0 0 0 0 0
41545 - 0 0 0 0 0 0 0 0 0 0 0 0
41546 - 0 0 0 0 0 0 0 0 0 0 0 0
41547 - 0 0 0 0 0 0 0 0 0 6 6 6
41548 - 14 14 14 38 38 38 74 74 74 66 66 66
41549 - 2 2 6 6 6 6 90 90 90 250 250 250
41550 -253 253 253 253 253 253 238 238 238 198 198 198
41551 -190 190 190 190 190 190 195 195 195 221 221 221
41552 -246 246 246 253 253 253 253 253 253 253 253 253
41553 -253 253 253 253 253 253 253 253 253 253 253 253
41554 -253 253 253 82 82 82 2 2 6 2 2 6
41555 - 2 2 6 2 2 6 2 2 6 2 2 6
41556 - 2 2 6 78 78 78 70 70 70 34 34 34
41557 - 14 14 14 6 6 6 0 0 0 0 0 0
41558 - 0 0 0 0 0 0 0 0 0 0 0 0
41559 - 0 0 0 0 0 0 0 0 0 0 0 0
41560 - 0 0 0 0 0 0 0 0 0 0 0 0
41561 - 0 0 0 0 0 0 0 0 0 0 0 0
41562 - 0 0 0 0 0 0 0 0 0 0 0 0
41563 - 0 0 0 0 0 0 0 0 0 0 0 0
41564 - 0 0 0 0 0 0 0 0 0 0 0 0
41565 - 0 0 0 0 0 0 0 0 0 0 0 0
41566 - 0 0 0 0 0 0 0 0 0 0 0 0
41567 - 0 0 0 0 0 0 0 0 0 14 14 14
41568 - 34 34 34 66 66 66 78 78 78 6 6 6
41569 - 2 2 6 18 18 18 218 218 218 253 253 253
41570 -253 253 253 253 253 253 253 253 253 246 246 246
41571 -226 226 226 231 231 231 246 246 246 253 253 253
41572 -253 253 253 253 253 253 253 253 253 253 253 253
41573 -253 253 253 253 253 253 253 253 253 253 253 253
41574 -253 253 253 178 178 178 2 2 6 2 2 6
41575 - 2 2 6 2 2 6 2 2 6 2 2 6
41576 - 2 2 6 18 18 18 90 90 90 62 62 62
41577 - 30 30 30 10 10 10 0 0 0 0 0 0
41578 - 0 0 0 0 0 0 0 0 0 0 0 0
41579 - 0 0 0 0 0 0 0 0 0 0 0 0
41580 - 0 0 0 0 0 0 0 0 0 0 0 0
41581 - 0 0 0 0 0 0 0 0 0 0 0 0
41582 - 0 0 0 0 0 0 0 0 0 0 0 0
41583 - 0 0 0 0 0 0 0 0 0 0 0 0
41584 - 0 0 0 0 0 0 0 0 0 0 0 0
41585 - 0 0 0 0 0 0 0 0 0 0 0 0
41586 - 0 0 0 0 0 0 0 0 0 0 0 0
41587 - 0 0 0 0 0 0 10 10 10 26 26 26
41588 - 58 58 58 90 90 90 18 18 18 2 2 6
41589 - 2 2 6 110 110 110 253 253 253 253 253 253
41590 -253 253 253 253 253 253 253 253 253 253 253 253
41591 -250 250 250 253 253 253 253 253 253 253 253 253
41592 -253 253 253 253 253 253 253 253 253 253 253 253
41593 -253 253 253 253 253 253 253 253 253 253 253 253
41594 -253 253 253 231 231 231 18 18 18 2 2 6
41595 - 2 2 6 2 2 6 2 2 6 2 2 6
41596 - 2 2 6 2 2 6 18 18 18 94 94 94
41597 - 54 54 54 26 26 26 10 10 10 0 0 0
41598 - 0 0 0 0 0 0 0 0 0 0 0 0
41599 - 0 0 0 0 0 0 0 0 0 0 0 0
41600 - 0 0 0 0 0 0 0 0 0 0 0 0
41601 - 0 0 0 0 0 0 0 0 0 0 0 0
41602 - 0 0 0 0 0 0 0 0 0 0 0 0
41603 - 0 0 0 0 0 0 0 0 0 0 0 0
41604 - 0 0 0 0 0 0 0 0 0 0 0 0
41605 - 0 0 0 0 0 0 0 0 0 0 0 0
41606 - 0 0 0 0 0 0 0 0 0 0 0 0
41607 - 0 0 0 6 6 6 22 22 22 50 50 50
41608 - 90 90 90 26 26 26 2 2 6 2 2 6
41609 - 14 14 14 195 195 195 250 250 250 253 253 253
41610 -253 253 253 253 253 253 253 253 253 253 253 253
41611 -253 253 253 253 253 253 253 253 253 253 253 253
41612 -253 253 253 253 253 253 253 253 253 253 253 253
41613 -253 253 253 253 253 253 253 253 253 253 253 253
41614 -250 250 250 242 242 242 54 54 54 2 2 6
41615 - 2 2 6 2 2 6 2 2 6 2 2 6
41616 - 2 2 6 2 2 6 2 2 6 38 38 38
41617 - 86 86 86 50 50 50 22 22 22 6 6 6
41618 - 0 0 0 0 0 0 0 0 0 0 0 0
41619 - 0 0 0 0 0 0 0 0 0 0 0 0
41620 - 0 0 0 0 0 0 0 0 0 0 0 0
41621 - 0 0 0 0 0 0 0 0 0 0 0 0
41622 - 0 0 0 0 0 0 0 0 0 0 0 0
41623 - 0 0 0 0 0 0 0 0 0 0 0 0
41624 - 0 0 0 0 0 0 0 0 0 0 0 0
41625 - 0 0 0 0 0 0 0 0 0 0 0 0
41626 - 0 0 0 0 0 0 0 0 0 0 0 0
41627 - 6 6 6 14 14 14 38 38 38 82 82 82
41628 - 34 34 34 2 2 6 2 2 6 2 2 6
41629 - 42 42 42 195 195 195 246 246 246 253 253 253
41630 -253 253 253 253 253 253 253 253 253 250 250 250
41631 -242 242 242 242 242 242 250 250 250 253 253 253
41632 -253 253 253 253 253 253 253 253 253 253 253 253
41633 -253 253 253 250 250 250 246 246 246 238 238 238
41634 -226 226 226 231 231 231 101 101 101 6 6 6
41635 - 2 2 6 2 2 6 2 2 6 2 2 6
41636 - 2 2 6 2 2 6 2 2 6 2 2 6
41637 - 38 38 38 82 82 82 42 42 42 14 14 14
41638 - 6 6 6 0 0 0 0 0 0 0 0 0
41639 - 0 0 0 0 0 0 0 0 0 0 0 0
41640 - 0 0 0 0 0 0 0 0 0 0 0 0
41641 - 0 0 0 0 0 0 0 0 0 0 0 0
41642 - 0 0 0 0 0 0 0 0 0 0 0 0
41643 - 0 0 0 0 0 0 0 0 0 0 0 0
41644 - 0 0 0 0 0 0 0 0 0 0 0 0
41645 - 0 0 0 0 0 0 0 0 0 0 0 0
41646 - 0 0 0 0 0 0 0 0 0 0 0 0
41647 - 10 10 10 26 26 26 62 62 62 66 66 66
41648 - 2 2 6 2 2 6 2 2 6 6 6 6
41649 - 70 70 70 170 170 170 206 206 206 234 234 234
41650 -246 246 246 250 250 250 250 250 250 238 238 238
41651 -226 226 226 231 231 231 238 238 238 250 250 250
41652 -250 250 250 250 250 250 246 246 246 231 231 231
41653 -214 214 214 206 206 206 202 202 202 202 202 202
41654 -198 198 198 202 202 202 182 182 182 18 18 18
41655 - 2 2 6 2 2 6 2 2 6 2 2 6
41656 - 2 2 6 2 2 6 2 2 6 2 2 6
41657 - 2 2 6 62 62 62 66 66 66 30 30 30
41658 - 10 10 10 0 0 0 0 0 0 0 0 0
41659 - 0 0 0 0 0 0 0 0 0 0 0 0
41660 - 0 0 0 0 0 0 0 0 0 0 0 0
41661 - 0 0 0 0 0 0 0 0 0 0 0 0
41662 - 0 0 0 0 0 0 0 0 0 0 0 0
41663 - 0 0 0 0 0 0 0 0 0 0 0 0
41664 - 0 0 0 0 0 0 0 0 0 0 0 0
41665 - 0 0 0 0 0 0 0 0 0 0 0 0
41666 - 0 0 0 0 0 0 0 0 0 0 0 0
41667 - 14 14 14 42 42 42 82 82 82 18 18 18
41668 - 2 2 6 2 2 6 2 2 6 10 10 10
41669 - 94 94 94 182 182 182 218 218 218 242 242 242
41670 -250 250 250 253 253 253 253 253 253 250 250 250
41671 -234 234 234 253 253 253 253 253 253 253 253 253
41672 -253 253 253 253 253 253 253 253 253 246 246 246
41673 -238 238 238 226 226 226 210 210 210 202 202 202
41674 -195 195 195 195 195 195 210 210 210 158 158 158
41675 - 6 6 6 14 14 14 50 50 50 14 14 14
41676 - 2 2 6 2 2 6 2 2 6 2 2 6
41677 - 2 2 6 6 6 6 86 86 86 46 46 46
41678 - 18 18 18 6 6 6 0 0 0 0 0 0
41679 - 0 0 0 0 0 0 0 0 0 0 0 0
41680 - 0 0 0 0 0 0 0 0 0 0 0 0
41681 - 0 0 0 0 0 0 0 0 0 0 0 0
41682 - 0 0 0 0 0 0 0 0 0 0 0 0
41683 - 0 0 0 0 0 0 0 0 0 0 0 0
41684 - 0 0 0 0 0 0 0 0 0 0 0 0
41685 - 0 0 0 0 0 0 0 0 0 0 0 0
41686 - 0 0 0 0 0 0 0 0 0 6 6 6
41687 - 22 22 22 54 54 54 70 70 70 2 2 6
41688 - 2 2 6 10 10 10 2 2 6 22 22 22
41689 -166 166 166 231 231 231 250 250 250 253 253 253
41690 -253 253 253 253 253 253 253 253 253 250 250 250
41691 -242 242 242 253 253 253 253 253 253 253 253 253
41692 -253 253 253 253 253 253 253 253 253 253 253 253
41693 -253 253 253 253 253 253 253 253 253 246 246 246
41694 -231 231 231 206 206 206 198 198 198 226 226 226
41695 - 94 94 94 2 2 6 6 6 6 38 38 38
41696 - 30 30 30 2 2 6 2 2 6 2 2 6
41697 - 2 2 6 2 2 6 62 62 62 66 66 66
41698 - 26 26 26 10 10 10 0 0 0 0 0 0
41699 - 0 0 0 0 0 0 0 0 0 0 0 0
41700 - 0 0 0 0 0 0 0 0 0 0 0 0
41701 - 0 0 0 0 0 0 0 0 0 0 0 0
41702 - 0 0 0 0 0 0 0 0 0 0 0 0
41703 - 0 0 0 0 0 0 0 0 0 0 0 0
41704 - 0 0 0 0 0 0 0 0 0 0 0 0
41705 - 0 0 0 0 0 0 0 0 0 0 0 0
41706 - 0 0 0 0 0 0 0 0 0 10 10 10
41707 - 30 30 30 74 74 74 50 50 50 2 2 6
41708 - 26 26 26 26 26 26 2 2 6 106 106 106
41709 -238 238 238 253 253 253 253 253 253 253 253 253
41710 -253 253 253 253 253 253 253 253 253 253 253 253
41711 -253 253 253 253 253 253 253 253 253 253 253 253
41712 -253 253 253 253 253 253 253 253 253 253 253 253
41713 -253 253 253 253 253 253 253 253 253 253 253 253
41714 -253 253 253 246 246 246 218 218 218 202 202 202
41715 -210 210 210 14 14 14 2 2 6 2 2 6
41716 - 30 30 30 22 22 22 2 2 6 2 2 6
41717 - 2 2 6 2 2 6 18 18 18 86 86 86
41718 - 42 42 42 14 14 14 0 0 0 0 0 0
41719 - 0 0 0 0 0 0 0 0 0 0 0 0
41720 - 0 0 0 0 0 0 0 0 0 0 0 0
41721 - 0 0 0 0 0 0 0 0 0 0 0 0
41722 - 0 0 0 0 0 0 0 0 0 0 0 0
41723 - 0 0 0 0 0 0 0 0 0 0 0 0
41724 - 0 0 0 0 0 0 0 0 0 0 0 0
41725 - 0 0 0 0 0 0 0 0 0 0 0 0
41726 - 0 0 0 0 0 0 0 0 0 14 14 14
41727 - 42 42 42 90 90 90 22 22 22 2 2 6
41728 - 42 42 42 2 2 6 18 18 18 218 218 218
41729 -253 253 253 253 253 253 253 253 253 253 253 253
41730 -253 253 253 253 253 253 253 253 253 253 253 253
41731 -253 253 253 253 253 253 253 253 253 253 253 253
41732 -253 253 253 253 253 253 253 253 253 253 253 253
41733 -253 253 253 253 253 253 253 253 253 253 253 253
41734 -253 253 253 253 253 253 250 250 250 221 221 221
41735 -218 218 218 101 101 101 2 2 6 14 14 14
41736 - 18 18 18 38 38 38 10 10 10 2 2 6
41737 - 2 2 6 2 2 6 2 2 6 78 78 78
41738 - 58 58 58 22 22 22 6 6 6 0 0 0
41739 - 0 0 0 0 0 0 0 0 0 0 0 0
41740 - 0 0 0 0 0 0 0 0 0 0 0 0
41741 - 0 0 0 0 0 0 0 0 0 0 0 0
41742 - 0 0 0 0 0 0 0 0 0 0 0 0
41743 - 0 0 0 0 0 0 0 0 0 0 0 0
41744 - 0 0 0 0 0 0 0 0 0 0 0 0
41745 - 0 0 0 0 0 0 0 0 0 0 0 0
41746 - 0 0 0 0 0 0 6 6 6 18 18 18
41747 - 54 54 54 82 82 82 2 2 6 26 26 26
41748 - 22 22 22 2 2 6 123 123 123 253 253 253
41749 -253 253 253 253 253 253 253 253 253 253 253 253
41750 -253 253 253 253 253 253 253 253 253 253 253 253
41751 -253 253 253 253 253 253 253 253 253 253 253 253
41752 -253 253 253 253 253 253 253 253 253 253 253 253
41753 -253 253 253 253 253 253 253 253 253 253 253 253
41754 -253 253 253 253 253 253 253 253 253 250 250 250
41755 -238 238 238 198 198 198 6 6 6 38 38 38
41756 - 58 58 58 26 26 26 38 38 38 2 2 6
41757 - 2 2 6 2 2 6 2 2 6 46 46 46
41758 - 78 78 78 30 30 30 10 10 10 0 0 0
41759 - 0 0 0 0 0 0 0 0 0 0 0 0
41760 - 0 0 0 0 0 0 0 0 0 0 0 0
41761 - 0 0 0 0 0 0 0 0 0 0 0 0
41762 - 0 0 0 0 0 0 0 0 0 0 0 0
41763 - 0 0 0 0 0 0 0 0 0 0 0 0
41764 - 0 0 0 0 0 0 0 0 0 0 0 0
41765 - 0 0 0 0 0 0 0 0 0 0 0 0
41766 - 0 0 0 0 0 0 10 10 10 30 30 30
41767 - 74 74 74 58 58 58 2 2 6 42 42 42
41768 - 2 2 6 22 22 22 231 231 231 253 253 253
41769 -253 253 253 253 253 253 253 253 253 253 253 253
41770 -253 253 253 253 253 253 253 253 253 250 250 250
41771 -253 253 253 253 253 253 253 253 253 253 253 253
41772 -253 253 253 253 253 253 253 253 253 253 253 253
41773 -253 253 253 253 253 253 253 253 253 253 253 253
41774 -253 253 253 253 253 253 253 253 253 253 253 253
41775 -253 253 253 246 246 246 46 46 46 38 38 38
41776 - 42 42 42 14 14 14 38 38 38 14 14 14
41777 - 2 2 6 2 2 6 2 2 6 6 6 6
41778 - 86 86 86 46 46 46 14 14 14 0 0 0
41779 - 0 0 0 0 0 0 0 0 0 0 0 0
41780 - 0 0 0 0 0 0 0 0 0 0 0 0
41781 - 0 0 0 0 0 0 0 0 0 0 0 0
41782 - 0 0 0 0 0 0 0 0 0 0 0 0
41783 - 0 0 0 0 0 0 0 0 0 0 0 0
41784 - 0 0 0 0 0 0 0 0 0 0 0 0
41785 - 0 0 0 0 0 0 0 0 0 0 0 0
41786 - 0 0 0 6 6 6 14 14 14 42 42 42
41787 - 90 90 90 18 18 18 18 18 18 26 26 26
41788 - 2 2 6 116 116 116 253 253 253 253 253 253
41789 -253 253 253 253 253 253 253 253 253 253 253 253
41790 -253 253 253 253 253 253 250 250 250 238 238 238
41791 -253 253 253 253 253 253 253 253 253 253 253 253
41792 -253 253 253 253 253 253 253 253 253 253 253 253
41793 -253 253 253 253 253 253 253 253 253 253 253 253
41794 -253 253 253 253 253 253 253 253 253 253 253 253
41795 -253 253 253 253 253 253 94 94 94 6 6 6
41796 - 2 2 6 2 2 6 10 10 10 34 34 34
41797 - 2 2 6 2 2 6 2 2 6 2 2 6
41798 - 74 74 74 58 58 58 22 22 22 6 6 6
41799 - 0 0 0 0 0 0 0 0 0 0 0 0
41800 - 0 0 0 0 0 0 0 0 0 0 0 0
41801 - 0 0 0 0 0 0 0 0 0 0 0 0
41802 - 0 0 0 0 0 0 0 0 0 0 0 0
41803 - 0 0 0 0 0 0 0 0 0 0 0 0
41804 - 0 0 0 0 0 0 0 0 0 0 0 0
41805 - 0 0 0 0 0 0 0 0 0 0 0 0
41806 - 0 0 0 10 10 10 26 26 26 66 66 66
41807 - 82 82 82 2 2 6 38 38 38 6 6 6
41808 - 14 14 14 210 210 210 253 253 253 253 253 253
41809 -253 253 253 253 253 253 253 253 253 253 253 253
41810 -253 253 253 253 253 253 246 246 246 242 242 242
41811 -253 253 253 253 253 253 253 253 253 253 253 253
41812 -253 253 253 253 253 253 253 253 253 253 253 253
41813 -253 253 253 253 253 253 253 253 253 253 253 253
41814 -253 253 253 253 253 253 253 253 253 253 253 253
41815 -253 253 253 253 253 253 144 144 144 2 2 6
41816 - 2 2 6 2 2 6 2 2 6 46 46 46
41817 - 2 2 6 2 2 6 2 2 6 2 2 6
41818 - 42 42 42 74 74 74 30 30 30 10 10 10
41819 - 0 0 0 0 0 0 0 0 0 0 0 0
41820 - 0 0 0 0 0 0 0 0 0 0 0 0
41821 - 0 0 0 0 0 0 0 0 0 0 0 0
41822 - 0 0 0 0 0 0 0 0 0 0 0 0
41823 - 0 0 0 0 0 0 0 0 0 0 0 0
41824 - 0 0 0 0 0 0 0 0 0 0 0 0
41825 - 0 0 0 0 0 0 0 0 0 0 0 0
41826 - 6 6 6 14 14 14 42 42 42 90 90 90
41827 - 26 26 26 6 6 6 42 42 42 2 2 6
41828 - 74 74 74 250 250 250 253 253 253 253 253 253
41829 -253 253 253 253 253 253 253 253 253 253 253 253
41830 -253 253 253 253 253 253 242 242 242 242 242 242
41831 -253 253 253 253 253 253 253 253 253 253 253 253
41832 -253 253 253 253 253 253 253 253 253 253 253 253
41833 -253 253 253 253 253 253 253 253 253 253 253 253
41834 -253 253 253 253 253 253 253 253 253 253 253 253
41835 -253 253 253 253 253 253 182 182 182 2 2 6
41836 - 2 2 6 2 2 6 2 2 6 46 46 46
41837 - 2 2 6 2 2 6 2 2 6 2 2 6
41838 - 10 10 10 86 86 86 38 38 38 10 10 10
41839 - 0 0 0 0 0 0 0 0 0 0 0 0
41840 - 0 0 0 0 0 0 0 0 0 0 0 0
41841 - 0 0 0 0 0 0 0 0 0 0 0 0
41842 - 0 0 0 0 0 0 0 0 0 0 0 0
41843 - 0 0 0 0 0 0 0 0 0 0 0 0
41844 - 0 0 0 0 0 0 0 0 0 0 0 0
41845 - 0 0 0 0 0 0 0 0 0 0 0 0
41846 - 10 10 10 26 26 26 66 66 66 82 82 82
41847 - 2 2 6 22 22 22 18 18 18 2 2 6
41848 -149 149 149 253 253 253 253 253 253 253 253 253
41849 -253 253 253 253 253 253 253 253 253 253 253 253
41850 -253 253 253 253 253 253 234 234 234 242 242 242
41851 -253 253 253 253 253 253 253 253 253 253 253 253
41852 -253 253 253 253 253 253 253 253 253 253 253 253
41853 -253 253 253 253 253 253 253 253 253 253 253 253
41854 -253 253 253 253 253 253 253 253 253 253 253 253
41855 -253 253 253 253 253 253 206 206 206 2 2 6
41856 - 2 2 6 2 2 6 2 2 6 38 38 38
41857 - 2 2 6 2 2 6 2 2 6 2 2 6
41858 - 6 6 6 86 86 86 46 46 46 14 14 14
41859 - 0 0 0 0 0 0 0 0 0 0 0 0
41860 - 0 0 0 0 0 0 0 0 0 0 0 0
41861 - 0 0 0 0 0 0 0 0 0 0 0 0
41862 - 0 0 0 0 0 0 0 0 0 0 0 0
41863 - 0 0 0 0 0 0 0 0 0 0 0 0
41864 - 0 0 0 0 0 0 0 0 0 0 0 0
41865 - 0 0 0 0 0 0 0 0 0 6 6 6
41866 - 18 18 18 46 46 46 86 86 86 18 18 18
41867 - 2 2 6 34 34 34 10 10 10 6 6 6
41868 -210 210 210 253 253 253 253 253 253 253 253 253
41869 -253 253 253 253 253 253 253 253 253 253 253 253
41870 -253 253 253 253 253 253 234 234 234 242 242 242
41871 -253 253 253 253 253 253 253 253 253 253 253 253
41872 -253 253 253 253 253 253 253 253 253 253 253 253
41873 -253 253 253 253 253 253 253 253 253 253 253 253
41874 -253 253 253 253 253 253 253 253 253 253 253 253
41875 -253 253 253 253 253 253 221 221 221 6 6 6
41876 - 2 2 6 2 2 6 6 6 6 30 30 30
41877 - 2 2 6 2 2 6 2 2 6 2 2 6
41878 - 2 2 6 82 82 82 54 54 54 18 18 18
41879 - 6 6 6 0 0 0 0 0 0 0 0 0
41880 - 0 0 0 0 0 0 0 0 0 0 0 0
41881 - 0 0 0 0 0 0 0 0 0 0 0 0
41882 - 0 0 0 0 0 0 0 0 0 0 0 0
41883 - 0 0 0 0 0 0 0 0 0 0 0 0
41884 - 0 0 0 0 0 0 0 0 0 0 0 0
41885 - 0 0 0 0 0 0 0 0 0 10 10 10
41886 - 26 26 26 66 66 66 62 62 62 2 2 6
41887 - 2 2 6 38 38 38 10 10 10 26 26 26
41888 -238 238 238 253 253 253 253 253 253 253 253 253
41889 -253 253 253 253 253 253 253 253 253 253 253 253
41890 -253 253 253 253 253 253 231 231 231 238 238 238
41891 -253 253 253 253 253 253 253 253 253 253 253 253
41892 -253 253 253 253 253 253 253 253 253 253 253 253
41893 -253 253 253 253 253 253 253 253 253 253 253 253
41894 -253 253 253 253 253 253 253 253 253 253 253 253
41895 -253 253 253 253 253 253 231 231 231 6 6 6
41896 - 2 2 6 2 2 6 10 10 10 30 30 30
41897 - 2 2 6 2 2 6 2 2 6 2 2 6
41898 - 2 2 6 66 66 66 58 58 58 22 22 22
41899 - 6 6 6 0 0 0 0 0 0 0 0 0
41900 - 0 0 0 0 0 0 0 0 0 0 0 0
41901 - 0 0 0 0 0 0 0 0 0 0 0 0
41902 - 0 0 0 0 0 0 0 0 0 0 0 0
41903 - 0 0 0 0 0 0 0 0 0 0 0 0
41904 - 0 0 0 0 0 0 0 0 0 0 0 0
41905 - 0 0 0 0 0 0 0 0 0 10 10 10
41906 - 38 38 38 78 78 78 6 6 6 2 2 6
41907 - 2 2 6 46 46 46 14 14 14 42 42 42
41908 -246 246 246 253 253 253 253 253 253 253 253 253
41909 -253 253 253 253 253 253 253 253 253 253 253 253
41910 -253 253 253 253 253 253 231 231 231 242 242 242
41911 -253 253 253 253 253 253 253 253 253 253 253 253
41912 -253 253 253 253 253 253 253 253 253 253 253 253
41913 -253 253 253 253 253 253 253 253 253 253 253 253
41914 -253 253 253 253 253 253 253 253 253 253 253 253
41915 -253 253 253 253 253 253 234 234 234 10 10 10
41916 - 2 2 6 2 2 6 22 22 22 14 14 14
41917 - 2 2 6 2 2 6 2 2 6 2 2 6
41918 - 2 2 6 66 66 66 62 62 62 22 22 22
41919 - 6 6 6 0 0 0 0 0 0 0 0 0
41920 - 0 0 0 0 0 0 0 0 0 0 0 0
41921 - 0 0 0 0 0 0 0 0 0 0 0 0
41922 - 0 0 0 0 0 0 0 0 0 0 0 0
41923 - 0 0 0 0 0 0 0 0 0 0 0 0
41924 - 0 0 0 0 0 0 0 0 0 0 0 0
41925 - 0 0 0 0 0 0 6 6 6 18 18 18
41926 - 50 50 50 74 74 74 2 2 6 2 2 6
41927 - 14 14 14 70 70 70 34 34 34 62 62 62
41928 -250 250 250 253 253 253 253 253 253 253 253 253
41929 -253 253 253 253 253 253 253 253 253 253 253 253
41930 -253 253 253 253 253 253 231 231 231 246 246 246
41931 -253 253 253 253 253 253 253 253 253 253 253 253
41932 -253 253 253 253 253 253 253 253 253 253 253 253
41933 -253 253 253 253 253 253 253 253 253 253 253 253
41934 -253 253 253 253 253 253 253 253 253 253 253 253
41935 -253 253 253 253 253 253 234 234 234 14 14 14
41936 - 2 2 6 2 2 6 30 30 30 2 2 6
41937 - 2 2 6 2 2 6 2 2 6 2 2 6
41938 - 2 2 6 66 66 66 62 62 62 22 22 22
41939 - 6 6 6 0 0 0 0 0 0 0 0 0
41940 - 0 0 0 0 0 0 0 0 0 0 0 0
41941 - 0 0 0 0 0 0 0 0 0 0 0 0
41942 - 0 0 0 0 0 0 0 0 0 0 0 0
41943 - 0 0 0 0 0 0 0 0 0 0 0 0
41944 - 0 0 0 0 0 0 0 0 0 0 0 0
41945 - 0 0 0 0 0 0 6 6 6 18 18 18
41946 - 54 54 54 62 62 62 2 2 6 2 2 6
41947 - 2 2 6 30 30 30 46 46 46 70 70 70
41948 -250 250 250 253 253 253 253 253 253 253 253 253
41949 -253 253 253 253 253 253 253 253 253 253 253 253
41950 -253 253 253 253 253 253 231 231 231 246 246 246
41951 -253 253 253 253 253 253 253 253 253 253 253 253
41952 -253 253 253 253 253 253 253 253 253 253 253 253
41953 -253 253 253 253 253 253 253 253 253 253 253 253
41954 -253 253 253 253 253 253 253 253 253 253 253 253
41955 -253 253 253 253 253 253 226 226 226 10 10 10
41956 - 2 2 6 6 6 6 30 30 30 2 2 6
41957 - 2 2 6 2 2 6 2 2 6 2 2 6
41958 - 2 2 6 66 66 66 58 58 58 22 22 22
41959 - 6 6 6 0 0 0 0 0 0 0 0 0
41960 - 0 0 0 0 0 0 0 0 0 0 0 0
41961 - 0 0 0 0 0 0 0 0 0 0 0 0
41962 - 0 0 0 0 0 0 0 0 0 0 0 0
41963 - 0 0 0 0 0 0 0 0 0 0 0 0
41964 - 0 0 0 0 0 0 0 0 0 0 0 0
41965 - 0 0 0 0 0 0 6 6 6 22 22 22
41966 - 58 58 58 62 62 62 2 2 6 2 2 6
41967 - 2 2 6 2 2 6 30 30 30 78 78 78
41968 -250 250 250 253 253 253 253 253 253 253 253 253
41969 -253 253 253 253 253 253 253 253 253 253 253 253
41970 -253 253 253 253 253 253 231 231 231 246 246 246
41971 -253 253 253 253 253 253 253 253 253 253 253 253
41972 -253 253 253 253 253 253 253 253 253 253 253 253
41973 -253 253 253 253 253 253 253 253 253 253 253 253
41974 -253 253 253 253 253 253 253 253 253 253 253 253
41975 -253 253 253 253 253 253 206 206 206 2 2 6
41976 - 22 22 22 34 34 34 18 14 6 22 22 22
41977 - 26 26 26 18 18 18 6 6 6 2 2 6
41978 - 2 2 6 82 82 82 54 54 54 18 18 18
41979 - 6 6 6 0 0 0 0 0 0 0 0 0
41980 - 0 0 0 0 0 0 0 0 0 0 0 0
41981 - 0 0 0 0 0 0 0 0 0 0 0 0
41982 - 0 0 0 0 0 0 0 0 0 0 0 0
41983 - 0 0 0 0 0 0 0 0 0 0 0 0
41984 - 0 0 0 0 0 0 0 0 0 0 0 0
41985 - 0 0 0 0 0 0 6 6 6 26 26 26
41986 - 62 62 62 106 106 106 74 54 14 185 133 11
41987 -210 162 10 121 92 8 6 6 6 62 62 62
41988 -238 238 238 253 253 253 253 253 253 253 253 253
41989 -253 253 253 253 253 253 253 253 253 253 253 253
41990 -253 253 253 253 253 253 231 231 231 246 246 246
41991 -253 253 253 253 253 253 253 253 253 253 253 253
41992 -253 253 253 253 253 253 253 253 253 253 253 253
41993 -253 253 253 253 253 253 253 253 253 253 253 253
41994 -253 253 253 253 253 253 253 253 253 253 253 253
41995 -253 253 253 253 253 253 158 158 158 18 18 18
41996 - 14 14 14 2 2 6 2 2 6 2 2 6
41997 - 6 6 6 18 18 18 66 66 66 38 38 38
41998 - 6 6 6 94 94 94 50 50 50 18 18 18
41999 - 6 6 6 0 0 0 0 0 0 0 0 0
42000 - 0 0 0 0 0 0 0 0 0 0 0 0
42001 - 0 0 0 0 0 0 0 0 0 0 0 0
42002 - 0 0 0 0 0 0 0 0 0 0 0 0
42003 - 0 0 0 0 0 0 0 0 0 0 0 0
42004 - 0 0 0 0 0 0 0 0 0 6 6 6
42005 - 10 10 10 10 10 10 18 18 18 38 38 38
42006 - 78 78 78 142 134 106 216 158 10 242 186 14
42007 -246 190 14 246 190 14 156 118 10 10 10 10
42008 - 90 90 90 238 238 238 253 253 253 253 253 253
42009 -253 253 253 253 253 253 253 253 253 253 253 253
42010 -253 253 253 253 253 253 231 231 231 250 250 250
42011 -253 253 253 253 253 253 253 253 253 253 253 253
42012 -253 253 253 253 253 253 253 253 253 253 253 253
42013 -253 253 253 253 253 253 253 253 253 253 253 253
42014 -253 253 253 253 253 253 253 253 253 246 230 190
42015 -238 204 91 238 204 91 181 142 44 37 26 9
42016 - 2 2 6 2 2 6 2 2 6 2 2 6
42017 - 2 2 6 2 2 6 38 38 38 46 46 46
42018 - 26 26 26 106 106 106 54 54 54 18 18 18
42019 - 6 6 6 0 0 0 0 0 0 0 0 0
42020 - 0 0 0 0 0 0 0 0 0 0 0 0
42021 - 0 0 0 0 0 0 0 0 0 0 0 0
42022 - 0 0 0 0 0 0 0 0 0 0 0 0
42023 - 0 0 0 0 0 0 0 0 0 0 0 0
42024 - 0 0 0 6 6 6 14 14 14 22 22 22
42025 - 30 30 30 38 38 38 50 50 50 70 70 70
42026 -106 106 106 190 142 34 226 170 11 242 186 14
42027 -246 190 14 246 190 14 246 190 14 154 114 10
42028 - 6 6 6 74 74 74 226 226 226 253 253 253
42029 -253 253 253 253 253 253 253 253 253 253 253 253
42030 -253 253 253 253 253 253 231 231 231 250 250 250
42031 -253 253 253 253 253 253 253 253 253 253 253 253
42032 -253 253 253 253 253 253 253 253 253 253 253 253
42033 -253 253 253 253 253 253 253 253 253 253 253 253
42034 -253 253 253 253 253 253 253 253 253 228 184 62
42035 -241 196 14 241 208 19 232 195 16 38 30 10
42036 - 2 2 6 2 2 6 2 2 6 2 2 6
42037 - 2 2 6 6 6 6 30 30 30 26 26 26
42038 -203 166 17 154 142 90 66 66 66 26 26 26
42039 - 6 6 6 0 0 0 0 0 0 0 0 0
42040 - 0 0 0 0 0 0 0 0 0 0 0 0
42041 - 0 0 0 0 0 0 0 0 0 0 0 0
42042 - 0 0 0 0 0 0 0 0 0 0 0 0
42043 - 0 0 0 0 0 0 0 0 0 0 0 0
42044 - 6 6 6 18 18 18 38 38 38 58 58 58
42045 - 78 78 78 86 86 86 101 101 101 123 123 123
42046 -175 146 61 210 150 10 234 174 13 246 186 14
42047 -246 190 14 246 190 14 246 190 14 238 190 10
42048 -102 78 10 2 2 6 46 46 46 198 198 198
42049 -253 253 253 253 253 253 253 253 253 253 253 253
42050 -253 253 253 253 253 253 234 234 234 242 242 242
42051 -253 253 253 253 253 253 253 253 253 253 253 253
42052 -253 253 253 253 253 253 253 253 253 253 253 253
42053 -253 253 253 253 253 253 253 253 253 253 253 253
42054 -253 253 253 253 253 253 253 253 253 224 178 62
42055 -242 186 14 241 196 14 210 166 10 22 18 6
42056 - 2 2 6 2 2 6 2 2 6 2 2 6
42057 - 2 2 6 2 2 6 6 6 6 121 92 8
42058 -238 202 15 232 195 16 82 82 82 34 34 34
42059 - 10 10 10 0 0 0 0 0 0 0 0 0
42060 - 0 0 0 0 0 0 0 0 0 0 0 0
42061 - 0 0 0 0 0 0 0 0 0 0 0 0
42062 - 0 0 0 0 0 0 0 0 0 0 0 0
42063 - 0 0 0 0 0 0 0 0 0 0 0 0
42064 - 14 14 14 38 38 38 70 70 70 154 122 46
42065 -190 142 34 200 144 11 197 138 11 197 138 11
42066 -213 154 11 226 170 11 242 186 14 246 190 14
42067 -246 190 14 246 190 14 246 190 14 246 190 14
42068 -225 175 15 46 32 6 2 2 6 22 22 22
42069 -158 158 158 250 250 250 253 253 253 253 253 253
42070 -253 253 253 253 253 253 253 253 253 253 253 253
42071 -253 253 253 253 253 253 253 253 253 253 253 253
42072 -253 253 253 253 253 253 253 253 253 253 253 253
42073 -253 253 253 253 253 253 253 253 253 253 253 253
42074 -253 253 253 250 250 250 242 242 242 224 178 62
42075 -239 182 13 236 186 11 213 154 11 46 32 6
42076 - 2 2 6 2 2 6 2 2 6 2 2 6
42077 - 2 2 6 2 2 6 61 42 6 225 175 15
42078 -238 190 10 236 186 11 112 100 78 42 42 42
42079 - 14 14 14 0 0 0 0 0 0 0 0 0
42080 - 0 0 0 0 0 0 0 0 0 0 0 0
42081 - 0 0 0 0 0 0 0 0 0 0 0 0
42082 - 0 0 0 0 0 0 0 0 0 0 0 0
42083 - 0 0 0 0 0 0 0 0 0 6 6 6
42084 - 22 22 22 54 54 54 154 122 46 213 154 11
42085 -226 170 11 230 174 11 226 170 11 226 170 11
42086 -236 178 12 242 186 14 246 190 14 246 190 14
42087 -246 190 14 246 190 14 246 190 14 246 190 14
42088 -241 196 14 184 144 12 10 10 10 2 2 6
42089 - 6 6 6 116 116 116 242 242 242 253 253 253
42090 -253 253 253 253 253 253 253 253 253 253 253 253
42091 -253 253 253 253 253 253 253 253 253 253 253 253
42092 -253 253 253 253 253 253 253 253 253 253 253 253
42093 -253 253 253 253 253 253 253 253 253 253 253 253
42094 -253 253 253 231 231 231 198 198 198 214 170 54
42095 -236 178 12 236 178 12 210 150 10 137 92 6
42096 - 18 14 6 2 2 6 2 2 6 2 2 6
42097 - 6 6 6 70 47 6 200 144 11 236 178 12
42098 -239 182 13 239 182 13 124 112 88 58 58 58
42099 - 22 22 22 6 6 6 0 0 0 0 0 0
42100 - 0 0 0 0 0 0 0 0 0 0 0 0
42101 - 0 0 0 0 0 0 0 0 0 0 0 0
42102 - 0 0 0 0 0 0 0 0 0 0 0 0
42103 - 0 0 0 0 0 0 0 0 0 10 10 10
42104 - 30 30 30 70 70 70 180 133 36 226 170 11
42105 -239 182 13 242 186 14 242 186 14 246 186 14
42106 -246 190 14 246 190 14 246 190 14 246 190 14
42107 -246 190 14 246 190 14 246 190 14 246 190 14
42108 -246 190 14 232 195 16 98 70 6 2 2 6
42109 - 2 2 6 2 2 6 66 66 66 221 221 221
42110 -253 253 253 253 253 253 253 253 253 253 253 253
42111 -253 253 253 253 253 253 253 253 253 253 253 253
42112 -253 253 253 253 253 253 253 253 253 253 253 253
42113 -253 253 253 253 253 253 253 253 253 253 253 253
42114 -253 253 253 206 206 206 198 198 198 214 166 58
42115 -230 174 11 230 174 11 216 158 10 192 133 9
42116 -163 110 8 116 81 8 102 78 10 116 81 8
42117 -167 114 7 197 138 11 226 170 11 239 182 13
42118 -242 186 14 242 186 14 162 146 94 78 78 78
42119 - 34 34 34 14 14 14 6 6 6 0 0 0
42120 - 0 0 0 0 0 0 0 0 0 0 0 0
42121 - 0 0 0 0 0 0 0 0 0 0 0 0
42122 - 0 0 0 0 0 0 0 0 0 0 0 0
42123 - 0 0 0 0 0 0 0 0 0 6 6 6
42124 - 30 30 30 78 78 78 190 142 34 226 170 11
42125 -239 182 13 246 190 14 246 190 14 246 190 14
42126 -246 190 14 246 190 14 246 190 14 246 190 14
42127 -246 190 14 246 190 14 246 190 14 246 190 14
42128 -246 190 14 241 196 14 203 166 17 22 18 6
42129 - 2 2 6 2 2 6 2 2 6 38 38 38
42130 -218 218 218 253 253 253 253 253 253 253 253 253
42131 -253 253 253 253 253 253 253 253 253 253 253 253
42132 -253 253 253 253 253 253 253 253 253 253 253 253
42133 -253 253 253 253 253 253 253 253 253 253 253 253
42134 -250 250 250 206 206 206 198 198 198 202 162 69
42135 -226 170 11 236 178 12 224 166 10 210 150 10
42136 -200 144 11 197 138 11 192 133 9 197 138 11
42137 -210 150 10 226 170 11 242 186 14 246 190 14
42138 -246 190 14 246 186 14 225 175 15 124 112 88
42139 - 62 62 62 30 30 30 14 14 14 6 6 6
42140 - 0 0 0 0 0 0 0 0 0 0 0 0
42141 - 0 0 0 0 0 0 0 0 0 0 0 0
42142 - 0 0 0 0 0 0 0 0 0 0 0 0
42143 - 0 0 0 0 0 0 0 0 0 10 10 10
42144 - 30 30 30 78 78 78 174 135 50 224 166 10
42145 -239 182 13 246 190 14 246 190 14 246 190 14
42146 -246 190 14 246 190 14 246 190 14 246 190 14
42147 -246 190 14 246 190 14 246 190 14 246 190 14
42148 -246 190 14 246 190 14 241 196 14 139 102 15
42149 - 2 2 6 2 2 6 2 2 6 2 2 6
42150 - 78 78 78 250 250 250 253 253 253 253 253 253
42151 -253 253 253 253 253 253 253 253 253 253 253 253
42152 -253 253 253 253 253 253 253 253 253 253 253 253
42153 -253 253 253 253 253 253 253 253 253 253 253 253
42154 -250 250 250 214 214 214 198 198 198 190 150 46
42155 -219 162 10 236 178 12 234 174 13 224 166 10
42156 -216 158 10 213 154 11 213 154 11 216 158 10
42157 -226 170 11 239 182 13 246 190 14 246 190 14
42158 -246 190 14 246 190 14 242 186 14 206 162 42
42159 -101 101 101 58 58 58 30 30 30 14 14 14
42160 - 6 6 6 0 0 0 0 0 0 0 0 0
42161 - 0 0 0 0 0 0 0 0 0 0 0 0
42162 - 0 0 0 0 0 0 0 0 0 0 0 0
42163 - 0 0 0 0 0 0 0 0 0 10 10 10
42164 - 30 30 30 74 74 74 174 135 50 216 158 10
42165 -236 178 12 246 190 14 246 190 14 246 190 14
42166 -246 190 14 246 190 14 246 190 14 246 190 14
42167 -246 190 14 246 190 14 246 190 14 246 190 14
42168 -246 190 14 246 190 14 241 196 14 226 184 13
42169 - 61 42 6 2 2 6 2 2 6 2 2 6
42170 - 22 22 22 238 238 238 253 253 253 253 253 253
42171 -253 253 253 253 253 253 253 253 253 253 253 253
42172 -253 253 253 253 253 253 253 253 253 253 253 253
42173 -253 253 253 253 253 253 253 253 253 253 253 253
42174 -253 253 253 226 226 226 187 187 187 180 133 36
42175 -216 158 10 236 178 12 239 182 13 236 178 12
42176 -230 174 11 226 170 11 226 170 11 230 174 11
42177 -236 178 12 242 186 14 246 190 14 246 190 14
42178 -246 190 14 246 190 14 246 186 14 239 182 13
42179 -206 162 42 106 106 106 66 66 66 34 34 34
42180 - 14 14 14 6 6 6 0 0 0 0 0 0
42181 - 0 0 0 0 0 0 0 0 0 0 0 0
42182 - 0 0 0 0 0 0 0 0 0 0 0 0
42183 - 0 0 0 0 0 0 0 0 0 6 6 6
42184 - 26 26 26 70 70 70 163 133 67 213 154 11
42185 -236 178 12 246 190 14 246 190 14 246 190 14
42186 -246 190 14 246 190 14 246 190 14 246 190 14
42187 -246 190 14 246 190 14 246 190 14 246 190 14
42188 -246 190 14 246 190 14 246 190 14 241 196 14
42189 -190 146 13 18 14 6 2 2 6 2 2 6
42190 - 46 46 46 246 246 246 253 253 253 253 253 253
42191 -253 253 253 253 253 253 253 253 253 253 253 253
42192 -253 253 253 253 253 253 253 253 253 253 253 253
42193 -253 253 253 253 253 253 253 253 253 253 253 253
42194 -253 253 253 221 221 221 86 86 86 156 107 11
42195 -216 158 10 236 178 12 242 186 14 246 186 14
42196 -242 186 14 239 182 13 239 182 13 242 186 14
42197 -242 186 14 246 186 14 246 190 14 246 190 14
42198 -246 190 14 246 190 14 246 190 14 246 190 14
42199 -242 186 14 225 175 15 142 122 72 66 66 66
42200 - 30 30 30 10 10 10 0 0 0 0 0 0
42201 - 0 0 0 0 0 0 0 0 0 0 0 0
42202 - 0 0 0 0 0 0 0 0 0 0 0 0
42203 - 0 0 0 0 0 0 0 0 0 6 6 6
42204 - 26 26 26 70 70 70 163 133 67 210 150 10
42205 -236 178 12 246 190 14 246 190 14 246 190 14
42206 -246 190 14 246 190 14 246 190 14 246 190 14
42207 -246 190 14 246 190 14 246 190 14 246 190 14
42208 -246 190 14 246 190 14 246 190 14 246 190 14
42209 -232 195 16 121 92 8 34 34 34 106 106 106
42210 -221 221 221 253 253 253 253 253 253 253 253 253
42211 -253 253 253 253 253 253 253 253 253 253 253 253
42212 -253 253 253 253 253 253 253 253 253 253 253 253
42213 -253 253 253 253 253 253 253 253 253 253 253 253
42214 -242 242 242 82 82 82 18 14 6 163 110 8
42215 -216 158 10 236 178 12 242 186 14 246 190 14
42216 -246 190 14 246 190 14 246 190 14 246 190 14
42217 -246 190 14 246 190 14 246 190 14 246 190 14
42218 -246 190 14 246 190 14 246 190 14 246 190 14
42219 -246 190 14 246 190 14 242 186 14 163 133 67
42220 - 46 46 46 18 18 18 6 6 6 0 0 0
42221 - 0 0 0 0 0 0 0 0 0 0 0 0
42222 - 0 0 0 0 0 0 0 0 0 0 0 0
42223 - 0 0 0 0 0 0 0 0 0 10 10 10
42224 - 30 30 30 78 78 78 163 133 67 210 150 10
42225 -236 178 12 246 186 14 246 190 14 246 190 14
42226 -246 190 14 246 190 14 246 190 14 246 190 14
42227 -246 190 14 246 190 14 246 190 14 246 190 14
42228 -246 190 14 246 190 14 246 190 14 246 190 14
42229 -241 196 14 215 174 15 190 178 144 253 253 253
42230 -253 253 253 253 253 253 253 253 253 253 253 253
42231 -253 253 253 253 253 253 253 253 253 253 253 253
42232 -253 253 253 253 253 253 253 253 253 253 253 253
42233 -253 253 253 253 253 253 253 253 253 218 218 218
42234 - 58 58 58 2 2 6 22 18 6 167 114 7
42235 -216 158 10 236 178 12 246 186 14 246 190 14
42236 -246 190 14 246 190 14 246 190 14 246 190 14
42237 -246 190 14 246 190 14 246 190 14 246 190 14
42238 -246 190 14 246 190 14 246 190 14 246 190 14
42239 -246 190 14 246 186 14 242 186 14 190 150 46
42240 - 54 54 54 22 22 22 6 6 6 0 0 0
42241 - 0 0 0 0 0 0 0 0 0 0 0 0
42242 - 0 0 0 0 0 0 0 0 0 0 0 0
42243 - 0 0 0 0 0 0 0 0 0 14 14 14
42244 - 38 38 38 86 86 86 180 133 36 213 154 11
42245 -236 178 12 246 186 14 246 190 14 246 190 14
42246 -246 190 14 246 190 14 246 190 14 246 190 14
42247 -246 190 14 246 190 14 246 190 14 246 190 14
42248 -246 190 14 246 190 14 246 190 14 246 190 14
42249 -246 190 14 232 195 16 190 146 13 214 214 214
42250 -253 253 253 253 253 253 253 253 253 253 253 253
42251 -253 253 253 253 253 253 253 253 253 253 253 253
42252 -253 253 253 253 253 253 253 253 253 253 253 253
42253 -253 253 253 250 250 250 170 170 170 26 26 26
42254 - 2 2 6 2 2 6 37 26 9 163 110 8
42255 -219 162 10 239 182 13 246 186 14 246 190 14
42256 -246 190 14 246 190 14 246 190 14 246 190 14
42257 -246 190 14 246 190 14 246 190 14 246 190 14
42258 -246 190 14 246 190 14 246 190 14 246 190 14
42259 -246 186 14 236 178 12 224 166 10 142 122 72
42260 - 46 46 46 18 18 18 6 6 6 0 0 0
42261 - 0 0 0 0 0 0 0 0 0 0 0 0
42262 - 0 0 0 0 0 0 0 0 0 0 0 0
42263 - 0 0 0 0 0 0 6 6 6 18 18 18
42264 - 50 50 50 109 106 95 192 133 9 224 166 10
42265 -242 186 14 246 190 14 246 190 14 246 190 14
42266 -246 190 14 246 190 14 246 190 14 246 190 14
42267 -246 190 14 246 190 14 246 190 14 246 190 14
42268 -246 190 14 246 190 14 246 190 14 246 190 14
42269 -242 186 14 226 184 13 210 162 10 142 110 46
42270 -226 226 226 253 253 253 253 253 253 253 253 253
42271 -253 253 253 253 253 253 253 253 253 253 253 253
42272 -253 253 253 253 253 253 253 253 253 253 253 253
42273 -198 198 198 66 66 66 2 2 6 2 2 6
42274 - 2 2 6 2 2 6 50 34 6 156 107 11
42275 -219 162 10 239 182 13 246 186 14 246 190 14
42276 -246 190 14 246 190 14 246 190 14 246 190 14
42277 -246 190 14 246 190 14 246 190 14 246 190 14
42278 -246 190 14 246 190 14 246 190 14 242 186 14
42279 -234 174 13 213 154 11 154 122 46 66 66 66
42280 - 30 30 30 10 10 10 0 0 0 0 0 0
42281 - 0 0 0 0 0 0 0 0 0 0 0 0
42282 - 0 0 0 0 0 0 0 0 0 0 0 0
42283 - 0 0 0 0 0 0 6 6 6 22 22 22
42284 - 58 58 58 154 121 60 206 145 10 234 174 13
42285 -242 186 14 246 186 14 246 190 14 246 190 14
42286 -246 190 14 246 190 14 246 190 14 246 190 14
42287 -246 190 14 246 190 14 246 190 14 246 190 14
42288 -246 190 14 246 190 14 246 190 14 246 190 14
42289 -246 186 14 236 178 12 210 162 10 163 110 8
42290 - 61 42 6 138 138 138 218 218 218 250 250 250
42291 -253 253 253 253 253 253 253 253 253 250 250 250
42292 -242 242 242 210 210 210 144 144 144 66 66 66
42293 - 6 6 6 2 2 6 2 2 6 2 2 6
42294 - 2 2 6 2 2 6 61 42 6 163 110 8
42295 -216 158 10 236 178 12 246 190 14 246 190 14
42296 -246 190 14 246 190 14 246 190 14 246 190 14
42297 -246 190 14 246 190 14 246 190 14 246 190 14
42298 -246 190 14 239 182 13 230 174 11 216 158 10
42299 -190 142 34 124 112 88 70 70 70 38 38 38
42300 - 18 18 18 6 6 6 0 0 0 0 0 0
42301 - 0 0 0 0 0 0 0 0 0 0 0 0
42302 - 0 0 0 0 0 0 0 0 0 0 0 0
42303 - 0 0 0 0 0 0 6 6 6 22 22 22
42304 - 62 62 62 168 124 44 206 145 10 224 166 10
42305 -236 178 12 239 182 13 242 186 14 242 186 14
42306 -246 186 14 246 190 14 246 190 14 246 190 14
42307 -246 190 14 246 190 14 246 190 14 246 190 14
42308 -246 190 14 246 190 14 246 190 14 246 190 14
42309 -246 190 14 236 178 12 216 158 10 175 118 6
42310 - 80 54 7 2 2 6 6 6 6 30 30 30
42311 - 54 54 54 62 62 62 50 50 50 38 38 38
42312 - 14 14 14 2 2 6 2 2 6 2 2 6
42313 - 2 2 6 2 2 6 2 2 6 2 2 6
42314 - 2 2 6 6 6 6 80 54 7 167 114 7
42315 -213 154 11 236 178 12 246 190 14 246 190 14
42316 -246 190 14 246 190 14 246 190 14 246 190 14
42317 -246 190 14 242 186 14 239 182 13 239 182 13
42318 -230 174 11 210 150 10 174 135 50 124 112 88
42319 - 82 82 82 54 54 54 34 34 34 18 18 18
42320 - 6 6 6 0 0 0 0 0 0 0 0 0
42321 - 0 0 0 0 0 0 0 0 0 0 0 0
42322 - 0 0 0 0 0 0 0 0 0 0 0 0
42323 - 0 0 0 0 0 0 6 6 6 18 18 18
42324 - 50 50 50 158 118 36 192 133 9 200 144 11
42325 -216 158 10 219 162 10 224 166 10 226 170 11
42326 -230 174 11 236 178 12 239 182 13 239 182 13
42327 -242 186 14 246 186 14 246 190 14 246 190 14
42328 -246 190 14 246 190 14 246 190 14 246 190 14
42329 -246 186 14 230 174 11 210 150 10 163 110 8
42330 -104 69 6 10 10 10 2 2 6 2 2 6
42331 - 2 2 6 2 2 6 2 2 6 2 2 6
42332 - 2 2 6 2 2 6 2 2 6 2 2 6
42333 - 2 2 6 2 2 6 2 2 6 2 2 6
42334 - 2 2 6 6 6 6 91 60 6 167 114 7
42335 -206 145 10 230 174 11 242 186 14 246 190 14
42336 -246 190 14 246 190 14 246 186 14 242 186 14
42337 -239 182 13 230 174 11 224 166 10 213 154 11
42338 -180 133 36 124 112 88 86 86 86 58 58 58
42339 - 38 38 38 22 22 22 10 10 10 6 6 6
42340 - 0 0 0 0 0 0 0 0 0 0 0 0
42341 - 0 0 0 0 0 0 0 0 0 0 0 0
42342 - 0 0 0 0 0 0 0 0 0 0 0 0
42343 - 0 0 0 0 0 0 0 0 0 14 14 14
42344 - 34 34 34 70 70 70 138 110 50 158 118 36
42345 -167 114 7 180 123 7 192 133 9 197 138 11
42346 -200 144 11 206 145 10 213 154 11 219 162 10
42347 -224 166 10 230 174 11 239 182 13 242 186 14
42348 -246 186 14 246 186 14 246 186 14 246 186 14
42349 -239 182 13 216 158 10 185 133 11 152 99 6
42350 -104 69 6 18 14 6 2 2 6 2 2 6
42351 - 2 2 6 2 2 6 2 2 6 2 2 6
42352 - 2 2 6 2 2 6 2 2 6 2 2 6
42353 - 2 2 6 2 2 6 2 2 6 2 2 6
42354 - 2 2 6 6 6 6 80 54 7 152 99 6
42355 -192 133 9 219 162 10 236 178 12 239 182 13
42356 -246 186 14 242 186 14 239 182 13 236 178 12
42357 -224 166 10 206 145 10 192 133 9 154 121 60
42358 - 94 94 94 62 62 62 42 42 42 22 22 22
42359 - 14 14 14 6 6 6 0 0 0 0 0 0
42360 - 0 0 0 0 0 0 0 0 0 0 0 0
42361 - 0 0 0 0 0 0 0 0 0 0 0 0
42362 - 0 0 0 0 0 0 0 0 0 0 0 0
42363 - 0 0 0 0 0 0 0 0 0 6 6 6
42364 - 18 18 18 34 34 34 58 58 58 78 78 78
42365 -101 98 89 124 112 88 142 110 46 156 107 11
42366 -163 110 8 167 114 7 175 118 6 180 123 7
42367 -185 133 11 197 138 11 210 150 10 219 162 10
42368 -226 170 11 236 178 12 236 178 12 234 174 13
42369 -219 162 10 197 138 11 163 110 8 130 83 6
42370 - 91 60 6 10 10 10 2 2 6 2 2 6
42371 - 18 18 18 38 38 38 38 38 38 38 38 38
42372 - 38 38 38 38 38 38 38 38 38 38 38 38
42373 - 38 38 38 38 38 38 26 26 26 2 2 6
42374 - 2 2 6 6 6 6 70 47 6 137 92 6
42375 -175 118 6 200 144 11 219 162 10 230 174 11
42376 -234 174 13 230 174 11 219 162 10 210 150 10
42377 -192 133 9 163 110 8 124 112 88 82 82 82
42378 - 50 50 50 30 30 30 14 14 14 6 6 6
42379 - 0 0 0 0 0 0 0 0 0 0 0 0
42380 - 0 0 0 0 0 0 0 0 0 0 0 0
42381 - 0 0 0 0 0 0 0 0 0 0 0 0
42382 - 0 0 0 0 0 0 0 0 0 0 0 0
42383 - 0 0 0 0 0 0 0 0 0 0 0 0
42384 - 6 6 6 14 14 14 22 22 22 34 34 34
42385 - 42 42 42 58 58 58 74 74 74 86 86 86
42386 -101 98 89 122 102 70 130 98 46 121 87 25
42387 -137 92 6 152 99 6 163 110 8 180 123 7
42388 -185 133 11 197 138 11 206 145 10 200 144 11
42389 -180 123 7 156 107 11 130 83 6 104 69 6
42390 - 50 34 6 54 54 54 110 110 110 101 98 89
42391 - 86 86 86 82 82 82 78 78 78 78 78 78
42392 - 78 78 78 78 78 78 78 78 78 78 78 78
42393 - 78 78 78 82 82 82 86 86 86 94 94 94
42394 -106 106 106 101 101 101 86 66 34 124 80 6
42395 -156 107 11 180 123 7 192 133 9 200 144 11
42396 -206 145 10 200 144 11 192 133 9 175 118 6
42397 -139 102 15 109 106 95 70 70 70 42 42 42
42398 - 22 22 22 10 10 10 0 0 0 0 0 0
42399 - 0 0 0 0 0 0 0 0 0 0 0 0
42400 - 0 0 0 0 0 0 0 0 0 0 0 0
42401 - 0 0 0 0 0 0 0 0 0 0 0 0
42402 - 0 0 0 0 0 0 0 0 0 0 0 0
42403 - 0 0 0 0 0 0 0 0 0 0 0 0
42404 - 0 0 0 0 0 0 6 6 6 10 10 10
42405 - 14 14 14 22 22 22 30 30 30 38 38 38
42406 - 50 50 50 62 62 62 74 74 74 90 90 90
42407 -101 98 89 112 100 78 121 87 25 124 80 6
42408 -137 92 6 152 99 6 152 99 6 152 99 6
42409 -138 86 6 124 80 6 98 70 6 86 66 30
42410 -101 98 89 82 82 82 58 58 58 46 46 46
42411 - 38 38 38 34 34 34 34 34 34 34 34 34
42412 - 34 34 34 34 34 34 34 34 34 34 34 34
42413 - 34 34 34 34 34 34 38 38 38 42 42 42
42414 - 54 54 54 82 82 82 94 86 76 91 60 6
42415 -134 86 6 156 107 11 167 114 7 175 118 6
42416 -175 118 6 167 114 7 152 99 6 121 87 25
42417 -101 98 89 62 62 62 34 34 34 18 18 18
42418 - 6 6 6 0 0 0 0 0 0 0 0 0
42419 - 0 0 0 0 0 0 0 0 0 0 0 0
42420 - 0 0 0 0 0 0 0 0 0 0 0 0
42421 - 0 0 0 0 0 0 0 0 0 0 0 0
42422 - 0 0 0 0 0 0 0 0 0 0 0 0
42423 - 0 0 0 0 0 0 0 0 0 0 0 0
42424 - 0 0 0 0 0 0 0 0 0 0 0 0
42425 - 0 0 0 6 6 6 6 6 6 10 10 10
42426 - 18 18 18 22 22 22 30 30 30 42 42 42
42427 - 50 50 50 66 66 66 86 86 86 101 98 89
42428 -106 86 58 98 70 6 104 69 6 104 69 6
42429 -104 69 6 91 60 6 82 62 34 90 90 90
42430 - 62 62 62 38 38 38 22 22 22 14 14 14
42431 - 10 10 10 10 10 10 10 10 10 10 10 10
42432 - 10 10 10 10 10 10 6 6 6 10 10 10
42433 - 10 10 10 10 10 10 10 10 10 14 14 14
42434 - 22 22 22 42 42 42 70 70 70 89 81 66
42435 - 80 54 7 104 69 6 124 80 6 137 92 6
42436 -134 86 6 116 81 8 100 82 52 86 86 86
42437 - 58 58 58 30 30 30 14 14 14 6 6 6
42438 - 0 0 0 0 0 0 0 0 0 0 0 0
42439 - 0 0 0 0 0 0 0 0 0 0 0 0
42440 - 0 0 0 0 0 0 0 0 0 0 0 0
42441 - 0 0 0 0 0 0 0 0 0 0 0 0
42442 - 0 0 0 0 0 0 0 0 0 0 0 0
42443 - 0 0 0 0 0 0 0 0 0 0 0 0
42444 - 0 0 0 0 0 0 0 0 0 0 0 0
42445 - 0 0 0 0 0 0 0 0 0 0 0 0
42446 - 0 0 0 6 6 6 10 10 10 14 14 14
42447 - 18 18 18 26 26 26 38 38 38 54 54 54
42448 - 70 70 70 86 86 86 94 86 76 89 81 66
42449 - 89 81 66 86 86 86 74 74 74 50 50 50
42450 - 30 30 30 14 14 14 6 6 6 0 0 0
42451 - 0 0 0 0 0 0 0 0 0 0 0 0
42452 - 0 0 0 0 0 0 0 0 0 0 0 0
42453 - 0 0 0 0 0 0 0 0 0 0 0 0
42454 - 6 6 6 18 18 18 34 34 34 58 58 58
42455 - 82 82 82 89 81 66 89 81 66 89 81 66
42456 - 94 86 66 94 86 76 74 74 74 50 50 50
42457 - 26 26 26 14 14 14 6 6 6 0 0 0
42458 - 0 0 0 0 0 0 0 0 0 0 0 0
42459 - 0 0 0 0 0 0 0 0 0 0 0 0
42460 - 0 0 0 0 0 0 0 0 0 0 0 0
42461 - 0 0 0 0 0 0 0 0 0 0 0 0
42462 - 0 0 0 0 0 0 0 0 0 0 0 0
42463 - 0 0 0 0 0 0 0 0 0 0 0 0
42464 - 0 0 0 0 0 0 0 0 0 0 0 0
42465 - 0 0 0 0 0 0 0 0 0 0 0 0
42466 - 0 0 0 0 0 0 0 0 0 0 0 0
42467 - 6 6 6 6 6 6 14 14 14 18 18 18
42468 - 30 30 30 38 38 38 46 46 46 54 54 54
42469 - 50 50 50 42 42 42 30 30 30 18 18 18
42470 - 10 10 10 0 0 0 0 0 0 0 0 0
42471 - 0 0 0 0 0 0 0 0 0 0 0 0
42472 - 0 0 0 0 0 0 0 0 0 0 0 0
42473 - 0 0 0 0 0 0 0 0 0 0 0 0
42474 - 0 0 0 6 6 6 14 14 14 26 26 26
42475 - 38 38 38 50 50 50 58 58 58 58 58 58
42476 - 54 54 54 42 42 42 30 30 30 18 18 18
42477 - 10 10 10 0 0 0 0 0 0 0 0 0
42478 - 0 0 0 0 0 0 0 0 0 0 0 0
42479 - 0 0 0 0 0 0 0 0 0 0 0 0
42480 - 0 0 0 0 0 0 0 0 0 0 0 0
42481 - 0 0 0 0 0 0 0 0 0 0 0 0
42482 - 0 0 0 0 0 0 0 0 0 0 0 0
42483 - 0 0 0 0 0 0 0 0 0 0 0 0
42484 - 0 0 0 0 0 0 0 0 0 0 0 0
42485 - 0 0 0 0 0 0 0 0 0 0 0 0
42486 - 0 0 0 0 0 0 0 0 0 0 0 0
42487 - 0 0 0 0 0 0 0 0 0 6 6 6
42488 - 6 6 6 10 10 10 14 14 14 18 18 18
42489 - 18 18 18 14 14 14 10 10 10 6 6 6
42490 - 0 0 0 0 0 0 0 0 0 0 0 0
42491 - 0 0 0 0 0 0 0 0 0 0 0 0
42492 - 0 0 0 0 0 0 0 0 0 0 0 0
42493 - 0 0 0 0 0 0 0 0 0 0 0 0
42494 - 0 0 0 0 0 0 0 0 0 6 6 6
42495 - 14 14 14 18 18 18 22 22 22 22 22 22
42496 - 18 18 18 14 14 14 10 10 10 6 6 6
42497 - 0 0 0 0 0 0 0 0 0 0 0 0
42498 - 0 0 0 0 0 0 0 0 0 0 0 0
42499 - 0 0 0 0 0 0 0 0 0 0 0 0
42500 - 0 0 0 0 0 0 0 0 0 0 0 0
42501 - 0 0 0 0 0 0 0 0 0 0 0 0
42502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42505 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42509 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42514 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42515 +4 4 4 4 4 4
42516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42519 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42523 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42528 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42529 +4 4 4 4 4 4
42530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42533 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42535 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42537 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42538 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42541 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42542 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42543 +4 4 4 4 4 4
42544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42547 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42548 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42549 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42550 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42551 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42556 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42557 +4 4 4 4 4 4
42558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42562 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42563 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42564 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42565 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42566 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42569 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42570 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42571 +4 4 4 4 4 4
42572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42575 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42577 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42578 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42579 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42584 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42585 +4 4 4 4 4 4
42586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42589 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42590 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
42591 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
42592 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42593 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42594 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42595 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
42596 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
42597 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
42598 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42599 +4 4 4 4 4 4
42600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42604 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
42605 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
42606 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42607 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42609 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
42610 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
42611 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
42612 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42613 +4 4 4 4 4 4
42614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42618 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
42619 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
42620 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
42621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42623 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
42624 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
42625 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
42626 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
42627 +4 4 4 4 4 4
42628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42630 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42631 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
42632 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
42633 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
42634 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
42635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42636 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
42637 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
42638 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
42639 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
42640 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
42641 +4 4 4 4 4 4
42642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42644 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42645 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
42646 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
42647 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
42648 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
42649 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42650 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
42651 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
42652 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
42653 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
42654 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
42655 +4 4 4 4 4 4
42656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42658 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42659 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
42660 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
42661 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
42662 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
42663 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
42664 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
42665 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
42666 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
42667 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
42668 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
42669 +4 4 4 4 4 4
42670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42672 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
42673 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
42674 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
42675 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
42676 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
42677 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
42678 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
42679 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
42680 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
42681 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
42682 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
42683 +4 4 4 4 4 4
42684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42685 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42686 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
42687 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
42688 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
42689 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
42690 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
42691 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
42692 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
42693 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
42694 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
42695 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
42696 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
42697 +4 4 4 4 4 4
42698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42699 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42700 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
42701 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
42702 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
42703 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
42704 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
42705 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
42706 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
42707 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
42708 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
42709 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
42710 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
42711 +4 4 4 4 4 4
42712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42714 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
42715 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
42716 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
42717 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
42718 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
42719 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
42720 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
42721 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
42722 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
42723 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
42724 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
42725 +4 4 4 4 4 4
42726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42727 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
42728 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
42729 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
42730 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
42731 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
42732 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
42733 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
42734 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
42735 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
42736 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
42737 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
42738 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
42739 +4 4 4 4 4 4
42740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42741 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
42742 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
42743 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
42744 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
42745 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
42746 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
42747 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
42748 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
42749 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
42750 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
42751 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
42752 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
42753 +0 0 0 4 4 4
42754 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
42755 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
42756 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
42757 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
42758 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
42759 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
42760 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
42761 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
42762 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
42763 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
42764 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
42765 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
42766 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
42767 +2 0 0 0 0 0
42768 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
42769 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
42770 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
42771 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
42772 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
42773 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
42774 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
42775 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
42776 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
42777 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
42778 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
42779 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
42780 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
42781 +37 38 37 0 0 0
42782 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
42783 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
42784 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
42785 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
42786 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
42787 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
42788 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
42789 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
42790 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
42791 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
42792 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
42793 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
42794 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
42795 +85 115 134 4 0 0
42796 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
42797 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
42798 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
42799 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
42800 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
42801 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
42802 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
42803 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
42804 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
42805 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
42806 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
42807 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
42808 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
42809 +60 73 81 4 0 0
42810 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
42811 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
42812 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
42813 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
42814 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
42815 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
42816 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
42817 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
42818 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
42819 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
42820 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
42821 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
42822 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
42823 +16 19 21 4 0 0
42824 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
42825 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
42826 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
42827 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
42828 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
42829 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
42830 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
42831 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
42832 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
42833 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
42834 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
42835 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
42836 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
42837 +4 0 0 4 3 3
42838 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
42839 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
42840 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
42841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
42842 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
42843 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
42844 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
42845 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
42846 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
42847 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
42848 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
42849 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
42850 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
42851 +3 2 2 4 4 4
42852 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
42853 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
42854 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
42855 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
42856 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
42857 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
42858 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
42859 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
42860 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
42861 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
42862 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
42863 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
42864 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
42865 +4 4 4 4 4 4
42866 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
42867 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
42868 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
42869 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
42870 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
42871 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
42872 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
42873 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
42874 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
42875 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
42876 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
42877 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
42878 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
42879 +4 4 4 4 4 4
42880 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
42881 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
42882 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
42883 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
42884 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
42885 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
42886 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
42887 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
42888 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
42889 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
42890 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
42891 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
42892 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
42893 +5 5 5 5 5 5
42894 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
42895 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
42896 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
42897 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
42898 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
42899 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
42900 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
42901 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
42902 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
42903 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
42904 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
42905 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
42906 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
42907 +5 5 5 4 4 4
42908 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
42909 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
42910 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
42911 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
42912 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
42913 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
42914 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
42915 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
42916 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
42917 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
42918 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
42919 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
42920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42921 +4 4 4 4 4 4
42922 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
42923 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
42924 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
42925 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
42926 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
42927 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
42928 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
42929 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
42930 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
42931 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
42932 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
42933 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
42934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42935 +4 4 4 4 4 4
42936 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
42937 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
42938 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
42939 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
42940 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
42941 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
42942 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
42943 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
42944 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
42945 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
42946 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
42947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42949 +4 4 4 4 4 4
42950 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
42951 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
42952 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
42953 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
42954 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
42955 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
42956 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
42957 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
42958 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
42959 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
42960 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
42961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42963 +4 4 4 4 4 4
42964 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
42965 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
42966 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
42967 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
42968 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
42969 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
42970 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
42971 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
42972 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
42973 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
42974 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
42975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42977 +4 4 4 4 4 4
42978 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
42979 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
42980 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
42981 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
42982 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
42983 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
42984 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
42985 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
42986 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
42987 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
42988 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
42989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42991 +4 4 4 4 4 4
42992 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
42993 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
42994 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
42995 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
42996 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
42997 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
42998 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
42999 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
43000 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
43001 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
43002 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
43003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43005 +4 4 4 4 4 4
43006 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
43007 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
43008 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
43009 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
43010 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
43011 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
43012 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
43013 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
43014 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
43015 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
43016 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43019 +4 4 4 4 4 4
43020 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
43021 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
43022 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
43023 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
43024 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
43025 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
43026 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
43027 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
43028 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
43029 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
43030 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43033 +4 4 4 4 4 4
43034 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
43035 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
43036 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
43037 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
43038 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
43039 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
43040 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
43041 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
43042 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
43043 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
43044 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43047 +4 4 4 4 4 4
43048 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
43049 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
43050 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
43051 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
43052 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
43053 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
43054 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
43055 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
43056 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
43057 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43058 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43061 +4 4 4 4 4 4
43062 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
43063 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
43064 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
43065 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
43066 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
43067 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
43068 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
43069 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
43070 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
43071 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43072 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43075 +4 4 4 4 4 4
43076 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
43077 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
43078 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
43079 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
43080 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
43081 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
43082 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
43083 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
43084 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
43085 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43086 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43089 +4 4 4 4 4 4
43090 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
43091 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
43092 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
43093 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
43094 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
43095 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
43096 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
43097 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
43098 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
43099 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43100 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43103 +4 4 4 4 4 4
43104 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
43105 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
43106 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
43107 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
43108 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
43109 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
43110 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
43111 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
43112 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
43113 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43114 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43117 +4 4 4 4 4 4
43118 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
43119 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
43120 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
43121 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
43122 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
43123 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
43124 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
43125 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
43126 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
43127 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43128 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43131 +4 4 4 4 4 4
43132 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
43133 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
43134 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
43135 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
43136 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
43137 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
43138 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
43139 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
43140 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
43141 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43142 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43145 +4 4 4 4 4 4
43146 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
43147 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
43148 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
43149 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
43150 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
43151 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
43152 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
43153 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
43154 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
43155 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43156 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43159 +4 4 4 4 4 4
43160 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
43161 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
43162 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
43163 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
43164 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
43165 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
43166 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
43167 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
43168 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
43169 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43170 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43173 +4 4 4 4 4 4
43174 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
43175 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
43176 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
43177 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
43178 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
43179 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
43180 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
43181 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
43182 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
43183 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43184 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43187 +4 4 4 4 4 4
43188 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
43189 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
43190 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
43191 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
43192 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
43193 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
43194 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
43195 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
43196 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
43197 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43198 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43201 +4 4 4 4 4 4
43202 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
43203 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
43204 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
43205 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
43206 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
43207 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
43208 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
43209 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
43210 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
43211 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43212 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43215 +4 4 4 4 4 4
43216 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
43217 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
43218 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
43219 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
43220 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
43221 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
43222 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
43223 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
43224 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
43225 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43226 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43229 +4 4 4 4 4 4
43230 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
43231 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
43232 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
43233 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
43234 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
43235 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
43236 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
43237 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
43238 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
43239 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43240 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43243 +4 4 4 4 4 4
43244 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
43245 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
43246 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
43247 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
43248 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
43249 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
43250 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
43251 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
43252 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
43253 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
43254 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43257 +4 4 4 4 4 4
43258 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
43259 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
43260 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
43261 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
43262 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
43263 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
43264 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
43265 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
43266 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
43267 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
43268 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43271 +4 4 4 4 4 4
43272 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
43273 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
43274 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
43275 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
43276 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
43277 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
43278 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
43279 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
43280 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
43281 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
43282 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43285 +4 4 4 4 4 4
43286 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
43287 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
43288 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
43289 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
43290 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
43291 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
43292 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43293 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
43294 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
43295 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
43296 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
43297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43299 +4 4 4 4 4 4
43300 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
43301 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
43302 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
43303 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
43304 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
43305 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
43306 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
43307 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
43308 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
43309 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
43310 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43313 +4 4 4 4 4 4
43314 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
43315 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
43316 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
43317 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
43318 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
43319 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
43320 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
43321 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
43322 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
43323 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
43324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43327 +4 4 4 4 4 4
43328 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
43329 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
43330 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
43331 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
43332 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
43333 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
43334 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
43335 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
43336 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
43337 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
43338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43341 +4 4 4 4 4 4
43342 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
43343 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
43344 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
43345 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
43346 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
43347 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
43348 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
43349 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
43350 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
43351 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
43352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43355 +4 4 4 4 4 4
43356 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
43357 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
43358 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
43359 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
43360 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
43361 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
43362 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
43363 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
43364 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
43365 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
43366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43368 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43369 +4 4 4 4 4 4
43370 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
43371 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
43372 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
43373 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
43374 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
43375 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
43376 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
43377 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
43378 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
43379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43381 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43382 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43383 +4 4 4 4 4 4
43384 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
43385 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
43386 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
43387 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
43388 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
43389 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
43390 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
43391 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
43392 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
43393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43395 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43396 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43397 +4 4 4 4 4 4
43398 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
43399 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
43400 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
43401 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
43402 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
43403 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
43404 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
43405 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
43406 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43409 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43410 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43411 +4 4 4 4 4 4
43412 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
43413 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
43414 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
43415 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
43416 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
43417 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
43418 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
43419 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
43420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43424 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43425 +4 4 4 4 4 4
43426 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
43427 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
43428 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
43429 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
43430 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
43431 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
43432 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
43433 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
43434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43439 +4 4 4 4 4 4
43440 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
43441 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
43442 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
43443 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
43444 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
43445 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
43446 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
43447 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
43448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43453 +4 4 4 4 4 4
43454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43455 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
43456 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
43457 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
43458 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
43459 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
43460 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
43461 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
43462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43467 +4 4 4 4 4 4
43468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43469 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
43470 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
43471 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
43472 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
43473 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
43474 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
43475 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
43476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43481 +4 4 4 4 4 4
43482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43484 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
43485 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
43486 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
43487 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
43488 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
43489 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
43490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43491 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43494 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43495 +4 4 4 4 4 4
43496 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
43499 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
43500 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
43501 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
43502 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
43503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43505 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43509 +4 4 4 4 4 4
43510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43513 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
43514 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
43515 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
43516 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
43517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43519 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43523 +4 4 4 4 4 4
43524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43527 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
43528 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
43529 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
43530 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
43531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43533 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43535 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43537 +4 4 4 4 4 4
43538 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43541 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
43542 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
43543 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
43544 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
43545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43547 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43548 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43549 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43550 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43551 +4 4 4 4 4 4
43552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
43556 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
43557 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
43558 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
43559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43562 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43563 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43564 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43565 +4 4 4 4 4 4
43566 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43569 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43570 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
43571 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
43572 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
43573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43575 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43577 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43578 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43579 +4 4 4 4 4 4
43580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43584 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
43585 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
43586 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43589 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43591 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43592 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43593 +4 4 4 4 4 4
43594 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43596 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43598 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
43599 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
43600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43605 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43606 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43607 +4 4 4 4 4 4
43608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43610 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43612 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
43613 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
43614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43620 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43621 +4 4 4 4 4 4
43622 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
43623 index 86d449e..af6a7f7 100644
43624 --- a/drivers/video/udlfb.c
43625 +++ b/drivers/video/udlfb.c
43626 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
43627 dlfb_urb_completion(urb);
43628
43629 error:
43630 - atomic_add(bytes_sent, &dev->bytes_sent);
43631 - atomic_add(bytes_identical, &dev->bytes_identical);
43632 - atomic_add(width*height*2, &dev->bytes_rendered);
43633 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
43634 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
43635 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
43636 end_cycles = get_cycles();
43637 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
43638 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
43639 >> 10)), /* Kcycles */
43640 &dev->cpu_kcycles_used);
43641
43642 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
43643 dlfb_urb_completion(urb);
43644
43645 error:
43646 - atomic_add(bytes_sent, &dev->bytes_sent);
43647 - atomic_add(bytes_identical, &dev->bytes_identical);
43648 - atomic_add(bytes_rendered, &dev->bytes_rendered);
43649 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
43650 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
43651 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
43652 end_cycles = get_cycles();
43653 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
43654 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
43655 >> 10)), /* Kcycles */
43656 &dev->cpu_kcycles_used);
43657 }
43658 @@ -1372,7 +1372,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
43659 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43660 struct dlfb_data *dev = fb_info->par;
43661 return snprintf(buf, PAGE_SIZE, "%u\n",
43662 - atomic_read(&dev->bytes_rendered));
43663 + atomic_read_unchecked(&dev->bytes_rendered));
43664 }
43665
43666 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
43667 @@ -1380,7 +1380,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
43668 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43669 struct dlfb_data *dev = fb_info->par;
43670 return snprintf(buf, PAGE_SIZE, "%u\n",
43671 - atomic_read(&dev->bytes_identical));
43672 + atomic_read_unchecked(&dev->bytes_identical));
43673 }
43674
43675 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
43676 @@ -1388,7 +1388,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
43677 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43678 struct dlfb_data *dev = fb_info->par;
43679 return snprintf(buf, PAGE_SIZE, "%u\n",
43680 - atomic_read(&dev->bytes_sent));
43681 + atomic_read_unchecked(&dev->bytes_sent));
43682 }
43683
43684 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
43685 @@ -1396,7 +1396,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
43686 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43687 struct dlfb_data *dev = fb_info->par;
43688 return snprintf(buf, PAGE_SIZE, "%u\n",
43689 - atomic_read(&dev->cpu_kcycles_used));
43690 + atomic_read_unchecked(&dev->cpu_kcycles_used));
43691 }
43692
43693 static ssize_t edid_show(
43694 @@ -1456,10 +1456,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
43695 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43696 struct dlfb_data *dev = fb_info->par;
43697
43698 - atomic_set(&dev->bytes_rendered, 0);
43699 - atomic_set(&dev->bytes_identical, 0);
43700 - atomic_set(&dev->bytes_sent, 0);
43701 - atomic_set(&dev->cpu_kcycles_used, 0);
43702 + atomic_set_unchecked(&dev->bytes_rendered, 0);
43703 + atomic_set_unchecked(&dev->bytes_identical, 0);
43704 + atomic_set_unchecked(&dev->bytes_sent, 0);
43705 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
43706
43707 return count;
43708 }
43709 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
43710 index 2f8f82d..191de37 100644
43711 --- a/drivers/video/uvesafb.c
43712 +++ b/drivers/video/uvesafb.c
43713 @@ -19,6 +19,7 @@
43714 #include <linux/io.h>
43715 #include <linux/mutex.h>
43716 #include <linux/slab.h>
43717 +#include <linux/moduleloader.h>
43718 #include <video/edid.h>
43719 #include <video/uvesafb.h>
43720 #ifdef CONFIG_X86
43721 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
43722 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
43723 par->pmi_setpal = par->ypan = 0;
43724 } else {
43725 +
43726 +#ifdef CONFIG_PAX_KERNEXEC
43727 +#ifdef CONFIG_MODULES
43728 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
43729 +#endif
43730 + if (!par->pmi_code) {
43731 + par->pmi_setpal = par->ypan = 0;
43732 + return 0;
43733 + }
43734 +#endif
43735 +
43736 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
43737 + task->t.regs.edi);
43738 +
43739 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43740 + pax_open_kernel();
43741 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
43742 + pax_close_kernel();
43743 +
43744 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
43745 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
43746 +#else
43747 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
43748 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
43749 +#endif
43750 +
43751 printk(KERN_INFO "uvesafb: protected mode interface info at "
43752 "%04x:%04x\n",
43753 (u16)task->t.regs.es, (u16)task->t.regs.edi);
43754 @@ -818,13 +841,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
43755 par->ypan = ypan;
43756
43757 if (par->pmi_setpal || par->ypan) {
43758 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
43759 if (__supported_pte_mask & _PAGE_NX) {
43760 par->pmi_setpal = par->ypan = 0;
43761 printk(KERN_WARNING "uvesafb: NX protection is actively."
43762 "We have better not to use the PMI.\n");
43763 - } else {
43764 + } else
43765 +#endif
43766 uvesafb_vbe_getpmi(task, par);
43767 - }
43768 }
43769 #else
43770 /* The protected mode interface is not available on non-x86. */
43771 @@ -1838,6 +1862,11 @@ out:
43772 if (par->vbe_modes)
43773 kfree(par->vbe_modes);
43774
43775 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43776 + if (par->pmi_code)
43777 + module_free_exec(NULL, par->pmi_code);
43778 +#endif
43779 +
43780 framebuffer_release(info);
43781 return err;
43782 }
43783 @@ -1864,6 +1893,12 @@ static int uvesafb_remove(struct platform_device *dev)
43784 kfree(par->vbe_state_orig);
43785 if (par->vbe_state_saved)
43786 kfree(par->vbe_state_saved);
43787 +
43788 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43789 + if (par->pmi_code)
43790 + module_free_exec(NULL, par->pmi_code);
43791 +#endif
43792 +
43793 }
43794
43795 framebuffer_release(info);
43796 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
43797 index 501b340..86bd4cf 100644
43798 --- a/drivers/video/vesafb.c
43799 +++ b/drivers/video/vesafb.c
43800 @@ -9,6 +9,7 @@
43801 */
43802
43803 #include <linux/module.h>
43804 +#include <linux/moduleloader.h>
43805 #include <linux/kernel.h>
43806 #include <linux/errno.h>
43807 #include <linux/string.h>
43808 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
43809 static int vram_total __initdata; /* Set total amount of memory */
43810 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
43811 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
43812 -static void (*pmi_start)(void) __read_mostly;
43813 -static void (*pmi_pal) (void) __read_mostly;
43814 +static void (*pmi_start)(void) __read_only;
43815 +static void (*pmi_pal) (void) __read_only;
43816 static int depth __read_mostly;
43817 static int vga_compat __read_mostly;
43818 /* --------------------------------------------------------------------- */
43819 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
43820 unsigned int size_vmode;
43821 unsigned int size_remap;
43822 unsigned int size_total;
43823 + void *pmi_code = NULL;
43824
43825 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
43826 return -ENODEV;
43827 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
43828 size_remap = size_total;
43829 vesafb_fix.smem_len = size_remap;
43830
43831 -#ifndef __i386__
43832 - screen_info.vesapm_seg = 0;
43833 -#endif
43834 -
43835 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
43836 printk(KERN_WARNING
43837 "vesafb: cannot reserve video memory at 0x%lx\n",
43838 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
43839 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
43840 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
43841
43842 +#ifdef __i386__
43843 +
43844 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43845 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
43846 + if (!pmi_code)
43847 +#elif !defined(CONFIG_PAX_KERNEXEC)
43848 + if (0)
43849 +#endif
43850 +
43851 +#endif
43852 + screen_info.vesapm_seg = 0;
43853 +
43854 if (screen_info.vesapm_seg) {
43855 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
43856 - screen_info.vesapm_seg,screen_info.vesapm_off);
43857 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
43858 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
43859 }
43860
43861 if (screen_info.vesapm_seg < 0xc000)
43862 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
43863
43864 if (ypan || pmi_setpal) {
43865 unsigned short *pmi_base;
43866 +
43867 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
43868 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
43869 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
43870 +
43871 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43872 + pax_open_kernel();
43873 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
43874 +#else
43875 + pmi_code = pmi_base;
43876 +#endif
43877 +
43878 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
43879 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
43880 +
43881 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43882 + pmi_start = ktva_ktla(pmi_start);
43883 + pmi_pal = ktva_ktla(pmi_pal);
43884 + pax_close_kernel();
43885 +#endif
43886 +
43887 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
43888 if (pmi_base[3]) {
43889 printk(KERN_INFO "vesafb: pmi: ports = ");
43890 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
43891 info->node, info->fix.id);
43892 return 0;
43893 err:
43894 +
43895 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
43896 + module_free_exec(NULL, pmi_code);
43897 +#endif
43898 +
43899 if (info->screen_base)
43900 iounmap(info->screen_base);
43901 framebuffer_release(info);
43902 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
43903 index 88714ae..16c2e11 100644
43904 --- a/drivers/video/via/via_clock.h
43905 +++ b/drivers/video/via/via_clock.h
43906 @@ -56,7 +56,7 @@ struct via_clock {
43907
43908 void (*set_engine_pll_state)(u8 state);
43909 void (*set_engine_pll)(struct via_pll_config config);
43910 -};
43911 +} __no_const;
43912
43913
43914 static inline u32 get_pll_internal_frequency(u32 ref_freq,
43915 diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
43916 index 6b1b7e1..b2fa4d5 100644
43917 --- a/drivers/virtio/virtio_mmio.c
43918 +++ b/drivers/virtio/virtio_mmio.c
43919 @@ -530,7 +530,7 @@ static int vm_cmdline_set(const char *device,
43920
43921 resources[0].end = memparse(device, &str) - 1;
43922
43923 - processed = sscanf(str, "@%lli:%u%n:%d%n",
43924 + processed = sscanf(str, "@%lli:%llu%n:%d%n",
43925 &base, &resources[1].start, &consumed,
43926 &vm_cmdline_id, &consumed);
43927
43928 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
43929 index e56c934..fc22f4b 100644
43930 --- a/drivers/xen/xen-pciback/conf_space.h
43931 +++ b/drivers/xen/xen-pciback/conf_space.h
43932 @@ -44,15 +44,15 @@ struct config_field {
43933 struct {
43934 conf_dword_write write;
43935 conf_dword_read read;
43936 - } dw;
43937 + } __no_const dw;
43938 struct {
43939 conf_word_write write;
43940 conf_word_read read;
43941 - } w;
43942 + } __no_const w;
43943 struct {
43944 conf_byte_write write;
43945 conf_byte_read read;
43946 - } b;
43947 + } __no_const b;
43948 } u;
43949 struct list_head list;
43950 };
43951 diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
43952 index fef20db..d28b1ab 100644
43953 --- a/drivers/xen/xenfs/xenstored.c
43954 +++ b/drivers/xen/xenfs/xenstored.c
43955 @@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
43956 static int xsd_kva_open(struct inode *inode, struct file *file)
43957 {
43958 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
43959 +#ifdef CONFIG_GRKERNSEC_HIDESYM
43960 + NULL);
43961 +#else
43962 xen_store_interface);
43963 +#endif
43964 +
43965 if (!file->private_data)
43966 return -ENOMEM;
43967 return 0;
43968 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
43969 index 890bed5..17ae73e 100644
43970 --- a/fs/9p/vfs_inode.c
43971 +++ b/fs/9p/vfs_inode.c
43972 @@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43973 void
43974 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43975 {
43976 - char *s = nd_get_link(nd);
43977 + const char *s = nd_get_link(nd);
43978
43979 p9_debug(P9_DEBUG_VFS, " %s %s\n",
43980 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
43981 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
43982 index 0efd152..b5802ad 100644
43983 --- a/fs/Kconfig.binfmt
43984 +++ b/fs/Kconfig.binfmt
43985 @@ -89,7 +89,7 @@ config HAVE_AOUT
43986
43987 config BINFMT_AOUT
43988 tristate "Kernel support for a.out and ECOFF binaries"
43989 - depends on HAVE_AOUT
43990 + depends on HAVE_AOUT && BROKEN
43991 ---help---
43992 A.out (Assembler.OUTput) is a set of formats for libraries and
43993 executables used in the earliest versions of UNIX. Linux used
43994 diff --git a/fs/aio.c b/fs/aio.c
43995 index 71f613c..9d01f1f 100644
43996 --- a/fs/aio.c
43997 +++ b/fs/aio.c
43998 @@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
43999 size += sizeof(struct io_event) * nr_events;
44000 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
44001
44002 - if (nr_pages < 0)
44003 + if (nr_pages <= 0)
44004 return -EINVAL;
44005
44006 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
44007 @@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
44008 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
44009 {
44010 ssize_t ret;
44011 + struct iovec iovstack;
44012
44013 #ifdef CONFIG_COMPAT
44014 if (compat)
44015 ret = compat_rw_copy_check_uvector(type,
44016 (struct compat_iovec __user *)kiocb->ki_buf,
44017 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
44018 + kiocb->ki_nbytes, 1, &iovstack,
44019 &kiocb->ki_iovec);
44020 else
44021 #endif
44022 ret = rw_copy_check_uvector(type,
44023 (struct iovec __user *)kiocb->ki_buf,
44024 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
44025 + kiocb->ki_nbytes, 1, &iovstack,
44026 &kiocb->ki_iovec);
44027 if (ret < 0)
44028 goto out;
44029 @@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
44030 if (ret < 0)
44031 goto out;
44032
44033 + if (kiocb->ki_iovec == &iovstack) {
44034 + kiocb->ki_inline_vec = iovstack;
44035 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
44036 + }
44037 kiocb->ki_nr_segs = kiocb->ki_nbytes;
44038 kiocb->ki_cur_seg = 0;
44039 /* ki_nbytes/left now reflect bytes instead of segs */
44040 diff --git a/fs/attr.c b/fs/attr.c
44041 index cce7df5..eaa2731 100644
44042 --- a/fs/attr.c
44043 +++ b/fs/attr.c
44044 @@ -100,6 +100,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
44045 unsigned long limit;
44046
44047 limit = rlimit(RLIMIT_FSIZE);
44048 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
44049 if (limit != RLIM_INFINITY && offset > limit)
44050 goto out_sig;
44051 if (offset > inode->i_sb->s_maxbytes)
44052 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
44053 index dce436e..55e670d 100644
44054 --- a/fs/autofs4/waitq.c
44055 +++ b/fs/autofs4/waitq.c
44056 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
44057 {
44058 unsigned long sigpipe, flags;
44059 mm_segment_t fs;
44060 - const char *data = (const char *)addr;
44061 + const char __user *data = (const char __force_user *)addr;
44062 ssize_t wr = 0;
44063
44064 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
44065 @@ -347,6 +347,10 @@ static int validate_request(struct autofs_wait_queue **wait,
44066 return 1;
44067 }
44068
44069 +#ifdef CONFIG_GRKERNSEC_HIDESYM
44070 +static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
44071 +#endif
44072 +
44073 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
44074 enum autofs_notify notify)
44075 {
44076 @@ -380,7 +384,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
44077
44078 /* If this is a direct mount request create a dummy name */
44079 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
44080 +#ifdef CONFIG_GRKERNSEC_HIDESYM
44081 + /* this name does get written to userland via autofs4_write() */
44082 + qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
44083 +#else
44084 qstr.len = sprintf(name, "%p", dentry);
44085 +#endif
44086 else {
44087 qstr.len = autofs4_getpath(sbi, dentry, &name);
44088 if (!qstr.len) {
44089 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
44090 index 2b3bda8..6a2d4be 100644
44091 --- a/fs/befs/linuxvfs.c
44092 +++ b/fs/befs/linuxvfs.c
44093 @@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44094 {
44095 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
44096 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
44097 - char *link = nd_get_link(nd);
44098 + const char *link = nd_get_link(nd);
44099 if (!IS_ERR(link))
44100 kfree(link);
44101 }
44102 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
44103 index 0e7a6f8..332b1ca 100644
44104 --- a/fs/binfmt_aout.c
44105 +++ b/fs/binfmt_aout.c
44106 @@ -16,6 +16,7 @@
44107 #include <linux/string.h>
44108 #include <linux/fs.h>
44109 #include <linux/file.h>
44110 +#include <linux/security.h>
44111 #include <linux/stat.h>
44112 #include <linux/fcntl.h>
44113 #include <linux/ptrace.h>
44114 @@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
44115 #endif
44116 # define START_STACK(u) ((void __user *)u.start_stack)
44117
44118 + memset(&dump, 0, sizeof(dump));
44119 +
44120 fs = get_fs();
44121 set_fs(KERNEL_DS);
44122 has_dumped = 1;
44123 @@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
44124
44125 /* If the size of the dump file exceeds the rlimit, then see what would happen
44126 if we wrote the stack, but not the data area. */
44127 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
44128 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
44129 dump.u_dsize = 0;
44130
44131 /* Make sure we have enough room to write the stack and data areas. */
44132 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
44133 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
44134 dump.u_ssize = 0;
44135
44136 @@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44137 rlim = rlimit(RLIMIT_DATA);
44138 if (rlim >= RLIM_INFINITY)
44139 rlim = ~0;
44140 +
44141 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
44142 if (ex.a_data + ex.a_bss > rlim)
44143 return -ENOMEM;
44144
44145 @@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44146
44147 install_exec_creds(bprm);
44148
44149 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44150 + current->mm->pax_flags = 0UL;
44151 +#endif
44152 +
44153 +#ifdef CONFIG_PAX_PAGEEXEC
44154 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
44155 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
44156 +
44157 +#ifdef CONFIG_PAX_EMUTRAMP
44158 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
44159 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
44160 +#endif
44161 +
44162 +#ifdef CONFIG_PAX_MPROTECT
44163 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
44164 + current->mm->pax_flags |= MF_PAX_MPROTECT;
44165 +#endif
44166 +
44167 + }
44168 +#endif
44169 +
44170 if (N_MAGIC(ex) == OMAGIC) {
44171 unsigned long text_addr, map_size;
44172 loff_t pos;
44173 @@ -332,7 +360,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44174 }
44175
44176 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
44177 - PROT_READ | PROT_WRITE | PROT_EXEC,
44178 + PROT_READ | PROT_WRITE,
44179 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
44180 fd_offset + ex.a_text);
44181 if (error != N_DATADDR(ex)) {
44182 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
44183 index fbd9f60..d4edac0 100644
44184 --- a/fs/binfmt_elf.c
44185 +++ b/fs/binfmt_elf.c
44186 @@ -33,6 +33,7 @@
44187 #include <linux/elf.h>
44188 #include <linux/utsname.h>
44189 #include <linux/coredump.h>
44190 +#include <linux/xattr.h>
44191 #include <asm/uaccess.h>
44192 #include <asm/param.h>
44193 #include <asm/page.h>
44194 @@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
44195 #define elf_core_dump NULL
44196 #endif
44197
44198 +#ifdef CONFIG_PAX_MPROTECT
44199 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
44200 +#endif
44201 +
44202 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
44203 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
44204 #else
44205 @@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
44206 .load_binary = load_elf_binary,
44207 .load_shlib = load_elf_library,
44208 .core_dump = elf_core_dump,
44209 +
44210 +#ifdef CONFIG_PAX_MPROTECT
44211 + .handle_mprotect= elf_handle_mprotect,
44212 +#endif
44213 +
44214 .min_coredump = ELF_EXEC_PAGESIZE,
44215 };
44216
44217 @@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
44218
44219 static int set_brk(unsigned long start, unsigned long end)
44220 {
44221 + unsigned long e = end;
44222 +
44223 start = ELF_PAGEALIGN(start);
44224 end = ELF_PAGEALIGN(end);
44225 if (end > start) {
44226 @@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
44227 if (BAD_ADDR(addr))
44228 return addr;
44229 }
44230 - current->mm->start_brk = current->mm->brk = end;
44231 + current->mm->start_brk = current->mm->brk = e;
44232 return 0;
44233 }
44234
44235 @@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44236 elf_addr_t __user *u_rand_bytes;
44237 const char *k_platform = ELF_PLATFORM;
44238 const char *k_base_platform = ELF_BASE_PLATFORM;
44239 - unsigned char k_rand_bytes[16];
44240 + u32 k_rand_bytes[4];
44241 int items;
44242 elf_addr_t *elf_info;
44243 int ei_index = 0;
44244 const struct cred *cred = current_cred();
44245 struct vm_area_struct *vma;
44246 + unsigned long saved_auxv[AT_VECTOR_SIZE];
44247
44248 /*
44249 * In some cases (e.g. Hyper-Threading), we want to avoid L1
44250 @@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44251 * Generate 16 random bytes for userspace PRNG seeding.
44252 */
44253 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
44254 - u_rand_bytes = (elf_addr_t __user *)
44255 - STACK_ALLOC(p, sizeof(k_rand_bytes));
44256 + srandom32(k_rand_bytes[0] ^ random32());
44257 + srandom32(k_rand_bytes[1] ^ random32());
44258 + srandom32(k_rand_bytes[2] ^ random32());
44259 + srandom32(k_rand_bytes[3] ^ random32());
44260 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
44261 + u_rand_bytes = (elf_addr_t __user *) p;
44262 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
44263 return -EFAULT;
44264
44265 @@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44266 return -EFAULT;
44267 current->mm->env_end = p;
44268
44269 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
44270 +
44271 /* Put the elf_info on the stack in the right place. */
44272 sp = (elf_addr_t __user *)envp + 1;
44273 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
44274 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
44275 return -EFAULT;
44276 return 0;
44277 }
44278 @@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
44279 an ELF header */
44280
44281 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44282 - struct file *interpreter, unsigned long *interp_map_addr,
44283 - unsigned long no_base)
44284 + struct file *interpreter, unsigned long no_base)
44285 {
44286 struct elf_phdr *elf_phdata;
44287 struct elf_phdr *eppnt;
44288 - unsigned long load_addr = 0;
44289 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
44290 int load_addr_set = 0;
44291 unsigned long last_bss = 0, elf_bss = 0;
44292 - unsigned long error = ~0UL;
44293 + unsigned long error = -EINVAL;
44294 unsigned long total_size;
44295 int retval, i, size;
44296
44297 @@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44298 goto out_close;
44299 }
44300
44301 +#ifdef CONFIG_PAX_SEGMEXEC
44302 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
44303 + pax_task_size = SEGMEXEC_TASK_SIZE;
44304 +#endif
44305 +
44306 eppnt = elf_phdata;
44307 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
44308 if (eppnt->p_type == PT_LOAD) {
44309 @@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44310 map_addr = elf_map(interpreter, load_addr + vaddr,
44311 eppnt, elf_prot, elf_type, total_size);
44312 total_size = 0;
44313 - if (!*interp_map_addr)
44314 - *interp_map_addr = map_addr;
44315 error = map_addr;
44316 if (BAD_ADDR(map_addr))
44317 goto out_close;
44318 @@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44319 k = load_addr + eppnt->p_vaddr;
44320 if (BAD_ADDR(k) ||
44321 eppnt->p_filesz > eppnt->p_memsz ||
44322 - eppnt->p_memsz > TASK_SIZE ||
44323 - TASK_SIZE - eppnt->p_memsz < k) {
44324 + eppnt->p_memsz > pax_task_size ||
44325 + pax_task_size - eppnt->p_memsz < k) {
44326 error = -ENOMEM;
44327 goto out_close;
44328 }
44329 @@ -530,6 +551,315 @@ out:
44330 return error;
44331 }
44332
44333 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
44334 +#ifdef CONFIG_PAX_SOFTMODE
44335 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
44336 +{
44337 + unsigned long pax_flags = 0UL;
44338 +
44339 +#ifdef CONFIG_PAX_PAGEEXEC
44340 + if (elf_phdata->p_flags & PF_PAGEEXEC)
44341 + pax_flags |= MF_PAX_PAGEEXEC;
44342 +#endif
44343 +
44344 +#ifdef CONFIG_PAX_SEGMEXEC
44345 + if (elf_phdata->p_flags & PF_SEGMEXEC)
44346 + pax_flags |= MF_PAX_SEGMEXEC;
44347 +#endif
44348 +
44349 +#ifdef CONFIG_PAX_EMUTRAMP
44350 + if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
44351 + pax_flags |= MF_PAX_EMUTRAMP;
44352 +#endif
44353 +
44354 +#ifdef CONFIG_PAX_MPROTECT
44355 + if (elf_phdata->p_flags & PF_MPROTECT)
44356 + pax_flags |= MF_PAX_MPROTECT;
44357 +#endif
44358 +
44359 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44360 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
44361 + pax_flags |= MF_PAX_RANDMMAP;
44362 +#endif
44363 +
44364 + return pax_flags;
44365 +}
44366 +#endif
44367 +
44368 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
44369 +{
44370 + unsigned long pax_flags = 0UL;
44371 +
44372 +#ifdef CONFIG_PAX_PAGEEXEC
44373 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
44374 + pax_flags |= MF_PAX_PAGEEXEC;
44375 +#endif
44376 +
44377 +#ifdef CONFIG_PAX_SEGMEXEC
44378 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
44379 + pax_flags |= MF_PAX_SEGMEXEC;
44380 +#endif
44381 +
44382 +#ifdef CONFIG_PAX_EMUTRAMP
44383 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
44384 + pax_flags |= MF_PAX_EMUTRAMP;
44385 +#endif
44386 +
44387 +#ifdef CONFIG_PAX_MPROTECT
44388 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
44389 + pax_flags |= MF_PAX_MPROTECT;
44390 +#endif
44391 +
44392 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44393 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
44394 + pax_flags |= MF_PAX_RANDMMAP;
44395 +#endif
44396 +
44397 + return pax_flags;
44398 +}
44399 +#endif
44400 +
44401 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
44402 +#ifdef CONFIG_PAX_SOFTMODE
44403 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
44404 +{
44405 + unsigned long pax_flags = 0UL;
44406 +
44407 +#ifdef CONFIG_PAX_PAGEEXEC
44408 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
44409 + pax_flags |= MF_PAX_PAGEEXEC;
44410 +#endif
44411 +
44412 +#ifdef CONFIG_PAX_SEGMEXEC
44413 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
44414 + pax_flags |= MF_PAX_SEGMEXEC;
44415 +#endif
44416 +
44417 +#ifdef CONFIG_PAX_EMUTRAMP
44418 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
44419 + pax_flags |= MF_PAX_EMUTRAMP;
44420 +#endif
44421 +
44422 +#ifdef CONFIG_PAX_MPROTECT
44423 + if (pax_flags_softmode & MF_PAX_MPROTECT)
44424 + pax_flags |= MF_PAX_MPROTECT;
44425 +#endif
44426 +
44427 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44428 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
44429 + pax_flags |= MF_PAX_RANDMMAP;
44430 +#endif
44431 +
44432 + return pax_flags;
44433 +}
44434 +#endif
44435 +
44436 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
44437 +{
44438 + unsigned long pax_flags = 0UL;
44439 +
44440 +#ifdef CONFIG_PAX_PAGEEXEC
44441 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
44442 + pax_flags |= MF_PAX_PAGEEXEC;
44443 +#endif
44444 +
44445 +#ifdef CONFIG_PAX_SEGMEXEC
44446 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
44447 + pax_flags |= MF_PAX_SEGMEXEC;
44448 +#endif
44449 +
44450 +#ifdef CONFIG_PAX_EMUTRAMP
44451 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
44452 + pax_flags |= MF_PAX_EMUTRAMP;
44453 +#endif
44454 +
44455 +#ifdef CONFIG_PAX_MPROTECT
44456 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
44457 + pax_flags |= MF_PAX_MPROTECT;
44458 +#endif
44459 +
44460 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44461 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
44462 + pax_flags |= MF_PAX_RANDMMAP;
44463 +#endif
44464 +
44465 + return pax_flags;
44466 +}
44467 +#endif
44468 +
44469 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44470 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
44471 +{
44472 + unsigned long pax_flags = 0UL;
44473 +
44474 +#ifdef CONFIG_PAX_EI_PAX
44475 +
44476 +#ifdef CONFIG_PAX_PAGEEXEC
44477 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
44478 + pax_flags |= MF_PAX_PAGEEXEC;
44479 +#endif
44480 +
44481 +#ifdef CONFIG_PAX_SEGMEXEC
44482 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
44483 + pax_flags |= MF_PAX_SEGMEXEC;
44484 +#endif
44485 +
44486 +#ifdef CONFIG_PAX_EMUTRAMP
44487 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
44488 + pax_flags |= MF_PAX_EMUTRAMP;
44489 +#endif
44490 +
44491 +#ifdef CONFIG_PAX_MPROTECT
44492 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
44493 + pax_flags |= MF_PAX_MPROTECT;
44494 +#endif
44495 +
44496 +#ifdef CONFIG_PAX_ASLR
44497 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
44498 + pax_flags |= MF_PAX_RANDMMAP;
44499 +#endif
44500 +
44501 +#else
44502 +
44503 +#ifdef CONFIG_PAX_PAGEEXEC
44504 + pax_flags |= MF_PAX_PAGEEXEC;
44505 +#endif
44506 +
44507 +#ifdef CONFIG_PAX_SEGMEXEC
44508 + pax_flags |= MF_PAX_SEGMEXEC;
44509 +#endif
44510 +
44511 +#ifdef CONFIG_PAX_MPROTECT
44512 + pax_flags |= MF_PAX_MPROTECT;
44513 +#endif
44514 +
44515 +#ifdef CONFIG_PAX_RANDMMAP
44516 + if (randomize_va_space)
44517 + pax_flags |= MF_PAX_RANDMMAP;
44518 +#endif
44519 +
44520 +#endif
44521 +
44522 + return pax_flags;
44523 +}
44524 +
44525 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
44526 +{
44527 +
44528 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
44529 + unsigned long i;
44530 +
44531 + for (i = 0UL; i < elf_ex->e_phnum; i++)
44532 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
44533 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
44534 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
44535 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
44536 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
44537 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
44538 + return ~0UL;
44539 +
44540 +#ifdef CONFIG_PAX_SOFTMODE
44541 + if (pax_softmode)
44542 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
44543 + else
44544 +#endif
44545 +
44546 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
44547 + break;
44548 + }
44549 +#endif
44550 +
44551 + return ~0UL;
44552 +}
44553 +
44554 +static unsigned long pax_parse_xattr_pax(struct file * const file)
44555 +{
44556 +
44557 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
44558 + ssize_t xattr_size, i;
44559 + unsigned char xattr_value[5];
44560 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
44561 +
44562 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
44563 + if (xattr_size <= 0 || xattr_size > 5)
44564 + return ~0UL;
44565 +
44566 + for (i = 0; i < xattr_size; i++)
44567 + switch (xattr_value[i]) {
44568 + default:
44569 + return ~0UL;
44570 +
44571 +#define parse_flag(option1, option2, flag) \
44572 + case option1: \
44573 + if (pax_flags_hardmode & MF_PAX_##flag) \
44574 + return ~0UL; \
44575 + pax_flags_hardmode |= MF_PAX_##flag; \
44576 + break; \
44577 + case option2: \
44578 + if (pax_flags_softmode & MF_PAX_##flag) \
44579 + return ~0UL; \
44580 + pax_flags_softmode |= MF_PAX_##flag; \
44581 + break;
44582 +
44583 + parse_flag('p', 'P', PAGEEXEC);
44584 + parse_flag('e', 'E', EMUTRAMP);
44585 + parse_flag('m', 'M', MPROTECT);
44586 + parse_flag('r', 'R', RANDMMAP);
44587 + parse_flag('s', 'S', SEGMEXEC);
44588 +
44589 +#undef parse_flag
44590 + }
44591 +
44592 + if (pax_flags_hardmode & pax_flags_softmode)
44593 + return ~0UL;
44594 +
44595 +#ifdef CONFIG_PAX_SOFTMODE
44596 + if (pax_softmode)
44597 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
44598 + else
44599 +#endif
44600 +
44601 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
44602 +#else
44603 + return ~0UL;
44604 +#endif
44605 +
44606 +}
44607 +
44608 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
44609 +{
44610 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
44611 +
44612 + pax_flags = pax_parse_ei_pax(elf_ex);
44613 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
44614 + xattr_pax_flags = pax_parse_xattr_pax(file);
44615 +
44616 + if (pt_pax_flags == ~0UL)
44617 + pt_pax_flags = xattr_pax_flags;
44618 + else if (xattr_pax_flags == ~0UL)
44619 + xattr_pax_flags = pt_pax_flags;
44620 + if (pt_pax_flags != xattr_pax_flags)
44621 + return -EINVAL;
44622 + if (pt_pax_flags != ~0UL)
44623 + pax_flags = pt_pax_flags;
44624 +
44625 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
44626 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44627 + if ((__supported_pte_mask & _PAGE_NX))
44628 + pax_flags &= ~MF_PAX_SEGMEXEC;
44629 + else
44630 + pax_flags &= ~MF_PAX_PAGEEXEC;
44631 + }
44632 +#endif
44633 +
44634 + if (0 > pax_check_flags(&pax_flags))
44635 + return -EINVAL;
44636 +
44637 + current->mm->pax_flags = pax_flags;
44638 + return 0;
44639 +}
44640 +#endif
44641 +
44642 /*
44643 * These are the functions used to load ELF style executables and shared
44644 * libraries. There is no binary dependent code anywhere else.
44645 @@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
44646 {
44647 unsigned int random_variable = 0;
44648
44649 +#ifdef CONFIG_PAX_RANDUSTACK
44650 + if (randomize_va_space)
44651 + return stack_top - current->mm->delta_stack;
44652 +#endif
44653 +
44654 if ((current->flags & PF_RANDOMIZE) &&
44655 !(current->personality & ADDR_NO_RANDOMIZE)) {
44656 random_variable = get_random_int() & STACK_RND_MASK;
44657 @@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44658 unsigned long load_addr = 0, load_bias = 0;
44659 int load_addr_set = 0;
44660 char * elf_interpreter = NULL;
44661 - unsigned long error;
44662 + unsigned long error = 0;
44663 struct elf_phdr *elf_ppnt, *elf_phdata;
44664 unsigned long elf_bss, elf_brk;
44665 int retval, i;
44666 @@ -574,11 +909,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44667 unsigned long start_code, end_code, start_data, end_data;
44668 unsigned long reloc_func_desc __maybe_unused = 0;
44669 int executable_stack = EXSTACK_DEFAULT;
44670 - unsigned long def_flags = 0;
44671 struct {
44672 struct elfhdr elf_ex;
44673 struct elfhdr interp_elf_ex;
44674 } *loc;
44675 + unsigned long pax_task_size = TASK_SIZE;
44676
44677 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
44678 if (!loc) {
44679 @@ -714,11 +1049,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44680 goto out_free_dentry;
44681
44682 /* OK, This is the point of no return */
44683 - current->mm->def_flags = def_flags;
44684 +
44685 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44686 + current->mm->pax_flags = 0UL;
44687 +#endif
44688 +
44689 +#ifdef CONFIG_PAX_DLRESOLVE
44690 + current->mm->call_dl_resolve = 0UL;
44691 +#endif
44692 +
44693 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
44694 + current->mm->call_syscall = 0UL;
44695 +#endif
44696 +
44697 +#ifdef CONFIG_PAX_ASLR
44698 + current->mm->delta_mmap = 0UL;
44699 + current->mm->delta_stack = 0UL;
44700 +#endif
44701 +
44702 + current->mm->def_flags = 0;
44703 +
44704 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44705 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
44706 + send_sig(SIGKILL, current, 0);
44707 + goto out_free_dentry;
44708 + }
44709 +#endif
44710 +
44711 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
44712 + pax_set_initial_flags(bprm);
44713 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
44714 + if (pax_set_initial_flags_func)
44715 + (pax_set_initial_flags_func)(bprm);
44716 +#endif
44717 +
44718 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
44719 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
44720 + current->mm->context.user_cs_limit = PAGE_SIZE;
44721 + current->mm->def_flags |= VM_PAGEEXEC;
44722 + }
44723 +#endif
44724 +
44725 +#ifdef CONFIG_PAX_SEGMEXEC
44726 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
44727 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
44728 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
44729 + pax_task_size = SEGMEXEC_TASK_SIZE;
44730 + current->mm->def_flags |= VM_NOHUGEPAGE;
44731 + }
44732 +#endif
44733 +
44734 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
44735 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44736 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
44737 + put_cpu();
44738 + }
44739 +#endif
44740
44741 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
44742 may depend on the personality. */
44743 SET_PERSONALITY(loc->elf_ex);
44744 +
44745 +#ifdef CONFIG_PAX_ASLR
44746 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
44747 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
44748 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
44749 + }
44750 +#endif
44751 +
44752 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44753 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44754 + executable_stack = EXSTACK_DISABLE_X;
44755 + current->personality &= ~READ_IMPLIES_EXEC;
44756 + } else
44757 +#endif
44758 +
44759 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
44760 current->personality |= READ_IMPLIES_EXEC;
44761
44762 @@ -809,6 +1214,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44763 #else
44764 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
44765 #endif
44766 +
44767 +#ifdef CONFIG_PAX_RANDMMAP
44768 + /* PaX: randomize base address at the default exe base if requested */
44769 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
44770 +#ifdef CONFIG_SPARC64
44771 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
44772 +#else
44773 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
44774 +#endif
44775 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
44776 + elf_flags |= MAP_FIXED;
44777 + }
44778 +#endif
44779 +
44780 }
44781
44782 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
44783 @@ -841,9 +1260,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44784 * allowed task size. Note that p_filesz must always be
44785 * <= p_memsz so it is only necessary to check p_memsz.
44786 */
44787 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
44788 - elf_ppnt->p_memsz > TASK_SIZE ||
44789 - TASK_SIZE - elf_ppnt->p_memsz < k) {
44790 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
44791 + elf_ppnt->p_memsz > pax_task_size ||
44792 + pax_task_size - elf_ppnt->p_memsz < k) {
44793 /* set_brk can never work. Avoid overflows. */
44794 send_sig(SIGKILL, current, 0);
44795 retval = -EINVAL;
44796 @@ -882,17 +1301,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44797 goto out_free_dentry;
44798 }
44799 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
44800 - send_sig(SIGSEGV, current, 0);
44801 - retval = -EFAULT; /* Nobody gets to see this, but.. */
44802 - goto out_free_dentry;
44803 + /*
44804 + * This bss-zeroing can fail if the ELF
44805 + * file specifies odd protections. So
44806 + * we don't check the return value
44807 + */
44808 }
44809
44810 +#ifdef CONFIG_PAX_RANDMMAP
44811 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
44812 + unsigned long start, size;
44813 +
44814 + start = ELF_PAGEALIGN(elf_brk);
44815 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
44816 + down_read(&current->mm->mmap_sem);
44817 + retval = -ENOMEM;
44818 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
44819 + unsigned long prot = PROT_NONE;
44820 +
44821 + up_read(&current->mm->mmap_sem);
44822 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
44823 +// if (current->personality & ADDR_NO_RANDOMIZE)
44824 +// prot = PROT_READ;
44825 + start = vm_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
44826 + retval = IS_ERR_VALUE(start) ? start : 0;
44827 + } else
44828 + up_read(&current->mm->mmap_sem);
44829 + if (retval == 0)
44830 + retval = set_brk(start + size, start + size + PAGE_SIZE);
44831 + if (retval < 0) {
44832 + send_sig(SIGKILL, current, 0);
44833 + goto out_free_dentry;
44834 + }
44835 + }
44836 +#endif
44837 +
44838 if (elf_interpreter) {
44839 - unsigned long interp_map_addr = 0;
44840 -
44841 elf_entry = load_elf_interp(&loc->interp_elf_ex,
44842 interpreter,
44843 - &interp_map_addr,
44844 load_bias);
44845 if (!IS_ERR((void *)elf_entry)) {
44846 /*
44847 @@ -1114,7 +1560,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
44848 * Decide what to dump of a segment, part, all or none.
44849 */
44850 static unsigned long vma_dump_size(struct vm_area_struct *vma,
44851 - unsigned long mm_flags)
44852 + unsigned long mm_flags, long signr)
44853 {
44854 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
44855
44856 @@ -1151,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
44857 if (vma->vm_file == NULL)
44858 return 0;
44859
44860 - if (FILTER(MAPPED_PRIVATE))
44861 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
44862 goto whole;
44863
44864 /*
44865 @@ -1373,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
44866 {
44867 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
44868 int i = 0;
44869 - do
44870 + do {
44871 i += 2;
44872 - while (auxv[i - 2] != AT_NULL);
44873 + } while (auxv[i - 2] != AT_NULL);
44874 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
44875 }
44876
44877 @@ -2003,14 +2449,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
44878 }
44879
44880 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
44881 - unsigned long mm_flags)
44882 + struct coredump_params *cprm)
44883 {
44884 struct vm_area_struct *vma;
44885 size_t size = 0;
44886
44887 for (vma = first_vma(current, gate_vma); vma != NULL;
44888 vma = next_vma(vma, gate_vma))
44889 - size += vma_dump_size(vma, mm_flags);
44890 + size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
44891 return size;
44892 }
44893
44894 @@ -2104,7 +2550,7 @@ static int elf_core_dump(struct coredump_params *cprm)
44895
44896 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
44897
44898 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
44899 + offset += elf_core_vma_data_size(gate_vma, cprm);
44900 offset += elf_core_extra_data_size();
44901 e_shoff = offset;
44902
44903 @@ -2118,10 +2564,12 @@ static int elf_core_dump(struct coredump_params *cprm)
44904 offset = dataoff;
44905
44906 size += sizeof(*elf);
44907 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
44908 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
44909 goto end_coredump;
44910
44911 size += sizeof(*phdr4note);
44912 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
44913 if (size > cprm->limit
44914 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
44915 goto end_coredump;
44916 @@ -2135,7 +2583,7 @@ static int elf_core_dump(struct coredump_params *cprm)
44917 phdr.p_offset = offset;
44918 phdr.p_vaddr = vma->vm_start;
44919 phdr.p_paddr = 0;
44920 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
44921 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
44922 phdr.p_memsz = vma->vm_end - vma->vm_start;
44923 offset += phdr.p_filesz;
44924 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
44925 @@ -2146,6 +2594,7 @@ static int elf_core_dump(struct coredump_params *cprm)
44926 phdr.p_align = ELF_EXEC_PAGESIZE;
44927
44928 size += sizeof(phdr);
44929 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
44930 if (size > cprm->limit
44931 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
44932 goto end_coredump;
44933 @@ -2170,7 +2619,7 @@ static int elf_core_dump(struct coredump_params *cprm)
44934 unsigned long addr;
44935 unsigned long end;
44936
44937 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
44938 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
44939
44940 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
44941 struct page *page;
44942 @@ -2179,6 +2628,7 @@ static int elf_core_dump(struct coredump_params *cprm)
44943 page = get_dump_page(addr);
44944 if (page) {
44945 void *kaddr = kmap(page);
44946 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
44947 stop = ((size += PAGE_SIZE) > cprm->limit) ||
44948 !dump_write(cprm->file, kaddr,
44949 PAGE_SIZE);
44950 @@ -2196,6 +2646,7 @@ static int elf_core_dump(struct coredump_params *cprm)
44951
44952 if (e_phnum == PN_XNUM) {
44953 size += sizeof(*shdr4extnum);
44954 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
44955 if (size > cprm->limit
44956 || !dump_write(cprm->file, shdr4extnum,
44957 sizeof(*shdr4extnum)))
44958 @@ -2216,6 +2667,97 @@ out:
44959
44960 #endif /* CONFIG_ELF_CORE */
44961
44962 +#ifdef CONFIG_PAX_MPROTECT
44963 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
44964 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
44965 + * we'll remove VM_MAYWRITE for good on RELRO segments.
44966 + *
44967 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
44968 + * basis because we want to allow the common case and not the special ones.
44969 + */
44970 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
44971 +{
44972 + struct elfhdr elf_h;
44973 + struct elf_phdr elf_p;
44974 + unsigned long i;
44975 + unsigned long oldflags;
44976 + bool is_textrel_rw, is_textrel_rx, is_relro;
44977 +
44978 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
44979 + return;
44980 +
44981 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
44982 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
44983 +
44984 +#ifdef CONFIG_PAX_ELFRELOCS
44985 + /* possible TEXTREL */
44986 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
44987 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
44988 +#else
44989 + is_textrel_rw = false;
44990 + is_textrel_rx = false;
44991 +#endif
44992 +
44993 + /* possible RELRO */
44994 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
44995 +
44996 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
44997 + return;
44998 +
44999 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
45000 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
45001 +
45002 +#ifdef CONFIG_PAX_ETEXECRELOCS
45003 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
45004 +#else
45005 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
45006 +#endif
45007 +
45008 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
45009 + !elf_check_arch(&elf_h) ||
45010 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
45011 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
45012 + return;
45013 +
45014 + for (i = 0UL; i < elf_h.e_phnum; i++) {
45015 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
45016 + return;
45017 + switch (elf_p.p_type) {
45018 + case PT_DYNAMIC:
45019 + if (!is_textrel_rw && !is_textrel_rx)
45020 + continue;
45021 + i = 0UL;
45022 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
45023 + elf_dyn dyn;
45024 +
45025 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
45026 + return;
45027 + if (dyn.d_tag == DT_NULL)
45028 + return;
45029 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
45030 + gr_log_textrel(vma);
45031 + if (is_textrel_rw)
45032 + vma->vm_flags |= VM_MAYWRITE;
45033 + else
45034 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
45035 + vma->vm_flags &= ~VM_MAYWRITE;
45036 + return;
45037 + }
45038 + i++;
45039 + }
45040 + return;
45041 +
45042 + case PT_GNU_RELRO:
45043 + if (!is_relro)
45044 + continue;
45045 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
45046 + vma->vm_flags &= ~VM_MAYWRITE;
45047 + return;
45048 + }
45049 + }
45050 +}
45051 +#endif
45052 +
45053 static int __init init_elf_binfmt(void)
45054 {
45055 register_binfmt(&elf_format);
45056 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
45057 index e280352..7b2f231 100644
45058 --- a/fs/binfmt_flat.c
45059 +++ b/fs/binfmt_flat.c
45060 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
45061 realdatastart = (unsigned long) -ENOMEM;
45062 printk("Unable to allocate RAM for process data, errno %d\n",
45063 (int)-realdatastart);
45064 + down_write(&current->mm->mmap_sem);
45065 vm_munmap(textpos, text_len);
45066 + up_write(&current->mm->mmap_sem);
45067 ret = realdatastart;
45068 goto err;
45069 }
45070 @@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
45071 }
45072 if (IS_ERR_VALUE(result)) {
45073 printk("Unable to read data+bss, errno %d\n", (int)-result);
45074 + down_write(&current->mm->mmap_sem);
45075 vm_munmap(textpos, text_len);
45076 vm_munmap(realdatastart, len);
45077 + up_write(&current->mm->mmap_sem);
45078 ret = result;
45079 goto err;
45080 }
45081 @@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
45082 }
45083 if (IS_ERR_VALUE(result)) {
45084 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
45085 + down_write(&current->mm->mmap_sem);
45086 vm_munmap(textpos, text_len + data_len + extra +
45087 MAX_SHARED_LIBS * sizeof(unsigned long));
45088 + up_write(&current->mm->mmap_sem);
45089 ret = result;
45090 goto err;
45091 }
45092 diff --git a/fs/bio.c b/fs/bio.c
45093 index b96fc6c..431d628 100644
45094 --- a/fs/bio.c
45095 +++ b/fs/bio.c
45096 @@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
45097 /*
45098 * Overflow, abort
45099 */
45100 - if (end < start)
45101 + if (end < start || end - start > INT_MAX - nr_pages)
45102 return ERR_PTR(-EINVAL);
45103
45104 nr_pages += end - start;
45105 @@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
45106 /*
45107 * Overflow, abort
45108 */
45109 - if (end < start)
45110 + if (end < start || end - start > INT_MAX - nr_pages)
45111 return ERR_PTR(-EINVAL);
45112
45113 nr_pages += end - start;
45114 @@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
45115 const int read = bio_data_dir(bio) == READ;
45116 struct bio_map_data *bmd = bio->bi_private;
45117 int i;
45118 - char *p = bmd->sgvecs[0].iov_base;
45119 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
45120
45121 __bio_for_each_segment(bvec, bio, i, 0) {
45122 char *addr = page_address(bvec->bv_page);
45123 diff --git a/fs/block_dev.c b/fs/block_dev.c
45124 index ab3a456..7da538b 100644
45125 --- a/fs/block_dev.c
45126 +++ b/fs/block_dev.c
45127 @@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
45128 else if (bdev->bd_contains == bdev)
45129 return true; /* is a whole device which isn't held */
45130
45131 - else if (whole->bd_holder == bd_may_claim)
45132 + else if (whole->bd_holder == (void *)bd_may_claim)
45133 return true; /* is a partition of a device that is being partitioned */
45134 else if (whole->bd_holder != NULL)
45135 return false; /* is a partition of a held device */
45136 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
45137 index 5a3e45d..ad19cc3 100644
45138 --- a/fs/btrfs/check-integrity.c
45139 +++ b/fs/btrfs/check-integrity.c
45140 @@ -156,7 +156,7 @@ struct btrfsic_block {
45141 union {
45142 bio_end_io_t *bio;
45143 bh_end_io_t *bh;
45144 - } orig_bio_bh_end_io;
45145 + } __no_const orig_bio_bh_end_io;
45146 int submit_bio_bh_rw;
45147 u64 flush_gen; /* only valid if !never_written */
45148 };
45149 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
45150 index cdfb4c4..da736d4 100644
45151 --- a/fs/btrfs/ctree.c
45152 +++ b/fs/btrfs/ctree.c
45153 @@ -1035,9 +1035,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
45154 free_extent_buffer(buf);
45155 add_root_to_dirty_list(root);
45156 } else {
45157 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
45158 - parent_start = parent->start;
45159 - else
45160 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
45161 + if (parent)
45162 + parent_start = parent->start;
45163 + else
45164 + parent_start = 0;
45165 + } else
45166 parent_start = 0;
45167
45168 WARN_ON(trans->transid != btrfs_header_generation(parent));
45169 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
45170 index 95542a1..95a8727 100644
45171 --- a/fs/btrfs/inode.c
45172 +++ b/fs/btrfs/inode.c
45173 @@ -7243,7 +7243,7 @@ fail:
45174 return -ENOMEM;
45175 }
45176
45177 -static int btrfs_getattr(struct vfsmount *mnt,
45178 +int btrfs_getattr(struct vfsmount *mnt,
45179 struct dentry *dentry, struct kstat *stat)
45180 {
45181 struct inode *inode = dentry->d_inode;
45182 @@ -7257,6 +7257,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
45183 return 0;
45184 }
45185
45186 +EXPORT_SYMBOL(btrfs_getattr);
45187 +
45188 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
45189 +{
45190 + return BTRFS_I(inode)->root->anon_dev;
45191 +}
45192 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
45193 +
45194 /*
45195 * If a file is moved, it will inherit the cow and compression flags of the new
45196 * directory.
45197 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
45198 index 8fcf9a5..a200000 100644
45199 --- a/fs/btrfs/ioctl.c
45200 +++ b/fs/btrfs/ioctl.c
45201 @@ -2965,9 +2965,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
45202 for (i = 0; i < num_types; i++) {
45203 struct btrfs_space_info *tmp;
45204
45205 + /* Don't copy in more than we allocated */
45206 if (!slot_count)
45207 break;
45208
45209 + slot_count--;
45210 +
45211 info = NULL;
45212 rcu_read_lock();
45213 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
45214 @@ -2989,10 +2992,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
45215 memcpy(dest, &space, sizeof(space));
45216 dest++;
45217 space_args.total_spaces++;
45218 - slot_count--;
45219 }
45220 - if (!slot_count)
45221 - break;
45222 }
45223 up_read(&info->groups_sem);
45224 }
45225 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
45226 index 776f0aa..3aad281 100644
45227 --- a/fs/btrfs/relocation.c
45228 +++ b/fs/btrfs/relocation.c
45229 @@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
45230 }
45231 spin_unlock(&rc->reloc_root_tree.lock);
45232
45233 - BUG_ON((struct btrfs_root *)node->data != root);
45234 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
45235
45236 if (!del) {
45237 spin_lock(&rc->reloc_root_tree.lock);
45238 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
45239 index 622f469..e8d2d55 100644
45240 --- a/fs/cachefiles/bind.c
45241 +++ b/fs/cachefiles/bind.c
45242 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
45243 args);
45244
45245 /* start by checking things over */
45246 - ASSERT(cache->fstop_percent >= 0 &&
45247 - cache->fstop_percent < cache->fcull_percent &&
45248 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
45249 cache->fcull_percent < cache->frun_percent &&
45250 cache->frun_percent < 100);
45251
45252 - ASSERT(cache->bstop_percent >= 0 &&
45253 - cache->bstop_percent < cache->bcull_percent &&
45254 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
45255 cache->bcull_percent < cache->brun_percent &&
45256 cache->brun_percent < 100);
45257
45258 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
45259 index 0a1467b..6a53245 100644
45260 --- a/fs/cachefiles/daemon.c
45261 +++ b/fs/cachefiles/daemon.c
45262 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
45263 if (n > buflen)
45264 return -EMSGSIZE;
45265
45266 - if (copy_to_user(_buffer, buffer, n) != 0)
45267 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
45268 return -EFAULT;
45269
45270 return n;
45271 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
45272 if (test_bit(CACHEFILES_DEAD, &cache->flags))
45273 return -EIO;
45274
45275 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
45276 + if (datalen > PAGE_SIZE - 1)
45277 return -EOPNOTSUPP;
45278
45279 /* drag the command string into the kernel so we can parse it */
45280 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
45281 if (args[0] != '%' || args[1] != '\0')
45282 return -EINVAL;
45283
45284 - if (fstop < 0 || fstop >= cache->fcull_percent)
45285 + if (fstop >= cache->fcull_percent)
45286 return cachefiles_daemon_range_error(cache, args);
45287
45288 cache->fstop_percent = fstop;
45289 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
45290 if (args[0] != '%' || args[1] != '\0')
45291 return -EINVAL;
45292
45293 - if (bstop < 0 || bstop >= cache->bcull_percent)
45294 + if (bstop >= cache->bcull_percent)
45295 return cachefiles_daemon_range_error(cache, args);
45296
45297 cache->bstop_percent = bstop;
45298 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
45299 index bd6bc1b..b627b53 100644
45300 --- a/fs/cachefiles/internal.h
45301 +++ b/fs/cachefiles/internal.h
45302 @@ -57,7 +57,7 @@ struct cachefiles_cache {
45303 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
45304 struct rb_root active_nodes; /* active nodes (can't be culled) */
45305 rwlock_t active_lock; /* lock for active_nodes */
45306 - atomic_t gravecounter; /* graveyard uniquifier */
45307 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
45308 unsigned frun_percent; /* when to stop culling (% files) */
45309 unsigned fcull_percent; /* when to start culling (% files) */
45310 unsigned fstop_percent; /* when to stop allocating (% files) */
45311 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
45312 * proc.c
45313 */
45314 #ifdef CONFIG_CACHEFILES_HISTOGRAM
45315 -extern atomic_t cachefiles_lookup_histogram[HZ];
45316 -extern atomic_t cachefiles_mkdir_histogram[HZ];
45317 -extern atomic_t cachefiles_create_histogram[HZ];
45318 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
45319 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
45320 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
45321
45322 extern int __init cachefiles_proc_init(void);
45323 extern void cachefiles_proc_cleanup(void);
45324 static inline
45325 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
45326 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
45327 {
45328 unsigned long jif = jiffies - start_jif;
45329 if (jif >= HZ)
45330 jif = HZ - 1;
45331 - atomic_inc(&histogram[jif]);
45332 + atomic_inc_unchecked(&histogram[jif]);
45333 }
45334
45335 #else
45336 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
45337 index b0b5f7c..039bb26 100644
45338 --- a/fs/cachefiles/namei.c
45339 +++ b/fs/cachefiles/namei.c
45340 @@ -318,7 +318,7 @@ try_again:
45341 /* first step is to make up a grave dentry in the graveyard */
45342 sprintf(nbuffer, "%08x%08x",
45343 (uint32_t) get_seconds(),
45344 - (uint32_t) atomic_inc_return(&cache->gravecounter));
45345 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
45346
45347 /* do the multiway lock magic */
45348 trap = lock_rename(cache->graveyard, dir);
45349 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
45350 index eccd339..4c1d995 100644
45351 --- a/fs/cachefiles/proc.c
45352 +++ b/fs/cachefiles/proc.c
45353 @@ -14,9 +14,9 @@
45354 #include <linux/seq_file.h>
45355 #include "internal.h"
45356
45357 -atomic_t cachefiles_lookup_histogram[HZ];
45358 -atomic_t cachefiles_mkdir_histogram[HZ];
45359 -atomic_t cachefiles_create_histogram[HZ];
45360 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
45361 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
45362 +atomic_unchecked_t cachefiles_create_histogram[HZ];
45363
45364 /*
45365 * display the latency histogram
45366 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
45367 return 0;
45368 default:
45369 index = (unsigned long) v - 3;
45370 - x = atomic_read(&cachefiles_lookup_histogram[index]);
45371 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
45372 - z = atomic_read(&cachefiles_create_histogram[index]);
45373 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
45374 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
45375 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
45376 if (x == 0 && y == 0 && z == 0)
45377 return 0;
45378
45379 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
45380 index c994691..2a1537f 100644
45381 --- a/fs/cachefiles/rdwr.c
45382 +++ b/fs/cachefiles/rdwr.c
45383 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
45384 old_fs = get_fs();
45385 set_fs(KERNEL_DS);
45386 ret = file->f_op->write(
45387 - file, (const void __user *) data, len, &pos);
45388 + file, (const void __force_user *) data, len, &pos);
45389 set_fs(old_fs);
45390 kunmap(page);
45391 if (ret != len)
45392 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
45393 index e5b7731..b9c59fb 100644
45394 --- a/fs/ceph/dir.c
45395 +++ b/fs/ceph/dir.c
45396 @@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
45397 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
45398 struct ceph_mds_client *mdsc = fsc->mdsc;
45399 unsigned frag = fpos_frag(filp->f_pos);
45400 - int off = fpos_off(filp->f_pos);
45401 + unsigned int off = fpos_off(filp->f_pos);
45402 int err;
45403 u32 ftype;
45404 struct ceph_mds_reply_info_parsed *rinfo;
45405 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
45406 index d9ea6ed..1e6c8ac 100644
45407 --- a/fs/cifs/cifs_debug.c
45408 +++ b/fs/cifs/cifs_debug.c
45409 @@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
45410
45411 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
45412 #ifdef CONFIG_CIFS_STATS2
45413 - atomic_set(&totBufAllocCount, 0);
45414 - atomic_set(&totSmBufAllocCount, 0);
45415 + atomic_set_unchecked(&totBufAllocCount, 0);
45416 + atomic_set_unchecked(&totSmBufAllocCount, 0);
45417 #endif /* CONFIG_CIFS_STATS2 */
45418 spin_lock(&cifs_tcp_ses_lock);
45419 list_for_each(tmp1, &cifs_tcp_ses_list) {
45420 @@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
45421 tcon = list_entry(tmp3,
45422 struct cifs_tcon,
45423 tcon_list);
45424 - atomic_set(&tcon->num_smbs_sent, 0);
45425 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
45426 if (server->ops->clear_stats)
45427 server->ops->clear_stats(tcon);
45428 }
45429 @@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
45430 smBufAllocCount.counter, cifs_min_small);
45431 #ifdef CONFIG_CIFS_STATS2
45432 seq_printf(m, "Total Large %d Small %d Allocations\n",
45433 - atomic_read(&totBufAllocCount),
45434 - atomic_read(&totSmBufAllocCount));
45435 + atomic_read_unchecked(&totBufAllocCount),
45436 + atomic_read_unchecked(&totSmBufAllocCount));
45437 #endif /* CONFIG_CIFS_STATS2 */
45438
45439 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
45440 @@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
45441 if (tcon->need_reconnect)
45442 seq_puts(m, "\tDISCONNECTED ");
45443 seq_printf(m, "\nSMBs: %d",
45444 - atomic_read(&tcon->num_smbs_sent));
45445 + atomic_read_unchecked(&tcon->num_smbs_sent));
45446 if (server->ops->print_stats)
45447 server->ops->print_stats(m, tcon);
45448 }
45449 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
45450 index e7931cc..76a1ab9 100644
45451 --- a/fs/cifs/cifsfs.c
45452 +++ b/fs/cifs/cifsfs.c
45453 @@ -999,7 +999,7 @@ cifs_init_request_bufs(void)
45454 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
45455 cifs_req_cachep = kmem_cache_create("cifs_request",
45456 CIFSMaxBufSize + max_hdr_size, 0,
45457 - SLAB_HWCACHE_ALIGN, NULL);
45458 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
45459 if (cifs_req_cachep == NULL)
45460 return -ENOMEM;
45461
45462 @@ -1026,7 +1026,7 @@ cifs_init_request_bufs(void)
45463 efficient to alloc 1 per page off the slab compared to 17K (5page)
45464 alloc of large cifs buffers even when page debugging is on */
45465 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
45466 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
45467 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
45468 NULL);
45469 if (cifs_sm_req_cachep == NULL) {
45470 mempool_destroy(cifs_req_poolp);
45471 @@ -1111,8 +1111,8 @@ init_cifs(void)
45472 atomic_set(&bufAllocCount, 0);
45473 atomic_set(&smBufAllocCount, 0);
45474 #ifdef CONFIG_CIFS_STATS2
45475 - atomic_set(&totBufAllocCount, 0);
45476 - atomic_set(&totSmBufAllocCount, 0);
45477 + atomic_set_unchecked(&totBufAllocCount, 0);
45478 + atomic_set_unchecked(&totSmBufAllocCount, 0);
45479 #endif /* CONFIG_CIFS_STATS2 */
45480
45481 atomic_set(&midCount, 0);
45482 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
45483 index f5af252..489b5f2 100644
45484 --- a/fs/cifs/cifsglob.h
45485 +++ b/fs/cifs/cifsglob.h
45486 @@ -751,35 +751,35 @@ struct cifs_tcon {
45487 __u16 Flags; /* optional support bits */
45488 enum statusEnum tidStatus;
45489 #ifdef CONFIG_CIFS_STATS
45490 - atomic_t num_smbs_sent;
45491 + atomic_unchecked_t num_smbs_sent;
45492 union {
45493 struct {
45494 - atomic_t num_writes;
45495 - atomic_t num_reads;
45496 - atomic_t num_flushes;
45497 - atomic_t num_oplock_brks;
45498 - atomic_t num_opens;
45499 - atomic_t num_closes;
45500 - atomic_t num_deletes;
45501 - atomic_t num_mkdirs;
45502 - atomic_t num_posixopens;
45503 - atomic_t num_posixmkdirs;
45504 - atomic_t num_rmdirs;
45505 - atomic_t num_renames;
45506 - atomic_t num_t2renames;
45507 - atomic_t num_ffirst;
45508 - atomic_t num_fnext;
45509 - atomic_t num_fclose;
45510 - atomic_t num_hardlinks;
45511 - atomic_t num_symlinks;
45512 - atomic_t num_locks;
45513 - atomic_t num_acl_get;
45514 - atomic_t num_acl_set;
45515 + atomic_unchecked_t num_writes;
45516 + atomic_unchecked_t num_reads;
45517 + atomic_unchecked_t num_flushes;
45518 + atomic_unchecked_t num_oplock_brks;
45519 + atomic_unchecked_t num_opens;
45520 + atomic_unchecked_t num_closes;
45521 + atomic_unchecked_t num_deletes;
45522 + atomic_unchecked_t num_mkdirs;
45523 + atomic_unchecked_t num_posixopens;
45524 + atomic_unchecked_t num_posixmkdirs;
45525 + atomic_unchecked_t num_rmdirs;
45526 + atomic_unchecked_t num_renames;
45527 + atomic_unchecked_t num_t2renames;
45528 + atomic_unchecked_t num_ffirst;
45529 + atomic_unchecked_t num_fnext;
45530 + atomic_unchecked_t num_fclose;
45531 + atomic_unchecked_t num_hardlinks;
45532 + atomic_unchecked_t num_symlinks;
45533 + atomic_unchecked_t num_locks;
45534 + atomic_unchecked_t num_acl_get;
45535 + atomic_unchecked_t num_acl_set;
45536 } cifs_stats;
45537 #ifdef CONFIG_CIFS_SMB2
45538 struct {
45539 - atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
45540 - atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
45541 + atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
45542 + atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
45543 } smb2_stats;
45544 #endif /* CONFIG_CIFS_SMB2 */
45545 } stats;
45546 @@ -1094,7 +1094,7 @@ build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
45547 }
45548
45549 #ifdef CONFIG_CIFS_STATS
45550 -#define cifs_stats_inc atomic_inc
45551 +#define cifs_stats_inc atomic_inc_unchecked
45552
45553 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
45554 unsigned int bytes)
45555 @@ -1459,8 +1459,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
45556 /* Various Debug counters */
45557 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
45558 #ifdef CONFIG_CIFS_STATS2
45559 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
45560 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
45561 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
45562 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
45563 #endif
45564 GLOBAL_EXTERN atomic_t smBufAllocCount;
45565 GLOBAL_EXTERN atomic_t midCount;
45566 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
45567 index 51dc2fb..1e12a33 100644
45568 --- a/fs/cifs/link.c
45569 +++ b/fs/cifs/link.c
45570 @@ -616,7 +616,7 @@ symlink_exit:
45571
45572 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
45573 {
45574 - char *p = nd_get_link(nd);
45575 + const char *p = nd_get_link(nd);
45576 if (!IS_ERR(p))
45577 kfree(p);
45578 }
45579 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
45580 index 3a00c0d..42d901c 100644
45581 --- a/fs/cifs/misc.c
45582 +++ b/fs/cifs/misc.c
45583 @@ -169,7 +169,7 @@ cifs_buf_get(void)
45584 memset(ret_buf, 0, buf_size + 3);
45585 atomic_inc(&bufAllocCount);
45586 #ifdef CONFIG_CIFS_STATS2
45587 - atomic_inc(&totBufAllocCount);
45588 + atomic_inc_unchecked(&totBufAllocCount);
45589 #endif /* CONFIG_CIFS_STATS2 */
45590 }
45591
45592 @@ -204,7 +204,7 @@ cifs_small_buf_get(void)
45593 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
45594 atomic_inc(&smBufAllocCount);
45595 #ifdef CONFIG_CIFS_STATS2
45596 - atomic_inc(&totSmBufAllocCount);
45597 + atomic_inc_unchecked(&totSmBufAllocCount);
45598 #endif /* CONFIG_CIFS_STATS2 */
45599
45600 }
45601 diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
45602 index 34cea27..3fbdf6f 100644
45603 --- a/fs/cifs/smb1ops.c
45604 +++ b/fs/cifs/smb1ops.c
45605 @@ -610,27 +610,27 @@ static void
45606 cifs_clear_stats(struct cifs_tcon *tcon)
45607 {
45608 #ifdef CONFIG_CIFS_STATS
45609 - atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
45610 - atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
45611 - atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
45612 - atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
45613 - atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
45614 - atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
45615 - atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
45616 - atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
45617 - atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
45618 - atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
45619 - atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
45620 - atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
45621 - atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
45622 - atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
45623 - atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
45624 - atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
45625 - atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
45626 - atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
45627 - atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
45628 - atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
45629 - atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
45630 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
45631 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
45632 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
45633 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
45634 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
45635 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
45636 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
45637 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
45638 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
45639 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
45640 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
45641 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
45642 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
45643 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
45644 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
45645 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
45646 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
45647 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
45648 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
45649 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
45650 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
45651 #endif
45652 }
45653
45654 @@ -639,36 +639,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
45655 {
45656 #ifdef CONFIG_CIFS_STATS
45657 seq_printf(m, " Oplocks breaks: %d",
45658 - atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
45659 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
45660 seq_printf(m, "\nReads: %d Bytes: %llu",
45661 - atomic_read(&tcon->stats.cifs_stats.num_reads),
45662 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
45663 (long long)(tcon->bytes_read));
45664 seq_printf(m, "\nWrites: %d Bytes: %llu",
45665 - atomic_read(&tcon->stats.cifs_stats.num_writes),
45666 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
45667 (long long)(tcon->bytes_written));
45668 seq_printf(m, "\nFlushes: %d",
45669 - atomic_read(&tcon->stats.cifs_stats.num_flushes));
45670 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
45671 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
45672 - atomic_read(&tcon->stats.cifs_stats.num_locks),
45673 - atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
45674 - atomic_read(&tcon->stats.cifs_stats.num_symlinks));
45675 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
45676 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
45677 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
45678 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
45679 - atomic_read(&tcon->stats.cifs_stats.num_opens),
45680 - atomic_read(&tcon->stats.cifs_stats.num_closes),
45681 - atomic_read(&tcon->stats.cifs_stats.num_deletes));
45682 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
45683 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
45684 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
45685 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
45686 - atomic_read(&tcon->stats.cifs_stats.num_posixopens),
45687 - atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
45688 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
45689 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
45690 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
45691 - atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
45692 - atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
45693 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
45694 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
45695 seq_printf(m, "\nRenames: %d T2 Renames %d",
45696 - atomic_read(&tcon->stats.cifs_stats.num_renames),
45697 - atomic_read(&tcon->stats.cifs_stats.num_t2renames));
45698 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
45699 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
45700 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
45701 - atomic_read(&tcon->stats.cifs_stats.num_ffirst),
45702 - atomic_read(&tcon->stats.cifs_stats.num_fnext),
45703 - atomic_read(&tcon->stats.cifs_stats.num_fclose));
45704 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
45705 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
45706 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
45707 #endif
45708 }
45709
45710 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
45711 index 4d9dbe0..0af4601 100644
45712 --- a/fs/cifs/smb2ops.c
45713 +++ b/fs/cifs/smb2ops.c
45714 @@ -291,8 +291,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
45715 #ifdef CONFIG_CIFS_STATS
45716 int i;
45717 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
45718 - atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
45719 - atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
45720 + atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
45721 + atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
45722 }
45723 #endif
45724 }
45725 @@ -301,66 +301,66 @@ static void
45726 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
45727 {
45728 #ifdef CONFIG_CIFS_STATS
45729 - atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
45730 - atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
45731 + atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
45732 + atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
45733 seq_printf(m, "\nNegotiates: %d sent %d failed",
45734 - atomic_read(&sent[SMB2_NEGOTIATE_HE]),
45735 - atomic_read(&failed[SMB2_NEGOTIATE_HE]));
45736 + atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
45737 + atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
45738 seq_printf(m, "\nSessionSetups: %d sent %d failed",
45739 - atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
45740 - atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
45741 + atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
45742 + atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
45743 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
45744 seq_printf(m, "\nLogoffs: %d sent %d failed",
45745 - atomic_read(&sent[SMB2_LOGOFF_HE]),
45746 - atomic_read(&failed[SMB2_LOGOFF_HE]));
45747 + atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
45748 + atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
45749 seq_printf(m, "\nTreeConnects: %d sent %d failed",
45750 - atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
45751 - atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
45752 + atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
45753 + atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
45754 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
45755 - atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
45756 - atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
45757 + atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
45758 + atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
45759 seq_printf(m, "\nCreates: %d sent %d failed",
45760 - atomic_read(&sent[SMB2_CREATE_HE]),
45761 - atomic_read(&failed[SMB2_CREATE_HE]));
45762 + atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
45763 + atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
45764 seq_printf(m, "\nCloses: %d sent %d failed",
45765 - atomic_read(&sent[SMB2_CLOSE_HE]),
45766 - atomic_read(&failed[SMB2_CLOSE_HE]));
45767 + atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
45768 + atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
45769 seq_printf(m, "\nFlushes: %d sent %d failed",
45770 - atomic_read(&sent[SMB2_FLUSH_HE]),
45771 - atomic_read(&failed[SMB2_FLUSH_HE]));
45772 + atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
45773 + atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
45774 seq_printf(m, "\nReads: %d sent %d failed",
45775 - atomic_read(&sent[SMB2_READ_HE]),
45776 - atomic_read(&failed[SMB2_READ_HE]));
45777 + atomic_read_unchecked(&sent[SMB2_READ_HE]),
45778 + atomic_read_unchecked(&failed[SMB2_READ_HE]));
45779 seq_printf(m, "\nWrites: %d sent %d failed",
45780 - atomic_read(&sent[SMB2_WRITE_HE]),
45781 - atomic_read(&failed[SMB2_WRITE_HE]));
45782 + atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
45783 + atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
45784 seq_printf(m, "\nLocks: %d sent %d failed",
45785 - atomic_read(&sent[SMB2_LOCK_HE]),
45786 - atomic_read(&failed[SMB2_LOCK_HE]));
45787 + atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
45788 + atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
45789 seq_printf(m, "\nIOCTLs: %d sent %d failed",
45790 - atomic_read(&sent[SMB2_IOCTL_HE]),
45791 - atomic_read(&failed[SMB2_IOCTL_HE]));
45792 + atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
45793 + atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
45794 seq_printf(m, "\nCancels: %d sent %d failed",
45795 - atomic_read(&sent[SMB2_CANCEL_HE]),
45796 - atomic_read(&failed[SMB2_CANCEL_HE]));
45797 + atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
45798 + atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
45799 seq_printf(m, "\nEchos: %d sent %d failed",
45800 - atomic_read(&sent[SMB2_ECHO_HE]),
45801 - atomic_read(&failed[SMB2_ECHO_HE]));
45802 + atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
45803 + atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
45804 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
45805 - atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
45806 - atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
45807 + atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
45808 + atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
45809 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
45810 - atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
45811 - atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
45812 + atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
45813 + atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
45814 seq_printf(m, "\nQueryInfos: %d sent %d failed",
45815 - atomic_read(&sent[SMB2_QUERY_INFO_HE]),
45816 - atomic_read(&failed[SMB2_QUERY_INFO_HE]));
45817 + atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
45818 + atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
45819 seq_printf(m, "\nSetInfos: %d sent %d failed",
45820 - atomic_read(&sent[SMB2_SET_INFO_HE]),
45821 - atomic_read(&failed[SMB2_SET_INFO_HE]));
45822 + atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
45823 + atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
45824 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
45825 - atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
45826 - atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
45827 + atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
45828 + atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
45829 #endif
45830 }
45831
45832 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
45833 index 958ae0e..505c9d0 100644
45834 --- a/fs/coda/cache.c
45835 +++ b/fs/coda/cache.c
45836 @@ -24,7 +24,7 @@
45837 #include "coda_linux.h"
45838 #include "coda_cache.h"
45839
45840 -static atomic_t permission_epoch = ATOMIC_INIT(0);
45841 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
45842
45843 /* replace or extend an acl cache hit */
45844 void coda_cache_enter(struct inode *inode, int mask)
45845 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
45846 struct coda_inode_info *cii = ITOC(inode);
45847
45848 spin_lock(&cii->c_lock);
45849 - cii->c_cached_epoch = atomic_read(&permission_epoch);
45850 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
45851 if (cii->c_uid != current_fsuid()) {
45852 cii->c_uid = current_fsuid();
45853 cii->c_cached_perm = mask;
45854 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
45855 {
45856 struct coda_inode_info *cii = ITOC(inode);
45857 spin_lock(&cii->c_lock);
45858 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
45859 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
45860 spin_unlock(&cii->c_lock);
45861 }
45862
45863 /* remove all acl caches */
45864 void coda_cache_clear_all(struct super_block *sb)
45865 {
45866 - atomic_inc(&permission_epoch);
45867 + atomic_inc_unchecked(&permission_epoch);
45868 }
45869
45870
45871 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
45872 spin_lock(&cii->c_lock);
45873 hit = (mask & cii->c_cached_perm) == mask &&
45874 cii->c_uid == current_fsuid() &&
45875 - cii->c_cached_epoch == atomic_read(&permission_epoch);
45876 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
45877 spin_unlock(&cii->c_lock);
45878
45879 return hit;
45880 diff --git a/fs/compat.c b/fs/compat.c
45881 index 015e1e1..5ce8e54 100644
45882 --- a/fs/compat.c
45883 +++ b/fs/compat.c
45884 @@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
45885
45886 set_fs(KERNEL_DS);
45887 /* The __user pointer cast is valid because of the set_fs() */
45888 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
45889 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
45890 set_fs(oldfs);
45891 /* truncating is ok because it's a user address */
45892 if (!ret)
45893 @@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
45894 goto out;
45895
45896 ret = -EINVAL;
45897 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
45898 + if (nr_segs > UIO_MAXIOV)
45899 goto out;
45900 if (nr_segs > fast_segs) {
45901 ret = -ENOMEM;
45902 @@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
45903
45904 struct compat_readdir_callback {
45905 struct compat_old_linux_dirent __user *dirent;
45906 + struct file * file;
45907 int result;
45908 };
45909
45910 @@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
45911 buf->result = -EOVERFLOW;
45912 return -EOVERFLOW;
45913 }
45914 +
45915 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45916 + return 0;
45917 +
45918 buf->result++;
45919 dirent = buf->dirent;
45920 if (!access_ok(VERIFY_WRITE, dirent,
45921 @@ -878,6 +883,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
45922
45923 buf.result = 0;
45924 buf.dirent = dirent;
45925 + buf.file = f.file;
45926
45927 error = vfs_readdir(f.file, compat_fillonedir, &buf);
45928 if (buf.result)
45929 @@ -897,6 +903,7 @@ struct compat_linux_dirent {
45930 struct compat_getdents_callback {
45931 struct compat_linux_dirent __user *current_dir;
45932 struct compat_linux_dirent __user *previous;
45933 + struct file * file;
45934 int count;
45935 int error;
45936 };
45937 @@ -918,6 +925,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
45938 buf->error = -EOVERFLOW;
45939 return -EOVERFLOW;
45940 }
45941 +
45942 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45943 + return 0;
45944 +
45945 dirent = buf->previous;
45946 if (dirent) {
45947 if (__put_user(offset, &dirent->d_off))
45948 @@ -963,6 +974,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
45949 buf.previous = NULL;
45950 buf.count = count;
45951 buf.error = 0;
45952 + buf.file = f.file;
45953
45954 error = vfs_readdir(f.file, compat_filldir, &buf);
45955 if (error >= 0)
45956 @@ -983,6 +995,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
45957 struct compat_getdents_callback64 {
45958 struct linux_dirent64 __user *current_dir;
45959 struct linux_dirent64 __user *previous;
45960 + struct file * file;
45961 int count;
45962 int error;
45963 };
45964 @@ -999,6 +1012,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
45965 buf->error = -EINVAL; /* only used if we fail.. */
45966 if (reclen > buf->count)
45967 return -EINVAL;
45968 +
45969 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45970 + return 0;
45971 +
45972 dirent = buf->previous;
45973
45974 if (dirent) {
45975 @@ -1048,13 +1065,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
45976 buf.previous = NULL;
45977 buf.count = count;
45978 buf.error = 0;
45979 + buf.file = f.file;
45980
45981 error = vfs_readdir(f.file, compat_filldir64, &buf);
45982 if (error >= 0)
45983 error = buf.error;
45984 lastdirent = buf.previous;
45985 if (lastdirent) {
45986 - typeof(lastdirent->d_off) d_off = f.file->f_pos;
45987 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
45988 if (__put_user_unaligned(d_off, &lastdirent->d_off))
45989 error = -EFAULT;
45990 else
45991 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
45992 index a81147e..20bf2b5 100644
45993 --- a/fs/compat_binfmt_elf.c
45994 +++ b/fs/compat_binfmt_elf.c
45995 @@ -30,11 +30,13 @@
45996 #undef elf_phdr
45997 #undef elf_shdr
45998 #undef elf_note
45999 +#undef elf_dyn
46000 #undef elf_addr_t
46001 #define elfhdr elf32_hdr
46002 #define elf_phdr elf32_phdr
46003 #define elf_shdr elf32_shdr
46004 #define elf_note elf32_note
46005 +#define elf_dyn Elf32_Dyn
46006 #define elf_addr_t Elf32_Addr
46007
46008 /*
46009 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
46010 index 4c6285f..b7a2411 100644
46011 --- a/fs/compat_ioctl.c
46012 +++ b/fs/compat_ioctl.c
46013 @@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
46014 return -EFAULT;
46015 if (__get_user(udata, &ss32->iomem_base))
46016 return -EFAULT;
46017 - ss.iomem_base = compat_ptr(udata);
46018 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
46019 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
46020 __get_user(ss.port_high, &ss32->port_high))
46021 return -EFAULT;
46022 @@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
46023 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
46024 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
46025 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
46026 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
46027 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
46028 return -EFAULT;
46029
46030 return ioctl_preallocate(file, p);
46031 @@ -1617,8 +1617,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
46032 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
46033 {
46034 unsigned int a, b;
46035 - a = *(unsigned int *)p;
46036 - b = *(unsigned int *)q;
46037 + a = *(const unsigned int *)p;
46038 + b = *(const unsigned int *)q;
46039 if (a > b)
46040 return 1;
46041 if (a < b)
46042 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
46043 index 7414ae2..d98ad6d 100644
46044 --- a/fs/configfs/dir.c
46045 +++ b/fs/configfs/dir.c
46046 @@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
46047 }
46048 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
46049 struct configfs_dirent *next;
46050 - const char * name;
46051 + const unsigned char * name;
46052 + char d_name[sizeof(next->s_dentry->d_iname)];
46053 int len;
46054 struct inode *inode = NULL;
46055
46056 @@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
46057 continue;
46058
46059 name = configfs_get_name(next);
46060 - len = strlen(name);
46061 + if (next->s_dentry && name == next->s_dentry->d_iname) {
46062 + len = next->s_dentry->d_name.len;
46063 + memcpy(d_name, name, len);
46064 + name = d_name;
46065 + } else
46066 + len = strlen(name);
46067
46068 /*
46069 * We'll have a dentry and an inode for
46070 diff --git a/fs/coredump.c b/fs/coredump.c
46071 index ce47379..68c8e43 100644
46072 --- a/fs/coredump.c
46073 +++ b/fs/coredump.c
46074 @@ -52,7 +52,7 @@ struct core_name {
46075 char *corename;
46076 int used, size;
46077 };
46078 -static atomic_t call_count = ATOMIC_INIT(1);
46079 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
46080
46081 /* The maximal length of core_pattern is also specified in sysctl.c */
46082
46083 @@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
46084 {
46085 char *old_corename = cn->corename;
46086
46087 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
46088 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
46089 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
46090
46091 if (!cn->corename) {
46092 @@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
46093 int pid_in_pattern = 0;
46094 int err = 0;
46095
46096 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
46097 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
46098 cn->corename = kmalloc(cn->size, GFP_KERNEL);
46099 cn->used = 0;
46100
46101 @@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
46102 pipe = file->f_path.dentry->d_inode->i_pipe;
46103
46104 pipe_lock(pipe);
46105 - pipe->readers++;
46106 - pipe->writers--;
46107 + atomic_inc(&pipe->readers);
46108 + atomic_dec(&pipe->writers);
46109
46110 - while ((pipe->readers > 1) && (!signal_pending(current))) {
46111 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
46112 wake_up_interruptible_sync(&pipe->wait);
46113 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
46114 pipe_wait(pipe);
46115 }
46116
46117 - pipe->readers--;
46118 - pipe->writers++;
46119 + atomic_dec(&pipe->readers);
46120 + atomic_inc(&pipe->writers);
46121 pipe_unlock(pipe);
46122
46123 }
46124 @@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46125 int ispipe;
46126 struct files_struct *displaced;
46127 bool need_nonrelative = false;
46128 - static atomic_t core_dump_count = ATOMIC_INIT(0);
46129 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
46130 + long signr = siginfo->si_signo;
46131 struct coredump_params cprm = {
46132 .siginfo = siginfo,
46133 .regs = regs,
46134 @@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46135 .mm_flags = mm->flags,
46136 };
46137
46138 - audit_core_dumps(siginfo->si_signo);
46139 + audit_core_dumps(signr);
46140 +
46141 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
46142 + gr_handle_brute_attach(cprm.mm_flags);
46143
46144 binfmt = mm->binfmt;
46145 if (!binfmt || !binfmt->core_dump)
46146 @@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46147 need_nonrelative = true;
46148 }
46149
46150 - retval = coredump_wait(siginfo->si_signo, &core_state);
46151 + retval = coredump_wait(signr, &core_state);
46152 if (retval < 0)
46153 goto fail_creds;
46154
46155 @@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46156 }
46157 cprm.limit = RLIM_INFINITY;
46158
46159 - dump_count = atomic_inc_return(&core_dump_count);
46160 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
46161 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
46162 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
46163 task_tgid_vnr(current), current->comm);
46164 @@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46165 } else {
46166 struct inode *inode;
46167
46168 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
46169 +
46170 if (cprm.limit < binfmt->min_coredump)
46171 goto fail_unlock;
46172
46173 @@ -640,7 +646,7 @@ close_fail:
46174 filp_close(cprm.file, NULL);
46175 fail_dropcount:
46176 if (ispipe)
46177 - atomic_dec(&core_dump_count);
46178 + atomic_dec_unchecked(&core_dump_count);
46179 fail_unlock:
46180 kfree(cn.corename);
46181 fail_corename:
46182 @@ -659,7 +665,7 @@ fail:
46183 */
46184 int dump_write(struct file *file, const void *addr, int nr)
46185 {
46186 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
46187 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
46188 }
46189 EXPORT_SYMBOL(dump_write);
46190
46191 diff --git a/fs/dcache.c b/fs/dcache.c
46192 index 3a463d0..9f345c8 100644
46193 --- a/fs/dcache.c
46194 +++ b/fs/dcache.c
46195 @@ -3164,7 +3164,7 @@ void __init vfs_caches_init(unsigned long mempages)
46196 mempages -= reserve;
46197
46198 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
46199 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
46200 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
46201
46202 dcache_init();
46203 inode_init();
46204 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
46205 index b607d92..41fda09 100644
46206 --- a/fs/debugfs/inode.c
46207 +++ b/fs/debugfs/inode.c
46208 @@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
46209 */
46210 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
46211 {
46212 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46213 + return __create_file(name, S_IFDIR | S_IRWXU,
46214 +#else
46215 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46216 +#endif
46217 parent, NULL, NULL);
46218 }
46219 EXPORT_SYMBOL_GPL(debugfs_create_dir);
46220 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
46221 index cc7709e..7e7211f 100644
46222 --- a/fs/ecryptfs/inode.c
46223 +++ b/fs/ecryptfs/inode.c
46224 @@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
46225 old_fs = get_fs();
46226 set_fs(get_ds());
46227 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
46228 - (char __user *)lower_buf,
46229 + (char __force_user *)lower_buf,
46230 PATH_MAX);
46231 set_fs(old_fs);
46232 if (rc < 0)
46233 @@ -706,7 +706,7 @@ out:
46234 static void
46235 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
46236 {
46237 - char *buf = nd_get_link(nd);
46238 + const char *buf = nd_get_link(nd);
46239 if (!IS_ERR(buf)) {
46240 /* Free the char* */
46241 kfree(buf);
46242 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
46243 index 412e6ed..4292d22 100644
46244 --- a/fs/ecryptfs/miscdev.c
46245 +++ b/fs/ecryptfs/miscdev.c
46246 @@ -315,7 +315,7 @@ check_list:
46247 goto out_unlock_msg_ctx;
46248 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
46249 if (msg_ctx->msg) {
46250 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
46251 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
46252 goto out_unlock_msg_ctx;
46253 i += packet_length_size;
46254 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
46255 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
46256 index b2a34a1..162fa69 100644
46257 --- a/fs/ecryptfs/read_write.c
46258 +++ b/fs/ecryptfs/read_write.c
46259 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
46260 return -EIO;
46261 fs_save = get_fs();
46262 set_fs(get_ds());
46263 - rc = vfs_write(lower_file, data, size, &offset);
46264 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
46265 set_fs(fs_save);
46266 mark_inode_dirty_sync(ecryptfs_inode);
46267 return rc;
46268 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
46269 return -EIO;
46270 fs_save = get_fs();
46271 set_fs(get_ds());
46272 - rc = vfs_read(lower_file, data, size, &offset);
46273 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
46274 set_fs(fs_save);
46275 return rc;
46276 }
46277 diff --git a/fs/exec.c b/fs/exec.c
46278 index 0039055..03fb669 100644
46279 --- a/fs/exec.c
46280 +++ b/fs/exec.c
46281 @@ -55,6 +55,16 @@
46282 #include <linux/pipe_fs_i.h>
46283 #include <linux/oom.h>
46284 #include <linux/compat.h>
46285 +#include <linux/random.h>
46286 +#include <linux/seq_file.h>
46287 +#include <linux/coredump.h>
46288 +
46289 +#ifdef CONFIG_PAX_REFCOUNT
46290 +#include <linux/kallsyms.h>
46291 +#include <linux/kdebug.h>
46292 +#endif
46293 +
46294 +#include <trace/events/fs.h>
46295
46296 #include <asm/uaccess.h>
46297 #include <asm/mmu_context.h>
46298 @@ -66,6 +76,18 @@
46299
46300 #include <trace/events/sched.h>
46301
46302 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46303 +void __weak pax_set_initial_flags(struct linux_binprm *bprm)
46304 +{
46305 + pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
46306 +}
46307 +#endif
46308 +
46309 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
46310 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
46311 +EXPORT_SYMBOL(pax_set_initial_flags_func);
46312 +#endif
46313 +
46314 int suid_dumpable = 0;
46315
46316 static LIST_HEAD(formats);
46317 @@ -180,18 +202,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
46318 int write)
46319 {
46320 struct page *page;
46321 - int ret;
46322
46323 -#ifdef CONFIG_STACK_GROWSUP
46324 - if (write) {
46325 - ret = expand_downwards(bprm->vma, pos);
46326 - if (ret < 0)
46327 - return NULL;
46328 - }
46329 -#endif
46330 - ret = get_user_pages(current, bprm->mm, pos,
46331 - 1, write, 1, &page, NULL);
46332 - if (ret <= 0)
46333 + if (0 > expand_downwards(bprm->vma, pos))
46334 + return NULL;
46335 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
46336 return NULL;
46337
46338 if (write) {
46339 @@ -207,6 +221,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
46340 if (size <= ARG_MAX)
46341 return page;
46342
46343 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46344 + // only allow 512KB for argv+env on suid/sgid binaries
46345 + // to prevent easy ASLR exhaustion
46346 + if (((bprm->cred->euid != current_euid()) ||
46347 + (bprm->cred->egid != current_egid())) &&
46348 + (size > (512 * 1024))) {
46349 + put_page(page);
46350 + return NULL;
46351 + }
46352 +#endif
46353 +
46354 /*
46355 * Limit to 1/4-th the stack size for the argv+env strings.
46356 * This ensures that:
46357 @@ -266,6 +291,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
46358 vma->vm_end = STACK_TOP_MAX;
46359 vma->vm_start = vma->vm_end - PAGE_SIZE;
46360 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
46361 +
46362 +#ifdef CONFIG_PAX_SEGMEXEC
46363 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
46364 +#endif
46365 +
46366 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
46367 INIT_LIST_HEAD(&vma->anon_vma_chain);
46368
46369 @@ -276,6 +306,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
46370 mm->stack_vm = mm->total_vm = 1;
46371 up_write(&mm->mmap_sem);
46372 bprm->p = vma->vm_end - sizeof(void *);
46373 +
46374 +#ifdef CONFIG_PAX_RANDUSTACK
46375 + if (randomize_va_space)
46376 + bprm->p ^= random32() & ~PAGE_MASK;
46377 +#endif
46378 +
46379 return 0;
46380 err:
46381 up_write(&mm->mmap_sem);
46382 @@ -384,19 +420,7 @@ err:
46383 return err;
46384 }
46385
46386 -struct user_arg_ptr {
46387 -#ifdef CONFIG_COMPAT
46388 - bool is_compat;
46389 -#endif
46390 - union {
46391 - const char __user *const __user *native;
46392 -#ifdef CONFIG_COMPAT
46393 - const compat_uptr_t __user *compat;
46394 -#endif
46395 - } ptr;
46396 -};
46397 -
46398 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46399 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46400 {
46401 const char __user *native;
46402
46403 @@ -405,14 +429,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46404 compat_uptr_t compat;
46405
46406 if (get_user(compat, argv.ptr.compat + nr))
46407 - return ERR_PTR(-EFAULT);
46408 + return (const char __force_user *)ERR_PTR(-EFAULT);
46409
46410 return compat_ptr(compat);
46411 }
46412 #endif
46413
46414 if (get_user(native, argv.ptr.native + nr))
46415 - return ERR_PTR(-EFAULT);
46416 + return (const char __force_user *)ERR_PTR(-EFAULT);
46417
46418 return native;
46419 }
46420 @@ -431,7 +455,7 @@ static int count(struct user_arg_ptr argv, int max)
46421 if (!p)
46422 break;
46423
46424 - if (IS_ERR(p))
46425 + if (IS_ERR((const char __force_kernel *)p))
46426 return -EFAULT;
46427
46428 if (i++ >= max)
46429 @@ -465,7 +489,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
46430
46431 ret = -EFAULT;
46432 str = get_user_arg_ptr(argv, argc);
46433 - if (IS_ERR(str))
46434 + if (IS_ERR((const char __force_kernel *)str))
46435 goto out;
46436
46437 len = strnlen_user(str, MAX_ARG_STRLEN);
46438 @@ -547,7 +571,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
46439 int r;
46440 mm_segment_t oldfs = get_fs();
46441 struct user_arg_ptr argv = {
46442 - .ptr.native = (const char __user *const __user *)__argv,
46443 + .ptr.native = (const char __force_user *const __force_user *)__argv,
46444 };
46445
46446 set_fs(KERNEL_DS);
46447 @@ -582,7 +606,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
46448 unsigned long new_end = old_end - shift;
46449 struct mmu_gather tlb;
46450
46451 - BUG_ON(new_start > new_end);
46452 + if (new_start >= new_end || new_start < mmap_min_addr)
46453 + return -ENOMEM;
46454
46455 /*
46456 * ensure there are no vmas between where we want to go
46457 @@ -591,6 +616,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
46458 if (vma != find_vma(mm, new_start))
46459 return -EFAULT;
46460
46461 +#ifdef CONFIG_PAX_SEGMEXEC
46462 + BUG_ON(pax_find_mirror_vma(vma));
46463 +#endif
46464 +
46465 /*
46466 * cover the whole range: [new_start, old_end)
46467 */
46468 @@ -671,10 +700,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
46469 stack_top = arch_align_stack(stack_top);
46470 stack_top = PAGE_ALIGN(stack_top);
46471
46472 - if (unlikely(stack_top < mmap_min_addr) ||
46473 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
46474 - return -ENOMEM;
46475 -
46476 stack_shift = vma->vm_end - stack_top;
46477
46478 bprm->p -= stack_shift;
46479 @@ -686,8 +711,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
46480 bprm->exec -= stack_shift;
46481
46482 down_write(&mm->mmap_sem);
46483 +
46484 + /* Move stack pages down in memory. */
46485 + if (stack_shift) {
46486 + ret = shift_arg_pages(vma, stack_shift);
46487 + if (ret)
46488 + goto out_unlock;
46489 + }
46490 +
46491 vm_flags = VM_STACK_FLAGS;
46492
46493 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46494 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46495 + vm_flags &= ~VM_EXEC;
46496 +
46497 +#ifdef CONFIG_PAX_MPROTECT
46498 + if (mm->pax_flags & MF_PAX_MPROTECT)
46499 + vm_flags &= ~VM_MAYEXEC;
46500 +#endif
46501 +
46502 + }
46503 +#endif
46504 +
46505 /*
46506 * Adjust stack execute permissions; explicitly enable for
46507 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
46508 @@ -706,13 +751,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
46509 goto out_unlock;
46510 BUG_ON(prev != vma);
46511
46512 - /* Move stack pages down in memory. */
46513 - if (stack_shift) {
46514 - ret = shift_arg_pages(vma, stack_shift);
46515 - if (ret)
46516 - goto out_unlock;
46517 - }
46518 -
46519 /* mprotect_fixup is overkill to remove the temporary stack flags */
46520 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
46521
46522 @@ -771,6 +809,8 @@ struct file *open_exec(const char *name)
46523
46524 fsnotify_open(file);
46525
46526 + trace_open_exec(name);
46527 +
46528 err = deny_write_access(file);
46529 if (err)
46530 goto exit;
46531 @@ -794,7 +834,7 @@ int kernel_read(struct file *file, loff_t offset,
46532 old_fs = get_fs();
46533 set_fs(get_ds());
46534 /* The cast to a user pointer is valid due to the set_fs() */
46535 - result = vfs_read(file, (void __user *)addr, count, &pos);
46536 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
46537 set_fs(old_fs);
46538 return result;
46539 }
46540 @@ -1231,7 +1271,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
46541 }
46542 rcu_read_unlock();
46543
46544 - if (p->fs->users > n_fs) {
46545 + if (atomic_read(&p->fs->users) > n_fs) {
46546 bprm->unsafe |= LSM_UNSAFE_SHARE;
46547 } else {
46548 res = -EAGAIN;
46549 @@ -1434,6 +1474,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
46550
46551 EXPORT_SYMBOL(search_binary_handler);
46552
46553 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46554 +static DEFINE_PER_CPU(u64, exec_counter);
46555 +static int __init init_exec_counters(void)
46556 +{
46557 + unsigned int cpu;
46558 +
46559 + for_each_possible_cpu(cpu) {
46560 + per_cpu(exec_counter, cpu) = (u64)cpu;
46561 + }
46562 +
46563 + return 0;
46564 +}
46565 +early_initcall(init_exec_counters);
46566 +static inline void increment_exec_counter(void)
46567 +{
46568 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
46569 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
46570 +}
46571 +#else
46572 +static inline void increment_exec_counter(void) {}
46573 +#endif
46574 +
46575 /*
46576 * sys_execve() executes a new program.
46577 */
46578 @@ -1442,6 +1504,11 @@ static int do_execve_common(const char *filename,
46579 struct user_arg_ptr envp,
46580 struct pt_regs *regs)
46581 {
46582 +#ifdef CONFIG_GRKERNSEC
46583 + struct file *old_exec_file;
46584 + struct acl_subject_label *old_acl;
46585 + struct rlimit old_rlim[RLIM_NLIMITS];
46586 +#endif
46587 struct linux_binprm *bprm;
46588 struct file *file;
46589 struct files_struct *displaced;
46590 @@ -1449,6 +1516,8 @@ static int do_execve_common(const char *filename,
46591 int retval;
46592 const struct cred *cred = current_cred();
46593
46594 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
46595 +
46596 /*
46597 * We move the actual failure in case of RLIMIT_NPROC excess from
46598 * set*uid() to execve() because too many poorly written programs
46599 @@ -1489,12 +1558,27 @@ static int do_execve_common(const char *filename,
46600 if (IS_ERR(file))
46601 goto out_unmark;
46602
46603 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
46604 + retval = -EPERM;
46605 + goto out_file;
46606 + }
46607 +
46608 sched_exec();
46609
46610 bprm->file = file;
46611 bprm->filename = filename;
46612 bprm->interp = filename;
46613
46614 + if (gr_process_user_ban()) {
46615 + retval = -EPERM;
46616 + goto out_file;
46617 + }
46618 +
46619 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
46620 + retval = -EACCES;
46621 + goto out_file;
46622 + }
46623 +
46624 retval = bprm_mm_init(bprm);
46625 if (retval)
46626 goto out_file;
46627 @@ -1511,24 +1595,65 @@ static int do_execve_common(const char *filename,
46628 if (retval < 0)
46629 goto out;
46630
46631 +#ifdef CONFIG_GRKERNSEC
46632 + old_acl = current->acl;
46633 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
46634 + old_exec_file = current->exec_file;
46635 + get_file(file);
46636 + current->exec_file = file;
46637 +#endif
46638 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46639 + /* limit suid stack to 8MB
46640 + we saved the old limits above and will restore them if this exec fails
46641 + */
46642 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
46643 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
46644 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
46645 +#endif
46646 +
46647 + if (!gr_tpe_allow(file)) {
46648 + retval = -EACCES;
46649 + goto out_fail;
46650 + }
46651 +
46652 + if (gr_check_crash_exec(file)) {
46653 + retval = -EACCES;
46654 + goto out_fail;
46655 + }
46656 +
46657 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
46658 + bprm->unsafe);
46659 + if (retval < 0)
46660 + goto out_fail;
46661 +
46662 retval = copy_strings_kernel(1, &bprm->filename, bprm);
46663 if (retval < 0)
46664 - goto out;
46665 + goto out_fail;
46666
46667 bprm->exec = bprm->p;
46668 retval = copy_strings(bprm->envc, envp, bprm);
46669 if (retval < 0)
46670 - goto out;
46671 + goto out_fail;
46672
46673 retval = copy_strings(bprm->argc, argv, bprm);
46674 if (retval < 0)
46675 - goto out;
46676 + goto out_fail;
46677 +
46678 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
46679 +
46680 + gr_handle_exec_args(bprm, argv);
46681
46682 retval = search_binary_handler(bprm,regs);
46683 if (retval < 0)
46684 - goto out;
46685 + goto out_fail;
46686 +#ifdef CONFIG_GRKERNSEC
46687 + if (old_exec_file)
46688 + fput(old_exec_file);
46689 +#endif
46690
46691 /* execve succeeded */
46692 +
46693 + increment_exec_counter();
46694 current->fs->in_exec = 0;
46695 current->in_execve = 0;
46696 acct_update_integrals(current);
46697 @@ -1537,6 +1662,14 @@ static int do_execve_common(const char *filename,
46698 put_files_struct(displaced);
46699 return retval;
46700
46701 +out_fail:
46702 +#ifdef CONFIG_GRKERNSEC
46703 + current->acl = old_acl;
46704 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
46705 + fput(current->exec_file);
46706 + current->exec_file = old_exec_file;
46707 +#endif
46708 +
46709 out:
46710 if (bprm->mm) {
46711 acct_arg_size(bprm, 0);
46712 @@ -1712,3 +1845,253 @@ int kernel_execve(const char *filename,
46713 ret_from_kernel_execve(p);
46714 }
46715 #endif
46716 +
46717 +int pax_check_flags(unsigned long *flags)
46718 +{
46719 + int retval = 0;
46720 +
46721 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
46722 + if (*flags & MF_PAX_SEGMEXEC)
46723 + {
46724 + *flags &= ~MF_PAX_SEGMEXEC;
46725 + retval = -EINVAL;
46726 + }
46727 +#endif
46728 +
46729 + if ((*flags & MF_PAX_PAGEEXEC)
46730 +
46731 +#ifdef CONFIG_PAX_PAGEEXEC
46732 + && (*flags & MF_PAX_SEGMEXEC)
46733 +#endif
46734 +
46735 + )
46736 + {
46737 + *flags &= ~MF_PAX_PAGEEXEC;
46738 + retval = -EINVAL;
46739 + }
46740 +
46741 + if ((*flags & MF_PAX_MPROTECT)
46742 +
46743 +#ifdef CONFIG_PAX_MPROTECT
46744 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
46745 +#endif
46746 +
46747 + )
46748 + {
46749 + *flags &= ~MF_PAX_MPROTECT;
46750 + retval = -EINVAL;
46751 + }
46752 +
46753 + if ((*flags & MF_PAX_EMUTRAMP)
46754 +
46755 +#ifdef CONFIG_PAX_EMUTRAMP
46756 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
46757 +#endif
46758 +
46759 + )
46760 + {
46761 + *flags &= ~MF_PAX_EMUTRAMP;
46762 + retval = -EINVAL;
46763 + }
46764 +
46765 + return retval;
46766 +}
46767 +
46768 +EXPORT_SYMBOL(pax_check_flags);
46769 +
46770 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46771 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
46772 +{
46773 + struct task_struct *tsk = current;
46774 + struct mm_struct *mm = current->mm;
46775 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
46776 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
46777 + char *path_exec = NULL;
46778 + char *path_fault = NULL;
46779 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
46780 + siginfo_t info = { };
46781 +
46782 + if (buffer_exec && buffer_fault) {
46783 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
46784 +
46785 + down_read(&mm->mmap_sem);
46786 + vma = mm->mmap;
46787 + while (vma && (!vma_exec || !vma_fault)) {
46788 + if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
46789 + vma_exec = vma;
46790 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
46791 + vma_fault = vma;
46792 + vma = vma->vm_next;
46793 + }
46794 + if (vma_exec) {
46795 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
46796 + if (IS_ERR(path_exec))
46797 + path_exec = "<path too long>";
46798 + else {
46799 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
46800 + if (path_exec) {
46801 + *path_exec = 0;
46802 + path_exec = buffer_exec;
46803 + } else
46804 + path_exec = "<path too long>";
46805 + }
46806 + }
46807 + if (vma_fault) {
46808 + start = vma_fault->vm_start;
46809 + end = vma_fault->vm_end;
46810 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
46811 + if (vma_fault->vm_file) {
46812 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
46813 + if (IS_ERR(path_fault))
46814 + path_fault = "<path too long>";
46815 + else {
46816 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
46817 + if (path_fault) {
46818 + *path_fault = 0;
46819 + path_fault = buffer_fault;
46820 + } else
46821 + path_fault = "<path too long>";
46822 + }
46823 + } else
46824 + path_fault = "<anonymous mapping>";
46825 + }
46826 + up_read(&mm->mmap_sem);
46827 + }
46828 + if (tsk->signal->curr_ip)
46829 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
46830 + else
46831 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
46832 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
46833 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
46834 + task_uid(tsk), task_euid(tsk), pc, sp);
46835 + free_page((unsigned long)buffer_exec);
46836 + free_page((unsigned long)buffer_fault);
46837 + pax_report_insns(regs, pc, sp);
46838 + info.si_signo = SIGKILL;
46839 + info.si_errno = 0;
46840 + info.si_code = SI_KERNEL;
46841 + info.si_pid = 0;
46842 + info.si_uid = 0;
46843 + do_coredump(&info, regs);
46844 +}
46845 +#endif
46846 +
46847 +#ifdef CONFIG_PAX_REFCOUNT
46848 +void pax_report_refcount_overflow(struct pt_regs *regs)
46849 +{
46850 + if (current->signal->curr_ip)
46851 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
46852 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
46853 + else
46854 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
46855 + current->comm, task_pid_nr(current), current_uid(), current_euid());
46856 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
46857 + show_regs(regs);
46858 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
46859 +}
46860 +#endif
46861 +
46862 +#ifdef CONFIG_PAX_USERCOPY
46863 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
46864 +static noinline int check_stack_object(const void *obj, unsigned long len)
46865 +{
46866 + const void * const stack = task_stack_page(current);
46867 + const void * const stackend = stack + THREAD_SIZE;
46868 +
46869 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
46870 + const void *frame = NULL;
46871 + const void *oldframe;
46872 +#endif
46873 +
46874 + if (obj + len < obj)
46875 + return -1;
46876 +
46877 + if (obj + len <= stack || stackend <= obj)
46878 + return 0;
46879 +
46880 + if (obj < stack || stackend < obj + len)
46881 + return -1;
46882 +
46883 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
46884 + oldframe = __builtin_frame_address(1);
46885 + if (oldframe)
46886 + frame = __builtin_frame_address(2);
46887 + /*
46888 + low ----------------------------------------------> high
46889 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
46890 + ^----------------^
46891 + allow copies only within here
46892 + */
46893 + while (stack <= frame && frame < stackend) {
46894 + /* if obj + len extends past the last frame, this
46895 + check won't pass and the next frame will be 0,
46896 + causing us to bail out and correctly report
46897 + the copy as invalid
46898 + */
46899 + if (obj + len <= frame)
46900 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
46901 + oldframe = frame;
46902 + frame = *(const void * const *)frame;
46903 + }
46904 + return -1;
46905 +#else
46906 + return 1;
46907 +#endif
46908 +}
46909 +
46910 +static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
46911 +{
46912 + if (current->signal->curr_ip)
46913 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
46914 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
46915 + else
46916 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
46917 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
46918 + dump_stack();
46919 + gr_handle_kernel_exploit();
46920 + do_group_exit(SIGKILL);
46921 +}
46922 +#endif
46923 +
46924 +void check_object_size(const void *ptr, unsigned long n, bool to)
46925 +{
46926 +
46927 +#ifdef CONFIG_PAX_USERCOPY
46928 + const char *type;
46929 +
46930 + if (!n)
46931 + return;
46932 +
46933 + type = check_heap_object(ptr, n);
46934 + if (!type) {
46935 + if (check_stack_object(ptr, n) != -1)
46936 + return;
46937 + type = "<process stack>";
46938 + }
46939 +
46940 + pax_report_usercopy(ptr, n, to, type);
46941 +#endif
46942 +
46943 +}
46944 +EXPORT_SYMBOL(check_object_size);
46945 +
46946 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
46947 +void pax_track_stack(void)
46948 +{
46949 + unsigned long sp = (unsigned long)&sp;
46950 + if (sp < current_thread_info()->lowest_stack &&
46951 + sp > (unsigned long)task_stack_page(current))
46952 + current_thread_info()->lowest_stack = sp;
46953 +}
46954 +EXPORT_SYMBOL(pax_track_stack);
46955 +#endif
46956 +
46957 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
46958 +void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
46959 +{
46960 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
46961 + dump_stack();
46962 + do_group_exit(SIGKILL);
46963 +}
46964 +EXPORT_SYMBOL(report_size_overflow);
46965 +#endif
46966 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
46967 index 2616d0e..2ffdec9 100644
46968 --- a/fs/ext2/balloc.c
46969 +++ b/fs/ext2/balloc.c
46970 @@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
46971
46972 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
46973 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
46974 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
46975 + if (free_blocks < root_blocks + 1 &&
46976 !uid_eq(sbi->s_resuid, current_fsuid()) &&
46977 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
46978 - !in_group_p (sbi->s_resgid))) {
46979 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
46980 return 0;
46981 }
46982 return 1;
46983 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
46984 index 22548f5..41521d8 100644
46985 --- a/fs/ext3/balloc.c
46986 +++ b/fs/ext3/balloc.c
46987 @@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
46988
46989 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
46990 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
46991 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
46992 + if (free_blocks < root_blocks + 1 &&
46993 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
46994 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
46995 - !in_group_p (sbi->s_resgid))) {
46996 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
46997 return 0;
46998 }
46999 return 1;
47000 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
47001 index cf18217..8f6b9c3 100644
47002 --- a/fs/ext4/balloc.c
47003 +++ b/fs/ext4/balloc.c
47004 @@ -498,8 +498,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
47005 /* Hm, nope. Are (enough) root reserved clusters available? */
47006 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
47007 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
47008 - capable(CAP_SYS_RESOURCE) ||
47009 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
47010 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
47011 + capable_nolog(CAP_SYS_RESOURCE)) {
47012
47013 if (free_clusters >= (nclusters + dirty_clusters))
47014 return 1;
47015 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
47016 index 3c20de1..6ff2460 100644
47017 --- a/fs/ext4/ext4.h
47018 +++ b/fs/ext4/ext4.h
47019 @@ -1247,19 +1247,19 @@ struct ext4_sb_info {
47020 unsigned long s_mb_last_start;
47021
47022 /* stats for buddy allocator */
47023 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
47024 - atomic_t s_bal_success; /* we found long enough chunks */
47025 - atomic_t s_bal_allocated; /* in blocks */
47026 - atomic_t s_bal_ex_scanned; /* total extents scanned */
47027 - atomic_t s_bal_goals; /* goal hits */
47028 - atomic_t s_bal_breaks; /* too long searches */
47029 - atomic_t s_bal_2orders; /* 2^order hits */
47030 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
47031 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
47032 + atomic_unchecked_t s_bal_allocated; /* in blocks */
47033 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
47034 + atomic_unchecked_t s_bal_goals; /* goal hits */
47035 + atomic_unchecked_t s_bal_breaks; /* too long searches */
47036 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
47037 spinlock_t s_bal_lock;
47038 unsigned long s_mb_buddies_generated;
47039 unsigned long long s_mb_generation_time;
47040 - atomic_t s_mb_lost_chunks;
47041 - atomic_t s_mb_preallocated;
47042 - atomic_t s_mb_discarded;
47043 + atomic_unchecked_t s_mb_lost_chunks;
47044 + atomic_unchecked_t s_mb_preallocated;
47045 + atomic_unchecked_t s_mb_discarded;
47046 atomic_t s_lock_busy;
47047
47048 /* locality groups */
47049 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
47050 index 3a100e7..c7efa88 100644
47051 --- a/fs/ext4/ialloc.c
47052 +++ b/fs/ext4/ialloc.c
47053 @@ -762,7 +762,6 @@ got:
47054
47055 BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
47056 err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
47057 - brelse(block_bitmap_bh);
47058
47059 /* recheck and clear flag under lock if we still need to */
47060 ext4_lock_group(sb, group);
47061 @@ -775,6 +774,7 @@ got:
47062 ext4_group_desc_csum_set(sb, group, gdp);
47063 }
47064 ext4_unlock_group(sb, group);
47065 + brelse(block_bitmap_bh);
47066
47067 if (err)
47068 goto fail;
47069 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
47070 index b3c243b..772c318 100644
47071 --- a/fs/ext4/inode.c
47072 +++ b/fs/ext4/inode.c
47073 @@ -1503,6 +1503,7 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
47074
47075 index = mpd->first_page;
47076 end = mpd->next_page - 1;
47077 + pagevec_init(&pvec, 0);
47078 while (index <= end) {
47079 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
47080 if (nr_pages == 0)
47081 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
47082 index 526e553..3f2de85 100644
47083 --- a/fs/ext4/mballoc.c
47084 +++ b/fs/ext4/mballoc.c
47085 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
47086 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
47087
47088 if (EXT4_SB(sb)->s_mb_stats)
47089 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
47090 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
47091
47092 break;
47093 }
47094 @@ -2044,7 +2044,7 @@ repeat:
47095 ac->ac_status = AC_STATUS_CONTINUE;
47096 ac->ac_flags |= EXT4_MB_HINT_FIRST;
47097 cr = 3;
47098 - atomic_inc(&sbi->s_mb_lost_chunks);
47099 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
47100 goto repeat;
47101 }
47102 }
47103 @@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
47104 if (sbi->s_mb_stats) {
47105 ext4_msg(sb, KERN_INFO,
47106 "mballoc: %u blocks %u reqs (%u success)",
47107 - atomic_read(&sbi->s_bal_allocated),
47108 - atomic_read(&sbi->s_bal_reqs),
47109 - atomic_read(&sbi->s_bal_success));
47110 + atomic_read_unchecked(&sbi->s_bal_allocated),
47111 + atomic_read_unchecked(&sbi->s_bal_reqs),
47112 + atomic_read_unchecked(&sbi->s_bal_success));
47113 ext4_msg(sb, KERN_INFO,
47114 "mballoc: %u extents scanned, %u goal hits, "
47115 "%u 2^N hits, %u breaks, %u lost",
47116 - atomic_read(&sbi->s_bal_ex_scanned),
47117 - atomic_read(&sbi->s_bal_goals),
47118 - atomic_read(&sbi->s_bal_2orders),
47119 - atomic_read(&sbi->s_bal_breaks),
47120 - atomic_read(&sbi->s_mb_lost_chunks));
47121 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
47122 + atomic_read_unchecked(&sbi->s_bal_goals),
47123 + atomic_read_unchecked(&sbi->s_bal_2orders),
47124 + atomic_read_unchecked(&sbi->s_bal_breaks),
47125 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
47126 ext4_msg(sb, KERN_INFO,
47127 "mballoc: %lu generated and it took %Lu",
47128 sbi->s_mb_buddies_generated,
47129 sbi->s_mb_generation_time);
47130 ext4_msg(sb, KERN_INFO,
47131 "mballoc: %u preallocated, %u discarded",
47132 - atomic_read(&sbi->s_mb_preallocated),
47133 - atomic_read(&sbi->s_mb_discarded));
47134 + atomic_read_unchecked(&sbi->s_mb_preallocated),
47135 + atomic_read_unchecked(&sbi->s_mb_discarded));
47136 }
47137
47138 free_percpu(sbi->s_locality_groups);
47139 @@ -3052,16 +3052,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
47140 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
47141
47142 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
47143 - atomic_inc(&sbi->s_bal_reqs);
47144 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
47145 + atomic_inc_unchecked(&sbi->s_bal_reqs);
47146 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
47147 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
47148 - atomic_inc(&sbi->s_bal_success);
47149 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
47150 + atomic_inc_unchecked(&sbi->s_bal_success);
47151 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
47152 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
47153 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
47154 - atomic_inc(&sbi->s_bal_goals);
47155 + atomic_inc_unchecked(&sbi->s_bal_goals);
47156 if (ac->ac_found > sbi->s_mb_max_to_scan)
47157 - atomic_inc(&sbi->s_bal_breaks);
47158 + atomic_inc_unchecked(&sbi->s_bal_breaks);
47159 }
47160
47161 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
47162 @@ -3461,7 +3461,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
47163 trace_ext4_mb_new_inode_pa(ac, pa);
47164
47165 ext4_mb_use_inode_pa(ac, pa);
47166 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
47167 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
47168
47169 ei = EXT4_I(ac->ac_inode);
47170 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
47171 @@ -3521,7 +3521,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
47172 trace_ext4_mb_new_group_pa(ac, pa);
47173
47174 ext4_mb_use_group_pa(ac, pa);
47175 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
47176 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
47177
47178 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
47179 lg = ac->ac_lg;
47180 @@ -3610,7 +3610,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
47181 * from the bitmap and continue.
47182 */
47183 }
47184 - atomic_add(free, &sbi->s_mb_discarded);
47185 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
47186
47187 return err;
47188 }
47189 @@ -3628,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
47190 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
47191 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
47192 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
47193 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
47194 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
47195 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
47196
47197 return 0;
47198 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
47199 index 80928f7..c3f2b1f 100644
47200 --- a/fs/ext4/super.c
47201 +++ b/fs/ext4/super.c
47202 @@ -3202,7 +3202,6 @@ int ext4_calculate_overhead(struct super_block *sb)
47203 ext4_fsblk_t overhead = 0;
47204 char *buf = (char *) get_zeroed_page(GFP_KERNEL);
47205
47206 - memset(buf, 0, PAGE_SIZE);
47207 if (!buf)
47208 return -ENOMEM;
47209
47210 diff --git a/fs/fcntl.c b/fs/fcntl.c
47211 index 71a600a..20d87b1 100644
47212 --- a/fs/fcntl.c
47213 +++ b/fs/fcntl.c
47214 @@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
47215 if (err)
47216 return err;
47217
47218 + if (gr_handle_chroot_fowner(pid, type))
47219 + return -ENOENT;
47220 + if (gr_check_protected_task_fowner(pid, type))
47221 + return -EACCES;
47222 +
47223 f_modown(filp, pid, type, force);
47224 return 0;
47225 }
47226 diff --git a/fs/fifo.c b/fs/fifo.c
47227 index cf6f434..3d7942c 100644
47228 --- a/fs/fifo.c
47229 +++ b/fs/fifo.c
47230 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
47231 */
47232 filp->f_op = &read_pipefifo_fops;
47233 pipe->r_counter++;
47234 - if (pipe->readers++ == 0)
47235 + if (atomic_inc_return(&pipe->readers) == 1)
47236 wake_up_partner(inode);
47237
47238 - if (!pipe->writers) {
47239 + if (!atomic_read(&pipe->writers)) {
47240 if ((filp->f_flags & O_NONBLOCK)) {
47241 /* suppress POLLHUP until we have
47242 * seen a writer */
47243 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
47244 * errno=ENXIO when there is no process reading the FIFO.
47245 */
47246 ret = -ENXIO;
47247 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
47248 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
47249 goto err;
47250
47251 filp->f_op = &write_pipefifo_fops;
47252 pipe->w_counter++;
47253 - if (!pipe->writers++)
47254 + if (atomic_inc_return(&pipe->writers) == 1)
47255 wake_up_partner(inode);
47256
47257 - if (!pipe->readers) {
47258 + if (!atomic_read(&pipe->readers)) {
47259 if (wait_for_partner(inode, &pipe->r_counter))
47260 goto err_wr;
47261 }
47262 @@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
47263 */
47264 filp->f_op = &rdwr_pipefifo_fops;
47265
47266 - pipe->readers++;
47267 - pipe->writers++;
47268 + atomic_inc(&pipe->readers);
47269 + atomic_inc(&pipe->writers);
47270 pipe->r_counter++;
47271 pipe->w_counter++;
47272 - if (pipe->readers == 1 || pipe->writers == 1)
47273 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
47274 wake_up_partner(inode);
47275 break;
47276
47277 @@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
47278 return 0;
47279
47280 err_rd:
47281 - if (!--pipe->readers)
47282 + if (atomic_dec_and_test(&pipe->readers))
47283 wake_up_interruptible(&pipe->wait);
47284 ret = -ERESTARTSYS;
47285 goto err;
47286
47287 err_wr:
47288 - if (!--pipe->writers)
47289 + if (atomic_dec_and_test(&pipe->writers))
47290 wake_up_interruptible(&pipe->wait);
47291 ret = -ERESTARTSYS;
47292 goto err;
47293
47294 err:
47295 - if (!pipe->readers && !pipe->writers)
47296 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
47297 free_pipe_info(inode);
47298
47299 err_nocleanup:
47300 diff --git a/fs/file.c b/fs/file.c
47301 index eff2316..8c8930c 100644
47302 --- a/fs/file.c
47303 +++ b/fs/file.c
47304 @@ -16,6 +16,7 @@
47305 #include <linux/slab.h>
47306 #include <linux/vmalloc.h>
47307 #include <linux/file.h>
47308 +#include <linux/security.h>
47309 #include <linux/fdtable.h>
47310 #include <linux/bitops.h>
47311 #include <linux/interrupt.h>
47312 @@ -898,6 +899,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
47313 if (!file)
47314 return __close_fd(files, fd);
47315
47316 + gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
47317 if (fd >= rlimit(RLIMIT_NOFILE))
47318 return -EBADF;
47319
47320 @@ -924,6 +926,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
47321 if (unlikely(oldfd == newfd))
47322 return -EINVAL;
47323
47324 + gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
47325 if (newfd >= rlimit(RLIMIT_NOFILE))
47326 return -EBADF;
47327
47328 @@ -979,6 +982,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
47329 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
47330 {
47331 int err;
47332 + gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
47333 if (from >= rlimit(RLIMIT_NOFILE))
47334 return -EINVAL;
47335 err = alloc_fd(from, flags);
47336 diff --git a/fs/filesystems.c b/fs/filesystems.c
47337 index da165f6..3671bdb 100644
47338 --- a/fs/filesystems.c
47339 +++ b/fs/filesystems.c
47340 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
47341 int len = dot ? dot - name : strlen(name);
47342
47343 fs = __get_fs_type(name, len);
47344 +
47345 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
47346 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
47347 +#else
47348 if (!fs && (request_module("%.*s", len, name) == 0))
47349 +#endif
47350 fs = __get_fs_type(name, len);
47351
47352 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
47353 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
47354 index 5df4775..f656176 100644
47355 --- a/fs/fs_struct.c
47356 +++ b/fs/fs_struct.c
47357 @@ -4,6 +4,7 @@
47358 #include <linux/path.h>
47359 #include <linux/slab.h>
47360 #include <linux/fs_struct.h>
47361 +#include <linux/grsecurity.h>
47362 #include "internal.h"
47363
47364 /*
47365 @@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
47366 write_seqcount_begin(&fs->seq);
47367 old_root = fs->root;
47368 fs->root = *path;
47369 + gr_set_chroot_entries(current, path);
47370 write_seqcount_end(&fs->seq);
47371 spin_unlock(&fs->lock);
47372 if (old_root.dentry)
47373 @@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
47374 return 1;
47375 }
47376
47377 +static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
47378 +{
47379 + if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
47380 + return 0;
47381 + *p = *new;
47382 +
47383 + /* This function is only called from pivot_root(). Leave our
47384 + gr_chroot_dentry and is_chrooted flags as-is, so that a
47385 + pivoted root isn't treated as a chroot
47386 + */
47387 + //gr_set_chroot_entries(task, new);
47388 +
47389 + return 1;
47390 +}
47391 +
47392 void chroot_fs_refs(struct path *old_root, struct path *new_root)
47393 {
47394 struct task_struct *g, *p;
47395 @@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
47396 int hits = 0;
47397 spin_lock(&fs->lock);
47398 write_seqcount_begin(&fs->seq);
47399 - hits += replace_path(&fs->root, old_root, new_root);
47400 + hits += replace_root_path(p, &fs->root, old_root, new_root);
47401 hits += replace_path(&fs->pwd, old_root, new_root);
47402 write_seqcount_end(&fs->seq);
47403 while (hits--) {
47404 @@ -94,12 +111,15 @@ void exit_fs(struct task_struct *tsk)
47405 {
47406 struct fs_struct *fs = tsk->fs;
47407
47408 + gr_put_exec_file(tsk);
47409 +
47410 if (fs) {
47411 int kill;
47412 task_lock(tsk);
47413 spin_lock(&fs->lock);
47414 tsk->fs = NULL;
47415 - kill = !--fs->users;
47416 + gr_clear_chroot_entries(tsk);
47417 + kill = !atomic_dec_return(&fs->users);
47418 spin_unlock(&fs->lock);
47419 task_unlock(tsk);
47420 if (kill)
47421 @@ -112,7 +132,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
47422 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
47423 /* We don't need to lock fs - think why ;-) */
47424 if (fs) {
47425 - fs->users = 1;
47426 + atomic_set(&fs->users, 1);
47427 fs->in_exec = 0;
47428 spin_lock_init(&fs->lock);
47429 seqcount_init(&fs->seq);
47430 @@ -121,6 +141,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
47431 spin_lock(&old->lock);
47432 fs->root = old->root;
47433 path_get(&fs->root);
47434 + /* instead of calling gr_set_chroot_entries here,
47435 + we call it from every caller of this function
47436 + */
47437 fs->pwd = old->pwd;
47438 path_get(&fs->pwd);
47439 spin_unlock(&old->lock);
47440 @@ -139,8 +162,9 @@ int unshare_fs_struct(void)
47441
47442 task_lock(current);
47443 spin_lock(&fs->lock);
47444 - kill = !--fs->users;
47445 + kill = !atomic_dec_return(&fs->users);
47446 current->fs = new_fs;
47447 + gr_set_chroot_entries(current, &new_fs->root);
47448 spin_unlock(&fs->lock);
47449 task_unlock(current);
47450
47451 @@ -153,13 +177,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
47452
47453 int current_umask(void)
47454 {
47455 - return current->fs->umask;
47456 + return current->fs->umask | gr_acl_umask();
47457 }
47458 EXPORT_SYMBOL(current_umask);
47459
47460 /* to be mentioned only in INIT_TASK */
47461 struct fs_struct init_fs = {
47462 - .users = 1,
47463 + .users = ATOMIC_INIT(1),
47464 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
47465 .seq = SEQCNT_ZERO,
47466 .umask = 0022,
47467 @@ -169,18 +193,21 @@ void daemonize_fs_struct(void)
47468 {
47469 struct fs_struct *fs = current->fs;
47470
47471 + gr_put_exec_file(current);
47472 +
47473 if (fs) {
47474 int kill;
47475
47476 task_lock(current);
47477
47478 spin_lock(&init_fs.lock);
47479 - init_fs.users++;
47480 + atomic_inc(&init_fs.users);
47481 spin_unlock(&init_fs.lock);
47482
47483 spin_lock(&fs->lock);
47484 current->fs = &init_fs;
47485 - kill = !--fs->users;
47486 + gr_set_chroot_entries(current, &current->fs->root);
47487 + kill = !atomic_dec_return(&fs->users);
47488 spin_unlock(&fs->lock);
47489
47490 task_unlock(current);
47491 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
47492 index 9905350..02eaec4 100644
47493 --- a/fs/fscache/cookie.c
47494 +++ b/fs/fscache/cookie.c
47495 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
47496 parent ? (char *) parent->def->name : "<no-parent>",
47497 def->name, netfs_data);
47498
47499 - fscache_stat(&fscache_n_acquires);
47500 + fscache_stat_unchecked(&fscache_n_acquires);
47501
47502 /* if there's no parent cookie, then we don't create one here either */
47503 if (!parent) {
47504 - fscache_stat(&fscache_n_acquires_null);
47505 + fscache_stat_unchecked(&fscache_n_acquires_null);
47506 _leave(" [no parent]");
47507 return NULL;
47508 }
47509 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
47510 /* allocate and initialise a cookie */
47511 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
47512 if (!cookie) {
47513 - fscache_stat(&fscache_n_acquires_oom);
47514 + fscache_stat_unchecked(&fscache_n_acquires_oom);
47515 _leave(" [ENOMEM]");
47516 return NULL;
47517 }
47518 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
47519
47520 switch (cookie->def->type) {
47521 case FSCACHE_COOKIE_TYPE_INDEX:
47522 - fscache_stat(&fscache_n_cookie_index);
47523 + fscache_stat_unchecked(&fscache_n_cookie_index);
47524 break;
47525 case FSCACHE_COOKIE_TYPE_DATAFILE:
47526 - fscache_stat(&fscache_n_cookie_data);
47527 + fscache_stat_unchecked(&fscache_n_cookie_data);
47528 break;
47529 default:
47530 - fscache_stat(&fscache_n_cookie_special);
47531 + fscache_stat_unchecked(&fscache_n_cookie_special);
47532 break;
47533 }
47534
47535 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
47536 if (fscache_acquire_non_index_cookie(cookie) < 0) {
47537 atomic_dec(&parent->n_children);
47538 __fscache_cookie_put(cookie);
47539 - fscache_stat(&fscache_n_acquires_nobufs);
47540 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
47541 _leave(" = NULL");
47542 return NULL;
47543 }
47544 }
47545
47546 - fscache_stat(&fscache_n_acquires_ok);
47547 + fscache_stat_unchecked(&fscache_n_acquires_ok);
47548 _leave(" = %p", cookie);
47549 return cookie;
47550 }
47551 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
47552 cache = fscache_select_cache_for_object(cookie->parent);
47553 if (!cache) {
47554 up_read(&fscache_addremove_sem);
47555 - fscache_stat(&fscache_n_acquires_no_cache);
47556 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
47557 _leave(" = -ENOMEDIUM [no cache]");
47558 return -ENOMEDIUM;
47559 }
47560 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
47561 object = cache->ops->alloc_object(cache, cookie);
47562 fscache_stat_d(&fscache_n_cop_alloc_object);
47563 if (IS_ERR(object)) {
47564 - fscache_stat(&fscache_n_object_no_alloc);
47565 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
47566 ret = PTR_ERR(object);
47567 goto error;
47568 }
47569
47570 - fscache_stat(&fscache_n_object_alloc);
47571 + fscache_stat_unchecked(&fscache_n_object_alloc);
47572
47573 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
47574
47575 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
47576 struct fscache_object *object;
47577 struct hlist_node *_p;
47578
47579 - fscache_stat(&fscache_n_updates);
47580 + fscache_stat_unchecked(&fscache_n_updates);
47581
47582 if (!cookie) {
47583 - fscache_stat(&fscache_n_updates_null);
47584 + fscache_stat_unchecked(&fscache_n_updates_null);
47585 _leave(" [no cookie]");
47586 return;
47587 }
47588 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
47589 struct fscache_object *object;
47590 unsigned long event;
47591
47592 - fscache_stat(&fscache_n_relinquishes);
47593 + fscache_stat_unchecked(&fscache_n_relinquishes);
47594 if (retire)
47595 - fscache_stat(&fscache_n_relinquishes_retire);
47596 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
47597
47598 if (!cookie) {
47599 - fscache_stat(&fscache_n_relinquishes_null);
47600 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
47601 _leave(" [no cookie]");
47602 return;
47603 }
47604 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
47605
47606 /* wait for the cookie to finish being instantiated (or to fail) */
47607 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
47608 - fscache_stat(&fscache_n_relinquishes_waitcrt);
47609 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
47610 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
47611 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
47612 }
47613 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
47614 index f6aad48..88dcf26 100644
47615 --- a/fs/fscache/internal.h
47616 +++ b/fs/fscache/internal.h
47617 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
47618 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
47619 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
47620
47621 -extern atomic_t fscache_n_op_pend;
47622 -extern atomic_t fscache_n_op_run;
47623 -extern atomic_t fscache_n_op_enqueue;
47624 -extern atomic_t fscache_n_op_deferred_release;
47625 -extern atomic_t fscache_n_op_release;
47626 -extern atomic_t fscache_n_op_gc;
47627 -extern atomic_t fscache_n_op_cancelled;
47628 -extern atomic_t fscache_n_op_rejected;
47629 +extern atomic_unchecked_t fscache_n_op_pend;
47630 +extern atomic_unchecked_t fscache_n_op_run;
47631 +extern atomic_unchecked_t fscache_n_op_enqueue;
47632 +extern atomic_unchecked_t fscache_n_op_deferred_release;
47633 +extern atomic_unchecked_t fscache_n_op_release;
47634 +extern atomic_unchecked_t fscache_n_op_gc;
47635 +extern atomic_unchecked_t fscache_n_op_cancelled;
47636 +extern atomic_unchecked_t fscache_n_op_rejected;
47637
47638 -extern atomic_t fscache_n_attr_changed;
47639 -extern atomic_t fscache_n_attr_changed_ok;
47640 -extern atomic_t fscache_n_attr_changed_nobufs;
47641 -extern atomic_t fscache_n_attr_changed_nomem;
47642 -extern atomic_t fscache_n_attr_changed_calls;
47643 +extern atomic_unchecked_t fscache_n_attr_changed;
47644 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
47645 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
47646 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
47647 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
47648
47649 -extern atomic_t fscache_n_allocs;
47650 -extern atomic_t fscache_n_allocs_ok;
47651 -extern atomic_t fscache_n_allocs_wait;
47652 -extern atomic_t fscache_n_allocs_nobufs;
47653 -extern atomic_t fscache_n_allocs_intr;
47654 -extern atomic_t fscache_n_allocs_object_dead;
47655 -extern atomic_t fscache_n_alloc_ops;
47656 -extern atomic_t fscache_n_alloc_op_waits;
47657 +extern atomic_unchecked_t fscache_n_allocs;
47658 +extern atomic_unchecked_t fscache_n_allocs_ok;
47659 +extern atomic_unchecked_t fscache_n_allocs_wait;
47660 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
47661 +extern atomic_unchecked_t fscache_n_allocs_intr;
47662 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
47663 +extern atomic_unchecked_t fscache_n_alloc_ops;
47664 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
47665
47666 -extern atomic_t fscache_n_retrievals;
47667 -extern atomic_t fscache_n_retrievals_ok;
47668 -extern atomic_t fscache_n_retrievals_wait;
47669 -extern atomic_t fscache_n_retrievals_nodata;
47670 -extern atomic_t fscache_n_retrievals_nobufs;
47671 -extern atomic_t fscache_n_retrievals_intr;
47672 -extern atomic_t fscache_n_retrievals_nomem;
47673 -extern atomic_t fscache_n_retrievals_object_dead;
47674 -extern atomic_t fscache_n_retrieval_ops;
47675 -extern atomic_t fscache_n_retrieval_op_waits;
47676 +extern atomic_unchecked_t fscache_n_retrievals;
47677 +extern atomic_unchecked_t fscache_n_retrievals_ok;
47678 +extern atomic_unchecked_t fscache_n_retrievals_wait;
47679 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
47680 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
47681 +extern atomic_unchecked_t fscache_n_retrievals_intr;
47682 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
47683 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
47684 +extern atomic_unchecked_t fscache_n_retrieval_ops;
47685 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
47686
47687 -extern atomic_t fscache_n_stores;
47688 -extern atomic_t fscache_n_stores_ok;
47689 -extern atomic_t fscache_n_stores_again;
47690 -extern atomic_t fscache_n_stores_nobufs;
47691 -extern atomic_t fscache_n_stores_oom;
47692 -extern atomic_t fscache_n_store_ops;
47693 -extern atomic_t fscache_n_store_calls;
47694 -extern atomic_t fscache_n_store_pages;
47695 -extern atomic_t fscache_n_store_radix_deletes;
47696 -extern atomic_t fscache_n_store_pages_over_limit;
47697 +extern atomic_unchecked_t fscache_n_stores;
47698 +extern atomic_unchecked_t fscache_n_stores_ok;
47699 +extern atomic_unchecked_t fscache_n_stores_again;
47700 +extern atomic_unchecked_t fscache_n_stores_nobufs;
47701 +extern atomic_unchecked_t fscache_n_stores_oom;
47702 +extern atomic_unchecked_t fscache_n_store_ops;
47703 +extern atomic_unchecked_t fscache_n_store_calls;
47704 +extern atomic_unchecked_t fscache_n_store_pages;
47705 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
47706 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
47707
47708 -extern atomic_t fscache_n_store_vmscan_not_storing;
47709 -extern atomic_t fscache_n_store_vmscan_gone;
47710 -extern atomic_t fscache_n_store_vmscan_busy;
47711 -extern atomic_t fscache_n_store_vmscan_cancelled;
47712 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
47713 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
47714 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
47715 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
47716
47717 -extern atomic_t fscache_n_marks;
47718 -extern atomic_t fscache_n_uncaches;
47719 +extern atomic_unchecked_t fscache_n_marks;
47720 +extern atomic_unchecked_t fscache_n_uncaches;
47721
47722 -extern atomic_t fscache_n_acquires;
47723 -extern atomic_t fscache_n_acquires_null;
47724 -extern atomic_t fscache_n_acquires_no_cache;
47725 -extern atomic_t fscache_n_acquires_ok;
47726 -extern atomic_t fscache_n_acquires_nobufs;
47727 -extern atomic_t fscache_n_acquires_oom;
47728 +extern atomic_unchecked_t fscache_n_acquires;
47729 +extern atomic_unchecked_t fscache_n_acquires_null;
47730 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
47731 +extern atomic_unchecked_t fscache_n_acquires_ok;
47732 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
47733 +extern atomic_unchecked_t fscache_n_acquires_oom;
47734
47735 -extern atomic_t fscache_n_updates;
47736 -extern atomic_t fscache_n_updates_null;
47737 -extern atomic_t fscache_n_updates_run;
47738 +extern atomic_unchecked_t fscache_n_updates;
47739 +extern atomic_unchecked_t fscache_n_updates_null;
47740 +extern atomic_unchecked_t fscache_n_updates_run;
47741
47742 -extern atomic_t fscache_n_relinquishes;
47743 -extern atomic_t fscache_n_relinquishes_null;
47744 -extern atomic_t fscache_n_relinquishes_waitcrt;
47745 -extern atomic_t fscache_n_relinquishes_retire;
47746 +extern atomic_unchecked_t fscache_n_relinquishes;
47747 +extern atomic_unchecked_t fscache_n_relinquishes_null;
47748 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
47749 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
47750
47751 -extern atomic_t fscache_n_cookie_index;
47752 -extern atomic_t fscache_n_cookie_data;
47753 -extern atomic_t fscache_n_cookie_special;
47754 +extern atomic_unchecked_t fscache_n_cookie_index;
47755 +extern atomic_unchecked_t fscache_n_cookie_data;
47756 +extern atomic_unchecked_t fscache_n_cookie_special;
47757
47758 -extern atomic_t fscache_n_object_alloc;
47759 -extern atomic_t fscache_n_object_no_alloc;
47760 -extern atomic_t fscache_n_object_lookups;
47761 -extern atomic_t fscache_n_object_lookups_negative;
47762 -extern atomic_t fscache_n_object_lookups_positive;
47763 -extern atomic_t fscache_n_object_lookups_timed_out;
47764 -extern atomic_t fscache_n_object_created;
47765 -extern atomic_t fscache_n_object_avail;
47766 -extern atomic_t fscache_n_object_dead;
47767 +extern atomic_unchecked_t fscache_n_object_alloc;
47768 +extern atomic_unchecked_t fscache_n_object_no_alloc;
47769 +extern atomic_unchecked_t fscache_n_object_lookups;
47770 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
47771 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
47772 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
47773 +extern atomic_unchecked_t fscache_n_object_created;
47774 +extern atomic_unchecked_t fscache_n_object_avail;
47775 +extern atomic_unchecked_t fscache_n_object_dead;
47776
47777 -extern atomic_t fscache_n_checkaux_none;
47778 -extern atomic_t fscache_n_checkaux_okay;
47779 -extern atomic_t fscache_n_checkaux_update;
47780 -extern atomic_t fscache_n_checkaux_obsolete;
47781 +extern atomic_unchecked_t fscache_n_checkaux_none;
47782 +extern atomic_unchecked_t fscache_n_checkaux_okay;
47783 +extern atomic_unchecked_t fscache_n_checkaux_update;
47784 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
47785
47786 extern atomic_t fscache_n_cop_alloc_object;
47787 extern atomic_t fscache_n_cop_lookup_object;
47788 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
47789 atomic_inc(stat);
47790 }
47791
47792 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
47793 +{
47794 + atomic_inc_unchecked(stat);
47795 +}
47796 +
47797 static inline void fscache_stat_d(atomic_t *stat)
47798 {
47799 atomic_dec(stat);
47800 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
47801
47802 #define __fscache_stat(stat) (NULL)
47803 #define fscache_stat(stat) do {} while (0)
47804 +#define fscache_stat_unchecked(stat) do {} while (0)
47805 #define fscache_stat_d(stat) do {} while (0)
47806 #endif
47807
47808 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
47809 index b6b897c..0ffff9c 100644
47810 --- a/fs/fscache/object.c
47811 +++ b/fs/fscache/object.c
47812 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
47813 /* update the object metadata on disk */
47814 case FSCACHE_OBJECT_UPDATING:
47815 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
47816 - fscache_stat(&fscache_n_updates_run);
47817 + fscache_stat_unchecked(&fscache_n_updates_run);
47818 fscache_stat(&fscache_n_cop_update_object);
47819 object->cache->ops->update_object(object);
47820 fscache_stat_d(&fscache_n_cop_update_object);
47821 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
47822 spin_lock(&object->lock);
47823 object->state = FSCACHE_OBJECT_DEAD;
47824 spin_unlock(&object->lock);
47825 - fscache_stat(&fscache_n_object_dead);
47826 + fscache_stat_unchecked(&fscache_n_object_dead);
47827 goto terminal_transit;
47828
47829 /* handle the parent cache of this object being withdrawn from
47830 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
47831 spin_lock(&object->lock);
47832 object->state = FSCACHE_OBJECT_DEAD;
47833 spin_unlock(&object->lock);
47834 - fscache_stat(&fscache_n_object_dead);
47835 + fscache_stat_unchecked(&fscache_n_object_dead);
47836 goto terminal_transit;
47837
47838 /* complain about the object being woken up once it is
47839 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
47840 parent->cookie->def->name, cookie->def->name,
47841 object->cache->tag->name);
47842
47843 - fscache_stat(&fscache_n_object_lookups);
47844 + fscache_stat_unchecked(&fscache_n_object_lookups);
47845 fscache_stat(&fscache_n_cop_lookup_object);
47846 ret = object->cache->ops->lookup_object(object);
47847 fscache_stat_d(&fscache_n_cop_lookup_object);
47848 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
47849 if (ret == -ETIMEDOUT) {
47850 /* probably stuck behind another object, so move this one to
47851 * the back of the queue */
47852 - fscache_stat(&fscache_n_object_lookups_timed_out);
47853 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
47854 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
47855 }
47856
47857 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
47858
47859 spin_lock(&object->lock);
47860 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
47861 - fscache_stat(&fscache_n_object_lookups_negative);
47862 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
47863
47864 /* transit here to allow write requests to begin stacking up
47865 * and read requests to begin returning ENODATA */
47866 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
47867 * result, in which case there may be data available */
47868 spin_lock(&object->lock);
47869 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
47870 - fscache_stat(&fscache_n_object_lookups_positive);
47871 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
47872
47873 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
47874
47875 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
47876 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
47877 } else {
47878 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
47879 - fscache_stat(&fscache_n_object_created);
47880 + fscache_stat_unchecked(&fscache_n_object_created);
47881
47882 object->state = FSCACHE_OBJECT_AVAILABLE;
47883 spin_unlock(&object->lock);
47884 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
47885 fscache_enqueue_dependents(object);
47886
47887 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
47888 - fscache_stat(&fscache_n_object_avail);
47889 + fscache_stat_unchecked(&fscache_n_object_avail);
47890
47891 _leave("");
47892 }
47893 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
47894 enum fscache_checkaux result;
47895
47896 if (!object->cookie->def->check_aux) {
47897 - fscache_stat(&fscache_n_checkaux_none);
47898 + fscache_stat_unchecked(&fscache_n_checkaux_none);
47899 return FSCACHE_CHECKAUX_OKAY;
47900 }
47901
47902 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
47903 switch (result) {
47904 /* entry okay as is */
47905 case FSCACHE_CHECKAUX_OKAY:
47906 - fscache_stat(&fscache_n_checkaux_okay);
47907 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
47908 break;
47909
47910 /* entry requires update */
47911 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
47912 - fscache_stat(&fscache_n_checkaux_update);
47913 + fscache_stat_unchecked(&fscache_n_checkaux_update);
47914 break;
47915
47916 /* entry requires deletion */
47917 case FSCACHE_CHECKAUX_OBSOLETE:
47918 - fscache_stat(&fscache_n_checkaux_obsolete);
47919 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
47920 break;
47921
47922 default:
47923 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
47924 index 30afdfa..2256596 100644
47925 --- a/fs/fscache/operation.c
47926 +++ b/fs/fscache/operation.c
47927 @@ -17,7 +17,7 @@
47928 #include <linux/slab.h>
47929 #include "internal.h"
47930
47931 -atomic_t fscache_op_debug_id;
47932 +atomic_unchecked_t fscache_op_debug_id;
47933 EXPORT_SYMBOL(fscache_op_debug_id);
47934
47935 /**
47936 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
47937 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
47938 ASSERTCMP(atomic_read(&op->usage), >, 0);
47939
47940 - fscache_stat(&fscache_n_op_enqueue);
47941 + fscache_stat_unchecked(&fscache_n_op_enqueue);
47942 switch (op->flags & FSCACHE_OP_TYPE) {
47943 case FSCACHE_OP_ASYNC:
47944 _debug("queue async");
47945 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
47946 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
47947 if (op->processor)
47948 fscache_enqueue_operation(op);
47949 - fscache_stat(&fscache_n_op_run);
47950 + fscache_stat_unchecked(&fscache_n_op_run);
47951 }
47952
47953 /*
47954 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
47955 if (object->n_ops > 1) {
47956 atomic_inc(&op->usage);
47957 list_add_tail(&op->pend_link, &object->pending_ops);
47958 - fscache_stat(&fscache_n_op_pend);
47959 + fscache_stat_unchecked(&fscache_n_op_pend);
47960 } else if (!list_empty(&object->pending_ops)) {
47961 atomic_inc(&op->usage);
47962 list_add_tail(&op->pend_link, &object->pending_ops);
47963 - fscache_stat(&fscache_n_op_pend);
47964 + fscache_stat_unchecked(&fscache_n_op_pend);
47965 fscache_start_operations(object);
47966 } else {
47967 ASSERTCMP(object->n_in_progress, ==, 0);
47968 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
47969 object->n_exclusive++; /* reads and writes must wait */
47970 atomic_inc(&op->usage);
47971 list_add_tail(&op->pend_link, &object->pending_ops);
47972 - fscache_stat(&fscache_n_op_pend);
47973 + fscache_stat_unchecked(&fscache_n_op_pend);
47974 ret = 0;
47975 } else {
47976 /* not allowed to submit ops in any other state */
47977 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
47978 if (object->n_exclusive > 0) {
47979 atomic_inc(&op->usage);
47980 list_add_tail(&op->pend_link, &object->pending_ops);
47981 - fscache_stat(&fscache_n_op_pend);
47982 + fscache_stat_unchecked(&fscache_n_op_pend);
47983 } else if (!list_empty(&object->pending_ops)) {
47984 atomic_inc(&op->usage);
47985 list_add_tail(&op->pend_link, &object->pending_ops);
47986 - fscache_stat(&fscache_n_op_pend);
47987 + fscache_stat_unchecked(&fscache_n_op_pend);
47988 fscache_start_operations(object);
47989 } else {
47990 ASSERTCMP(object->n_exclusive, ==, 0);
47991 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
47992 object->n_ops++;
47993 atomic_inc(&op->usage);
47994 list_add_tail(&op->pend_link, &object->pending_ops);
47995 - fscache_stat(&fscache_n_op_pend);
47996 + fscache_stat_unchecked(&fscache_n_op_pend);
47997 ret = 0;
47998 } else if (object->state == FSCACHE_OBJECT_DYING ||
47999 object->state == FSCACHE_OBJECT_LC_DYING ||
48000 object->state == FSCACHE_OBJECT_WITHDRAWING) {
48001 - fscache_stat(&fscache_n_op_rejected);
48002 + fscache_stat_unchecked(&fscache_n_op_rejected);
48003 ret = -ENOBUFS;
48004 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
48005 fscache_report_unexpected_submission(object, op, ostate);
48006 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
48007
48008 ret = -EBUSY;
48009 if (!list_empty(&op->pend_link)) {
48010 - fscache_stat(&fscache_n_op_cancelled);
48011 + fscache_stat_unchecked(&fscache_n_op_cancelled);
48012 list_del_init(&op->pend_link);
48013 object->n_ops--;
48014 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
48015 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
48016 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
48017 BUG();
48018
48019 - fscache_stat(&fscache_n_op_release);
48020 + fscache_stat_unchecked(&fscache_n_op_release);
48021
48022 if (op->release) {
48023 op->release(op);
48024 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
48025 * lock, and defer it otherwise */
48026 if (!spin_trylock(&object->lock)) {
48027 _debug("defer put");
48028 - fscache_stat(&fscache_n_op_deferred_release);
48029 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
48030
48031 cache = object->cache;
48032 spin_lock(&cache->op_gc_list_lock);
48033 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
48034
48035 _debug("GC DEFERRED REL OBJ%x OP%x",
48036 object->debug_id, op->debug_id);
48037 - fscache_stat(&fscache_n_op_gc);
48038 + fscache_stat_unchecked(&fscache_n_op_gc);
48039
48040 ASSERTCMP(atomic_read(&op->usage), ==, 0);
48041
48042 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
48043 index 3f7a59b..cf196cc 100644
48044 --- a/fs/fscache/page.c
48045 +++ b/fs/fscache/page.c
48046 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
48047 val = radix_tree_lookup(&cookie->stores, page->index);
48048 if (!val) {
48049 rcu_read_unlock();
48050 - fscache_stat(&fscache_n_store_vmscan_not_storing);
48051 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
48052 __fscache_uncache_page(cookie, page);
48053 return true;
48054 }
48055 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
48056 spin_unlock(&cookie->stores_lock);
48057
48058 if (xpage) {
48059 - fscache_stat(&fscache_n_store_vmscan_cancelled);
48060 - fscache_stat(&fscache_n_store_radix_deletes);
48061 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
48062 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
48063 ASSERTCMP(xpage, ==, page);
48064 } else {
48065 - fscache_stat(&fscache_n_store_vmscan_gone);
48066 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
48067 }
48068
48069 wake_up_bit(&cookie->flags, 0);
48070 @@ -107,7 +107,7 @@ page_busy:
48071 /* we might want to wait here, but that could deadlock the allocator as
48072 * the work threads writing to the cache may all end up sleeping
48073 * on memory allocation */
48074 - fscache_stat(&fscache_n_store_vmscan_busy);
48075 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
48076 return false;
48077 }
48078 EXPORT_SYMBOL(__fscache_maybe_release_page);
48079 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
48080 FSCACHE_COOKIE_STORING_TAG);
48081 if (!radix_tree_tag_get(&cookie->stores, page->index,
48082 FSCACHE_COOKIE_PENDING_TAG)) {
48083 - fscache_stat(&fscache_n_store_radix_deletes);
48084 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
48085 xpage = radix_tree_delete(&cookie->stores, page->index);
48086 }
48087 spin_unlock(&cookie->stores_lock);
48088 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
48089
48090 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
48091
48092 - fscache_stat(&fscache_n_attr_changed_calls);
48093 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
48094
48095 if (fscache_object_is_active(object)) {
48096 fscache_stat(&fscache_n_cop_attr_changed);
48097 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48098
48099 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48100
48101 - fscache_stat(&fscache_n_attr_changed);
48102 + fscache_stat_unchecked(&fscache_n_attr_changed);
48103
48104 op = kzalloc(sizeof(*op), GFP_KERNEL);
48105 if (!op) {
48106 - fscache_stat(&fscache_n_attr_changed_nomem);
48107 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
48108 _leave(" = -ENOMEM");
48109 return -ENOMEM;
48110 }
48111 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48112 if (fscache_submit_exclusive_op(object, op) < 0)
48113 goto nobufs;
48114 spin_unlock(&cookie->lock);
48115 - fscache_stat(&fscache_n_attr_changed_ok);
48116 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
48117 fscache_put_operation(op);
48118 _leave(" = 0");
48119 return 0;
48120 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48121 nobufs:
48122 spin_unlock(&cookie->lock);
48123 kfree(op);
48124 - fscache_stat(&fscache_n_attr_changed_nobufs);
48125 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
48126 _leave(" = %d", -ENOBUFS);
48127 return -ENOBUFS;
48128 }
48129 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
48130 /* allocate a retrieval operation and attempt to submit it */
48131 op = kzalloc(sizeof(*op), GFP_NOIO);
48132 if (!op) {
48133 - fscache_stat(&fscache_n_retrievals_nomem);
48134 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48135 return NULL;
48136 }
48137
48138 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
48139 return 0;
48140 }
48141
48142 - fscache_stat(&fscache_n_retrievals_wait);
48143 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
48144
48145 jif = jiffies;
48146 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
48147 fscache_wait_bit_interruptible,
48148 TASK_INTERRUPTIBLE) != 0) {
48149 - fscache_stat(&fscache_n_retrievals_intr);
48150 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
48151 _leave(" = -ERESTARTSYS");
48152 return -ERESTARTSYS;
48153 }
48154 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
48155 */
48156 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48157 struct fscache_retrieval *op,
48158 - atomic_t *stat_op_waits,
48159 - atomic_t *stat_object_dead)
48160 + atomic_unchecked_t *stat_op_waits,
48161 + atomic_unchecked_t *stat_object_dead)
48162 {
48163 int ret;
48164
48165 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48166 goto check_if_dead;
48167
48168 _debug(">>> WT");
48169 - fscache_stat(stat_op_waits);
48170 + fscache_stat_unchecked(stat_op_waits);
48171 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
48172 fscache_wait_bit_interruptible,
48173 TASK_INTERRUPTIBLE) < 0) {
48174 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48175
48176 check_if_dead:
48177 if (unlikely(fscache_object_is_dead(object))) {
48178 - fscache_stat(stat_object_dead);
48179 + fscache_stat_unchecked(stat_object_dead);
48180 return -ENOBUFS;
48181 }
48182 return 0;
48183 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48184
48185 _enter("%p,%p,,,", cookie, page);
48186
48187 - fscache_stat(&fscache_n_retrievals);
48188 + fscache_stat_unchecked(&fscache_n_retrievals);
48189
48190 if (hlist_empty(&cookie->backing_objects))
48191 goto nobufs;
48192 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48193 goto nobufs_unlock;
48194 spin_unlock(&cookie->lock);
48195
48196 - fscache_stat(&fscache_n_retrieval_ops);
48197 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
48198
48199 /* pin the netfs read context in case we need to do the actual netfs
48200 * read because we've encountered a cache read failure */
48201 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48202
48203 error:
48204 if (ret == -ENOMEM)
48205 - fscache_stat(&fscache_n_retrievals_nomem);
48206 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48207 else if (ret == -ERESTARTSYS)
48208 - fscache_stat(&fscache_n_retrievals_intr);
48209 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
48210 else if (ret == -ENODATA)
48211 - fscache_stat(&fscache_n_retrievals_nodata);
48212 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
48213 else if (ret < 0)
48214 - fscache_stat(&fscache_n_retrievals_nobufs);
48215 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48216 else
48217 - fscache_stat(&fscache_n_retrievals_ok);
48218 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
48219
48220 fscache_put_retrieval(op);
48221 _leave(" = %d", ret);
48222 @@ -429,7 +429,7 @@ nobufs_unlock:
48223 spin_unlock(&cookie->lock);
48224 kfree(op);
48225 nobufs:
48226 - fscache_stat(&fscache_n_retrievals_nobufs);
48227 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48228 _leave(" = -ENOBUFS");
48229 return -ENOBUFS;
48230 }
48231 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48232
48233 _enter("%p,,%d,,,", cookie, *nr_pages);
48234
48235 - fscache_stat(&fscache_n_retrievals);
48236 + fscache_stat_unchecked(&fscache_n_retrievals);
48237
48238 if (hlist_empty(&cookie->backing_objects))
48239 goto nobufs;
48240 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48241 goto nobufs_unlock;
48242 spin_unlock(&cookie->lock);
48243
48244 - fscache_stat(&fscache_n_retrieval_ops);
48245 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
48246
48247 /* pin the netfs read context in case we need to do the actual netfs
48248 * read because we've encountered a cache read failure */
48249 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48250
48251 error:
48252 if (ret == -ENOMEM)
48253 - fscache_stat(&fscache_n_retrievals_nomem);
48254 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48255 else if (ret == -ERESTARTSYS)
48256 - fscache_stat(&fscache_n_retrievals_intr);
48257 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
48258 else if (ret == -ENODATA)
48259 - fscache_stat(&fscache_n_retrievals_nodata);
48260 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
48261 else if (ret < 0)
48262 - fscache_stat(&fscache_n_retrievals_nobufs);
48263 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48264 else
48265 - fscache_stat(&fscache_n_retrievals_ok);
48266 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
48267
48268 fscache_put_retrieval(op);
48269 _leave(" = %d", ret);
48270 @@ -545,7 +545,7 @@ nobufs_unlock:
48271 spin_unlock(&cookie->lock);
48272 kfree(op);
48273 nobufs:
48274 - fscache_stat(&fscache_n_retrievals_nobufs);
48275 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48276 _leave(" = -ENOBUFS");
48277 return -ENOBUFS;
48278 }
48279 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48280
48281 _enter("%p,%p,,,", cookie, page);
48282
48283 - fscache_stat(&fscache_n_allocs);
48284 + fscache_stat_unchecked(&fscache_n_allocs);
48285
48286 if (hlist_empty(&cookie->backing_objects))
48287 goto nobufs;
48288 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48289 goto nobufs_unlock;
48290 spin_unlock(&cookie->lock);
48291
48292 - fscache_stat(&fscache_n_alloc_ops);
48293 + fscache_stat_unchecked(&fscache_n_alloc_ops);
48294
48295 ret = fscache_wait_for_retrieval_activation(
48296 object, op,
48297 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48298
48299 error:
48300 if (ret == -ERESTARTSYS)
48301 - fscache_stat(&fscache_n_allocs_intr);
48302 + fscache_stat_unchecked(&fscache_n_allocs_intr);
48303 else if (ret < 0)
48304 - fscache_stat(&fscache_n_allocs_nobufs);
48305 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
48306 else
48307 - fscache_stat(&fscache_n_allocs_ok);
48308 + fscache_stat_unchecked(&fscache_n_allocs_ok);
48309
48310 fscache_put_retrieval(op);
48311 _leave(" = %d", ret);
48312 @@ -625,7 +625,7 @@ nobufs_unlock:
48313 spin_unlock(&cookie->lock);
48314 kfree(op);
48315 nobufs:
48316 - fscache_stat(&fscache_n_allocs_nobufs);
48317 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
48318 _leave(" = -ENOBUFS");
48319 return -ENOBUFS;
48320 }
48321 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48322
48323 spin_lock(&cookie->stores_lock);
48324
48325 - fscache_stat(&fscache_n_store_calls);
48326 + fscache_stat_unchecked(&fscache_n_store_calls);
48327
48328 /* find a page to store */
48329 page = NULL;
48330 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48331 page = results[0];
48332 _debug("gang %d [%lx]", n, page->index);
48333 if (page->index > op->store_limit) {
48334 - fscache_stat(&fscache_n_store_pages_over_limit);
48335 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
48336 goto superseded;
48337 }
48338
48339 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48340 spin_unlock(&cookie->stores_lock);
48341 spin_unlock(&object->lock);
48342
48343 - fscache_stat(&fscache_n_store_pages);
48344 + fscache_stat_unchecked(&fscache_n_store_pages);
48345 fscache_stat(&fscache_n_cop_write_page);
48346 ret = object->cache->ops->write_page(op, page);
48347 fscache_stat_d(&fscache_n_cop_write_page);
48348 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48349 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48350 ASSERT(PageFsCache(page));
48351
48352 - fscache_stat(&fscache_n_stores);
48353 + fscache_stat_unchecked(&fscache_n_stores);
48354
48355 op = kzalloc(sizeof(*op), GFP_NOIO);
48356 if (!op)
48357 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48358 spin_unlock(&cookie->stores_lock);
48359 spin_unlock(&object->lock);
48360
48361 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
48362 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
48363 op->store_limit = object->store_limit;
48364
48365 if (fscache_submit_op(object, &op->op) < 0)
48366 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48367
48368 spin_unlock(&cookie->lock);
48369 radix_tree_preload_end();
48370 - fscache_stat(&fscache_n_store_ops);
48371 - fscache_stat(&fscache_n_stores_ok);
48372 + fscache_stat_unchecked(&fscache_n_store_ops);
48373 + fscache_stat_unchecked(&fscache_n_stores_ok);
48374
48375 /* the work queue now carries its own ref on the object */
48376 fscache_put_operation(&op->op);
48377 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48378 return 0;
48379
48380 already_queued:
48381 - fscache_stat(&fscache_n_stores_again);
48382 + fscache_stat_unchecked(&fscache_n_stores_again);
48383 already_pending:
48384 spin_unlock(&cookie->stores_lock);
48385 spin_unlock(&object->lock);
48386 spin_unlock(&cookie->lock);
48387 radix_tree_preload_end();
48388 kfree(op);
48389 - fscache_stat(&fscache_n_stores_ok);
48390 + fscache_stat_unchecked(&fscache_n_stores_ok);
48391 _leave(" = 0");
48392 return 0;
48393
48394 @@ -851,14 +851,14 @@ nobufs:
48395 spin_unlock(&cookie->lock);
48396 radix_tree_preload_end();
48397 kfree(op);
48398 - fscache_stat(&fscache_n_stores_nobufs);
48399 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
48400 _leave(" = -ENOBUFS");
48401 return -ENOBUFS;
48402
48403 nomem_free:
48404 kfree(op);
48405 nomem:
48406 - fscache_stat(&fscache_n_stores_oom);
48407 + fscache_stat_unchecked(&fscache_n_stores_oom);
48408 _leave(" = -ENOMEM");
48409 return -ENOMEM;
48410 }
48411 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
48412 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48413 ASSERTCMP(page, !=, NULL);
48414
48415 - fscache_stat(&fscache_n_uncaches);
48416 + fscache_stat_unchecked(&fscache_n_uncaches);
48417
48418 /* cache withdrawal may beat us to it */
48419 if (!PageFsCache(page))
48420 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
48421 unsigned long loop;
48422
48423 #ifdef CONFIG_FSCACHE_STATS
48424 - atomic_add(pagevec->nr, &fscache_n_marks);
48425 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
48426 #endif
48427
48428 for (loop = 0; loop < pagevec->nr; loop++) {
48429 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
48430 index 4765190..2a067f2 100644
48431 --- a/fs/fscache/stats.c
48432 +++ b/fs/fscache/stats.c
48433 @@ -18,95 +18,95 @@
48434 /*
48435 * operation counters
48436 */
48437 -atomic_t fscache_n_op_pend;
48438 -atomic_t fscache_n_op_run;
48439 -atomic_t fscache_n_op_enqueue;
48440 -atomic_t fscache_n_op_requeue;
48441 -atomic_t fscache_n_op_deferred_release;
48442 -atomic_t fscache_n_op_release;
48443 -atomic_t fscache_n_op_gc;
48444 -atomic_t fscache_n_op_cancelled;
48445 -atomic_t fscache_n_op_rejected;
48446 +atomic_unchecked_t fscache_n_op_pend;
48447 +atomic_unchecked_t fscache_n_op_run;
48448 +atomic_unchecked_t fscache_n_op_enqueue;
48449 +atomic_unchecked_t fscache_n_op_requeue;
48450 +atomic_unchecked_t fscache_n_op_deferred_release;
48451 +atomic_unchecked_t fscache_n_op_release;
48452 +atomic_unchecked_t fscache_n_op_gc;
48453 +atomic_unchecked_t fscache_n_op_cancelled;
48454 +atomic_unchecked_t fscache_n_op_rejected;
48455
48456 -atomic_t fscache_n_attr_changed;
48457 -atomic_t fscache_n_attr_changed_ok;
48458 -atomic_t fscache_n_attr_changed_nobufs;
48459 -atomic_t fscache_n_attr_changed_nomem;
48460 -atomic_t fscache_n_attr_changed_calls;
48461 +atomic_unchecked_t fscache_n_attr_changed;
48462 +atomic_unchecked_t fscache_n_attr_changed_ok;
48463 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
48464 +atomic_unchecked_t fscache_n_attr_changed_nomem;
48465 +atomic_unchecked_t fscache_n_attr_changed_calls;
48466
48467 -atomic_t fscache_n_allocs;
48468 -atomic_t fscache_n_allocs_ok;
48469 -atomic_t fscache_n_allocs_wait;
48470 -atomic_t fscache_n_allocs_nobufs;
48471 -atomic_t fscache_n_allocs_intr;
48472 -atomic_t fscache_n_allocs_object_dead;
48473 -atomic_t fscache_n_alloc_ops;
48474 -atomic_t fscache_n_alloc_op_waits;
48475 +atomic_unchecked_t fscache_n_allocs;
48476 +atomic_unchecked_t fscache_n_allocs_ok;
48477 +atomic_unchecked_t fscache_n_allocs_wait;
48478 +atomic_unchecked_t fscache_n_allocs_nobufs;
48479 +atomic_unchecked_t fscache_n_allocs_intr;
48480 +atomic_unchecked_t fscache_n_allocs_object_dead;
48481 +atomic_unchecked_t fscache_n_alloc_ops;
48482 +atomic_unchecked_t fscache_n_alloc_op_waits;
48483
48484 -atomic_t fscache_n_retrievals;
48485 -atomic_t fscache_n_retrievals_ok;
48486 -atomic_t fscache_n_retrievals_wait;
48487 -atomic_t fscache_n_retrievals_nodata;
48488 -atomic_t fscache_n_retrievals_nobufs;
48489 -atomic_t fscache_n_retrievals_intr;
48490 -atomic_t fscache_n_retrievals_nomem;
48491 -atomic_t fscache_n_retrievals_object_dead;
48492 -atomic_t fscache_n_retrieval_ops;
48493 -atomic_t fscache_n_retrieval_op_waits;
48494 +atomic_unchecked_t fscache_n_retrievals;
48495 +atomic_unchecked_t fscache_n_retrievals_ok;
48496 +atomic_unchecked_t fscache_n_retrievals_wait;
48497 +atomic_unchecked_t fscache_n_retrievals_nodata;
48498 +atomic_unchecked_t fscache_n_retrievals_nobufs;
48499 +atomic_unchecked_t fscache_n_retrievals_intr;
48500 +atomic_unchecked_t fscache_n_retrievals_nomem;
48501 +atomic_unchecked_t fscache_n_retrievals_object_dead;
48502 +atomic_unchecked_t fscache_n_retrieval_ops;
48503 +atomic_unchecked_t fscache_n_retrieval_op_waits;
48504
48505 -atomic_t fscache_n_stores;
48506 -atomic_t fscache_n_stores_ok;
48507 -atomic_t fscache_n_stores_again;
48508 -atomic_t fscache_n_stores_nobufs;
48509 -atomic_t fscache_n_stores_oom;
48510 -atomic_t fscache_n_store_ops;
48511 -atomic_t fscache_n_store_calls;
48512 -atomic_t fscache_n_store_pages;
48513 -atomic_t fscache_n_store_radix_deletes;
48514 -atomic_t fscache_n_store_pages_over_limit;
48515 +atomic_unchecked_t fscache_n_stores;
48516 +atomic_unchecked_t fscache_n_stores_ok;
48517 +atomic_unchecked_t fscache_n_stores_again;
48518 +atomic_unchecked_t fscache_n_stores_nobufs;
48519 +atomic_unchecked_t fscache_n_stores_oom;
48520 +atomic_unchecked_t fscache_n_store_ops;
48521 +atomic_unchecked_t fscache_n_store_calls;
48522 +atomic_unchecked_t fscache_n_store_pages;
48523 +atomic_unchecked_t fscache_n_store_radix_deletes;
48524 +atomic_unchecked_t fscache_n_store_pages_over_limit;
48525
48526 -atomic_t fscache_n_store_vmscan_not_storing;
48527 -atomic_t fscache_n_store_vmscan_gone;
48528 -atomic_t fscache_n_store_vmscan_busy;
48529 -atomic_t fscache_n_store_vmscan_cancelled;
48530 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
48531 +atomic_unchecked_t fscache_n_store_vmscan_gone;
48532 +atomic_unchecked_t fscache_n_store_vmscan_busy;
48533 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
48534
48535 -atomic_t fscache_n_marks;
48536 -atomic_t fscache_n_uncaches;
48537 +atomic_unchecked_t fscache_n_marks;
48538 +atomic_unchecked_t fscache_n_uncaches;
48539
48540 -atomic_t fscache_n_acquires;
48541 -atomic_t fscache_n_acquires_null;
48542 -atomic_t fscache_n_acquires_no_cache;
48543 -atomic_t fscache_n_acquires_ok;
48544 -atomic_t fscache_n_acquires_nobufs;
48545 -atomic_t fscache_n_acquires_oom;
48546 +atomic_unchecked_t fscache_n_acquires;
48547 +atomic_unchecked_t fscache_n_acquires_null;
48548 +atomic_unchecked_t fscache_n_acquires_no_cache;
48549 +atomic_unchecked_t fscache_n_acquires_ok;
48550 +atomic_unchecked_t fscache_n_acquires_nobufs;
48551 +atomic_unchecked_t fscache_n_acquires_oom;
48552
48553 -atomic_t fscache_n_updates;
48554 -atomic_t fscache_n_updates_null;
48555 -atomic_t fscache_n_updates_run;
48556 +atomic_unchecked_t fscache_n_updates;
48557 +atomic_unchecked_t fscache_n_updates_null;
48558 +atomic_unchecked_t fscache_n_updates_run;
48559
48560 -atomic_t fscache_n_relinquishes;
48561 -atomic_t fscache_n_relinquishes_null;
48562 -atomic_t fscache_n_relinquishes_waitcrt;
48563 -atomic_t fscache_n_relinquishes_retire;
48564 +atomic_unchecked_t fscache_n_relinquishes;
48565 +atomic_unchecked_t fscache_n_relinquishes_null;
48566 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
48567 +atomic_unchecked_t fscache_n_relinquishes_retire;
48568
48569 -atomic_t fscache_n_cookie_index;
48570 -atomic_t fscache_n_cookie_data;
48571 -atomic_t fscache_n_cookie_special;
48572 +atomic_unchecked_t fscache_n_cookie_index;
48573 +atomic_unchecked_t fscache_n_cookie_data;
48574 +atomic_unchecked_t fscache_n_cookie_special;
48575
48576 -atomic_t fscache_n_object_alloc;
48577 -atomic_t fscache_n_object_no_alloc;
48578 -atomic_t fscache_n_object_lookups;
48579 -atomic_t fscache_n_object_lookups_negative;
48580 -atomic_t fscache_n_object_lookups_positive;
48581 -atomic_t fscache_n_object_lookups_timed_out;
48582 -atomic_t fscache_n_object_created;
48583 -atomic_t fscache_n_object_avail;
48584 -atomic_t fscache_n_object_dead;
48585 +atomic_unchecked_t fscache_n_object_alloc;
48586 +atomic_unchecked_t fscache_n_object_no_alloc;
48587 +atomic_unchecked_t fscache_n_object_lookups;
48588 +atomic_unchecked_t fscache_n_object_lookups_negative;
48589 +atomic_unchecked_t fscache_n_object_lookups_positive;
48590 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
48591 +atomic_unchecked_t fscache_n_object_created;
48592 +atomic_unchecked_t fscache_n_object_avail;
48593 +atomic_unchecked_t fscache_n_object_dead;
48594
48595 -atomic_t fscache_n_checkaux_none;
48596 -atomic_t fscache_n_checkaux_okay;
48597 -atomic_t fscache_n_checkaux_update;
48598 -atomic_t fscache_n_checkaux_obsolete;
48599 +atomic_unchecked_t fscache_n_checkaux_none;
48600 +atomic_unchecked_t fscache_n_checkaux_okay;
48601 +atomic_unchecked_t fscache_n_checkaux_update;
48602 +atomic_unchecked_t fscache_n_checkaux_obsolete;
48603
48604 atomic_t fscache_n_cop_alloc_object;
48605 atomic_t fscache_n_cop_lookup_object;
48606 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
48607 seq_puts(m, "FS-Cache statistics\n");
48608
48609 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
48610 - atomic_read(&fscache_n_cookie_index),
48611 - atomic_read(&fscache_n_cookie_data),
48612 - atomic_read(&fscache_n_cookie_special));
48613 + atomic_read_unchecked(&fscache_n_cookie_index),
48614 + atomic_read_unchecked(&fscache_n_cookie_data),
48615 + atomic_read_unchecked(&fscache_n_cookie_special));
48616
48617 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
48618 - atomic_read(&fscache_n_object_alloc),
48619 - atomic_read(&fscache_n_object_no_alloc),
48620 - atomic_read(&fscache_n_object_avail),
48621 - atomic_read(&fscache_n_object_dead));
48622 + atomic_read_unchecked(&fscache_n_object_alloc),
48623 + atomic_read_unchecked(&fscache_n_object_no_alloc),
48624 + atomic_read_unchecked(&fscache_n_object_avail),
48625 + atomic_read_unchecked(&fscache_n_object_dead));
48626 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
48627 - atomic_read(&fscache_n_checkaux_none),
48628 - atomic_read(&fscache_n_checkaux_okay),
48629 - atomic_read(&fscache_n_checkaux_update),
48630 - atomic_read(&fscache_n_checkaux_obsolete));
48631 + atomic_read_unchecked(&fscache_n_checkaux_none),
48632 + atomic_read_unchecked(&fscache_n_checkaux_okay),
48633 + atomic_read_unchecked(&fscache_n_checkaux_update),
48634 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
48635
48636 seq_printf(m, "Pages : mrk=%u unc=%u\n",
48637 - atomic_read(&fscache_n_marks),
48638 - atomic_read(&fscache_n_uncaches));
48639 + atomic_read_unchecked(&fscache_n_marks),
48640 + atomic_read_unchecked(&fscache_n_uncaches));
48641
48642 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
48643 " oom=%u\n",
48644 - atomic_read(&fscache_n_acquires),
48645 - atomic_read(&fscache_n_acquires_null),
48646 - atomic_read(&fscache_n_acquires_no_cache),
48647 - atomic_read(&fscache_n_acquires_ok),
48648 - atomic_read(&fscache_n_acquires_nobufs),
48649 - atomic_read(&fscache_n_acquires_oom));
48650 + atomic_read_unchecked(&fscache_n_acquires),
48651 + atomic_read_unchecked(&fscache_n_acquires_null),
48652 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
48653 + atomic_read_unchecked(&fscache_n_acquires_ok),
48654 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
48655 + atomic_read_unchecked(&fscache_n_acquires_oom));
48656
48657 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
48658 - atomic_read(&fscache_n_object_lookups),
48659 - atomic_read(&fscache_n_object_lookups_negative),
48660 - atomic_read(&fscache_n_object_lookups_positive),
48661 - atomic_read(&fscache_n_object_created),
48662 - atomic_read(&fscache_n_object_lookups_timed_out));
48663 + atomic_read_unchecked(&fscache_n_object_lookups),
48664 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
48665 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
48666 + atomic_read_unchecked(&fscache_n_object_created),
48667 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
48668
48669 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
48670 - atomic_read(&fscache_n_updates),
48671 - atomic_read(&fscache_n_updates_null),
48672 - atomic_read(&fscache_n_updates_run));
48673 + atomic_read_unchecked(&fscache_n_updates),
48674 + atomic_read_unchecked(&fscache_n_updates_null),
48675 + atomic_read_unchecked(&fscache_n_updates_run));
48676
48677 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
48678 - atomic_read(&fscache_n_relinquishes),
48679 - atomic_read(&fscache_n_relinquishes_null),
48680 - atomic_read(&fscache_n_relinquishes_waitcrt),
48681 - atomic_read(&fscache_n_relinquishes_retire));
48682 + atomic_read_unchecked(&fscache_n_relinquishes),
48683 + atomic_read_unchecked(&fscache_n_relinquishes_null),
48684 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
48685 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
48686
48687 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
48688 - atomic_read(&fscache_n_attr_changed),
48689 - atomic_read(&fscache_n_attr_changed_ok),
48690 - atomic_read(&fscache_n_attr_changed_nobufs),
48691 - atomic_read(&fscache_n_attr_changed_nomem),
48692 - atomic_read(&fscache_n_attr_changed_calls));
48693 + atomic_read_unchecked(&fscache_n_attr_changed),
48694 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
48695 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
48696 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
48697 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
48698
48699 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
48700 - atomic_read(&fscache_n_allocs),
48701 - atomic_read(&fscache_n_allocs_ok),
48702 - atomic_read(&fscache_n_allocs_wait),
48703 - atomic_read(&fscache_n_allocs_nobufs),
48704 - atomic_read(&fscache_n_allocs_intr));
48705 + atomic_read_unchecked(&fscache_n_allocs),
48706 + atomic_read_unchecked(&fscache_n_allocs_ok),
48707 + atomic_read_unchecked(&fscache_n_allocs_wait),
48708 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
48709 + atomic_read_unchecked(&fscache_n_allocs_intr));
48710 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
48711 - atomic_read(&fscache_n_alloc_ops),
48712 - atomic_read(&fscache_n_alloc_op_waits),
48713 - atomic_read(&fscache_n_allocs_object_dead));
48714 + atomic_read_unchecked(&fscache_n_alloc_ops),
48715 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
48716 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
48717
48718 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
48719 " int=%u oom=%u\n",
48720 - atomic_read(&fscache_n_retrievals),
48721 - atomic_read(&fscache_n_retrievals_ok),
48722 - atomic_read(&fscache_n_retrievals_wait),
48723 - atomic_read(&fscache_n_retrievals_nodata),
48724 - atomic_read(&fscache_n_retrievals_nobufs),
48725 - atomic_read(&fscache_n_retrievals_intr),
48726 - atomic_read(&fscache_n_retrievals_nomem));
48727 + atomic_read_unchecked(&fscache_n_retrievals),
48728 + atomic_read_unchecked(&fscache_n_retrievals_ok),
48729 + atomic_read_unchecked(&fscache_n_retrievals_wait),
48730 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
48731 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
48732 + atomic_read_unchecked(&fscache_n_retrievals_intr),
48733 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
48734 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
48735 - atomic_read(&fscache_n_retrieval_ops),
48736 - atomic_read(&fscache_n_retrieval_op_waits),
48737 - atomic_read(&fscache_n_retrievals_object_dead));
48738 + atomic_read_unchecked(&fscache_n_retrieval_ops),
48739 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
48740 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
48741
48742 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
48743 - atomic_read(&fscache_n_stores),
48744 - atomic_read(&fscache_n_stores_ok),
48745 - atomic_read(&fscache_n_stores_again),
48746 - atomic_read(&fscache_n_stores_nobufs),
48747 - atomic_read(&fscache_n_stores_oom));
48748 + atomic_read_unchecked(&fscache_n_stores),
48749 + atomic_read_unchecked(&fscache_n_stores_ok),
48750 + atomic_read_unchecked(&fscache_n_stores_again),
48751 + atomic_read_unchecked(&fscache_n_stores_nobufs),
48752 + atomic_read_unchecked(&fscache_n_stores_oom));
48753 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
48754 - atomic_read(&fscache_n_store_ops),
48755 - atomic_read(&fscache_n_store_calls),
48756 - atomic_read(&fscache_n_store_pages),
48757 - atomic_read(&fscache_n_store_radix_deletes),
48758 - atomic_read(&fscache_n_store_pages_over_limit));
48759 + atomic_read_unchecked(&fscache_n_store_ops),
48760 + atomic_read_unchecked(&fscache_n_store_calls),
48761 + atomic_read_unchecked(&fscache_n_store_pages),
48762 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
48763 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
48764
48765 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
48766 - atomic_read(&fscache_n_store_vmscan_not_storing),
48767 - atomic_read(&fscache_n_store_vmscan_gone),
48768 - atomic_read(&fscache_n_store_vmscan_busy),
48769 - atomic_read(&fscache_n_store_vmscan_cancelled));
48770 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
48771 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
48772 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
48773 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
48774
48775 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
48776 - atomic_read(&fscache_n_op_pend),
48777 - atomic_read(&fscache_n_op_run),
48778 - atomic_read(&fscache_n_op_enqueue),
48779 - atomic_read(&fscache_n_op_cancelled),
48780 - atomic_read(&fscache_n_op_rejected));
48781 + atomic_read_unchecked(&fscache_n_op_pend),
48782 + atomic_read_unchecked(&fscache_n_op_run),
48783 + atomic_read_unchecked(&fscache_n_op_enqueue),
48784 + atomic_read_unchecked(&fscache_n_op_cancelled),
48785 + atomic_read_unchecked(&fscache_n_op_rejected));
48786 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
48787 - atomic_read(&fscache_n_op_deferred_release),
48788 - atomic_read(&fscache_n_op_release),
48789 - atomic_read(&fscache_n_op_gc));
48790 + atomic_read_unchecked(&fscache_n_op_deferred_release),
48791 + atomic_read_unchecked(&fscache_n_op_release),
48792 + atomic_read_unchecked(&fscache_n_op_gc));
48793
48794 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
48795 atomic_read(&fscache_n_cop_alloc_object),
48796 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
48797 index ee8d550..7189d8c 100644
48798 --- a/fs/fuse/cuse.c
48799 +++ b/fs/fuse/cuse.c
48800 @@ -585,10 +585,12 @@ static int __init cuse_init(void)
48801 INIT_LIST_HEAD(&cuse_conntbl[i]);
48802
48803 /* inherit and extend fuse_dev_operations */
48804 - cuse_channel_fops = fuse_dev_operations;
48805 - cuse_channel_fops.owner = THIS_MODULE;
48806 - cuse_channel_fops.open = cuse_channel_open;
48807 - cuse_channel_fops.release = cuse_channel_release;
48808 + pax_open_kernel();
48809 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
48810 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
48811 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
48812 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
48813 + pax_close_kernel();
48814
48815 cuse_class = class_create(THIS_MODULE, "cuse");
48816 if (IS_ERR(cuse_class))
48817 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
48818 index 8c23fa7..0e3aac7 100644
48819 --- a/fs/fuse/dev.c
48820 +++ b/fs/fuse/dev.c
48821 @@ -1241,7 +1241,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
48822 ret = 0;
48823 pipe_lock(pipe);
48824
48825 - if (!pipe->readers) {
48826 + if (!atomic_read(&pipe->readers)) {
48827 send_sig(SIGPIPE, current, 0);
48828 if (!ret)
48829 ret = -EPIPE;
48830 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
48831 index 324bc08..4fdd56e 100644
48832 --- a/fs/fuse/dir.c
48833 +++ b/fs/fuse/dir.c
48834 @@ -1226,7 +1226,7 @@ static char *read_link(struct dentry *dentry)
48835 return link;
48836 }
48837
48838 -static void free_link(char *link)
48839 +static void free_link(const char *link)
48840 {
48841 if (!IS_ERR(link))
48842 free_page((unsigned long) link);
48843 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
48844 index 381893c..3793318 100644
48845 --- a/fs/gfs2/inode.c
48846 +++ b/fs/gfs2/inode.c
48847 @@ -1490,7 +1490,7 @@ out:
48848
48849 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48850 {
48851 - char *s = nd_get_link(nd);
48852 + const char *s = nd_get_link(nd);
48853 if (!IS_ERR(s))
48854 kfree(s);
48855 }
48856 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
48857 index c5bc355..5a513bb 100644
48858 --- a/fs/hugetlbfs/inode.c
48859 +++ b/fs/hugetlbfs/inode.c
48860 @@ -923,7 +923,7 @@ static struct file_system_type hugetlbfs_fs_type = {
48861 .kill_sb = kill_litter_super,
48862 };
48863
48864 -static struct vfsmount *hugetlbfs_vfsmount;
48865 +struct vfsmount *hugetlbfs_vfsmount;
48866
48867 static int can_do_hugetlb_shm(void)
48868 {
48869 diff --git a/fs/inode.c b/fs/inode.c
48870 index 64999f1..8fad608 100644
48871 --- a/fs/inode.c
48872 +++ b/fs/inode.c
48873 @@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
48874
48875 #ifdef CONFIG_SMP
48876 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
48877 - static atomic_t shared_last_ino;
48878 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
48879 + static atomic_unchecked_t shared_last_ino;
48880 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
48881
48882 res = next - LAST_INO_BATCH;
48883 }
48884 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
48885 index 4a6cf28..d3a29d3 100644
48886 --- a/fs/jffs2/erase.c
48887 +++ b/fs/jffs2/erase.c
48888 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
48889 struct jffs2_unknown_node marker = {
48890 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
48891 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
48892 - .totlen = cpu_to_je32(c->cleanmarker_size)
48893 + .totlen = cpu_to_je32(c->cleanmarker_size),
48894 + .hdr_crc = cpu_to_je32(0)
48895 };
48896
48897 jffs2_prealloc_raw_node_refs(c, jeb, 1);
48898 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
48899 index a6597d6..41b30ec 100644
48900 --- a/fs/jffs2/wbuf.c
48901 +++ b/fs/jffs2/wbuf.c
48902 @@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
48903 {
48904 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
48905 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
48906 - .totlen = constant_cpu_to_je32(8)
48907 + .totlen = constant_cpu_to_je32(8),
48908 + .hdr_crc = constant_cpu_to_je32(0)
48909 };
48910
48911 /*
48912 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
48913 index 1a543be..d803c40 100644
48914 --- a/fs/jfs/super.c
48915 +++ b/fs/jfs/super.c
48916 @@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
48917
48918 jfs_inode_cachep =
48919 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
48920 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
48921 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
48922 init_once);
48923 if (jfs_inode_cachep == NULL)
48924 return -ENOMEM;
48925 diff --git a/fs/libfs.c b/fs/libfs.c
48926 index 7cc37ca..b3e3eec 100644
48927 --- a/fs/libfs.c
48928 +++ b/fs/libfs.c
48929 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
48930
48931 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
48932 struct dentry *next;
48933 + char d_name[sizeof(next->d_iname)];
48934 + const unsigned char *name;
48935 +
48936 next = list_entry(p, struct dentry, d_u.d_child);
48937 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
48938 if (!simple_positive(next)) {
48939 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
48940
48941 spin_unlock(&next->d_lock);
48942 spin_unlock(&dentry->d_lock);
48943 - if (filldir(dirent, next->d_name.name,
48944 + name = next->d_name.name;
48945 + if (name == next->d_iname) {
48946 + memcpy(d_name, name, next->d_name.len);
48947 + name = d_name;
48948 + }
48949 + if (filldir(dirent, name,
48950 next->d_name.len, filp->f_pos,
48951 next->d_inode->i_ino,
48952 dt_type(next->d_inode)) < 0)
48953 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
48954 index 05d2912..760abfa 100644
48955 --- a/fs/lockd/clntproc.c
48956 +++ b/fs/lockd/clntproc.c
48957 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
48958 /*
48959 * Cookie counter for NLM requests
48960 */
48961 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
48962 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
48963
48964 void nlmclnt_next_cookie(struct nlm_cookie *c)
48965 {
48966 - u32 cookie = atomic_inc_return(&nlm_cookie);
48967 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
48968
48969 memcpy(c->data, &cookie, 4);
48970 c->len=4;
48971 diff --git a/fs/locks.c b/fs/locks.c
48972 index a94e331..060bce3 100644
48973 --- a/fs/locks.c
48974 +++ b/fs/locks.c
48975 @@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
48976 return;
48977
48978 if (filp->f_op && filp->f_op->flock) {
48979 - struct file_lock fl = {
48980 + struct file_lock flock = {
48981 .fl_pid = current->tgid,
48982 .fl_file = filp,
48983 .fl_flags = FL_FLOCK,
48984 .fl_type = F_UNLCK,
48985 .fl_end = OFFSET_MAX,
48986 };
48987 - filp->f_op->flock(filp, F_SETLKW, &fl);
48988 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
48989 - fl.fl_ops->fl_release_private(&fl);
48990 + filp->f_op->flock(filp, F_SETLKW, &flock);
48991 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
48992 + flock.fl_ops->fl_release_private(&flock);
48993 }
48994
48995 lock_flocks();
48996 diff --git a/fs/namei.c b/fs/namei.c
48997 index 5f4cdf3..959a013 100644
48998 --- a/fs/namei.c
48999 +++ b/fs/namei.c
49000 @@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
49001 if (ret != -EACCES)
49002 return ret;
49003
49004 +#ifdef CONFIG_GRKERNSEC
49005 + /* we'll block if we have to log due to a denied capability use */
49006 + if (mask & MAY_NOT_BLOCK)
49007 + return -ECHILD;
49008 +#endif
49009 +
49010 if (S_ISDIR(inode->i_mode)) {
49011 /* DACs are overridable for directories */
49012 - if (inode_capable(inode, CAP_DAC_OVERRIDE))
49013 - return 0;
49014 if (!(mask & MAY_WRITE))
49015 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
49016 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
49017 + inode_capable(inode, CAP_DAC_READ_SEARCH))
49018 return 0;
49019 + if (inode_capable(inode, CAP_DAC_OVERRIDE))
49020 + return 0;
49021 return -EACCES;
49022 }
49023 /*
49024 + * Searching includes executable on directories, else just read.
49025 + */
49026 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
49027 + if (mask == MAY_READ)
49028 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
49029 + inode_capable(inode, CAP_DAC_READ_SEARCH))
49030 + return 0;
49031 +
49032 + /*
49033 * Read/write DACs are always overridable.
49034 * Executable DACs are overridable when there is
49035 * at least one exec bit set.
49036 @@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
49037 if (inode_capable(inode, CAP_DAC_OVERRIDE))
49038 return 0;
49039
49040 - /*
49041 - * Searching includes executable on directories, else just read.
49042 - */
49043 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
49044 - if (mask == MAY_READ)
49045 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
49046 - return 0;
49047 -
49048 return -EACCES;
49049 }
49050
49051 @@ -826,7 +834,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
49052 {
49053 struct dentry *dentry = link->dentry;
49054 int error;
49055 - char *s;
49056 + const char *s;
49057
49058 BUG_ON(nd->flags & LOOKUP_RCU);
49059
49060 @@ -847,6 +855,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
49061 if (error)
49062 goto out_put_nd_path;
49063
49064 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
49065 + dentry->d_inode, dentry, nd->path.mnt)) {
49066 + error = -EACCES;
49067 + goto out_put_nd_path;
49068 + }
49069 +
49070 nd->last_type = LAST_BIND;
49071 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
49072 error = PTR_ERR(*p);
49073 @@ -1605,6 +1619,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
49074 break;
49075 res = walk_component(nd, path, &nd->last,
49076 nd->last_type, LOOKUP_FOLLOW);
49077 + if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
49078 + res = -EACCES;
49079 put_link(nd, &link, cookie);
49080 } while (res > 0);
49081
49082 @@ -1703,7 +1719,7 @@ EXPORT_SYMBOL(full_name_hash);
49083 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
49084 {
49085 unsigned long a, b, adata, bdata, mask, hash, len;
49086 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
49087 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
49088
49089 hash = a = 0;
49090 len = -sizeof(unsigned long);
49091 @@ -1993,6 +2009,8 @@ static int path_lookupat(int dfd, const char *name,
49092 if (err)
49093 break;
49094 err = lookup_last(nd, &path);
49095 + if (!err && gr_handle_symlink_owner(&link, nd->inode))
49096 + err = -EACCES;
49097 put_link(nd, &link, cookie);
49098 }
49099 }
49100 @@ -2000,6 +2018,21 @@ static int path_lookupat(int dfd, const char *name,
49101 if (!err)
49102 err = complete_walk(nd);
49103
49104 + if (!(nd->flags & LOOKUP_PARENT)) {
49105 +#ifdef CONFIG_GRKERNSEC
49106 + if (flags & LOOKUP_RCU) {
49107 + if (!err)
49108 + path_put(&nd->path);
49109 + err = -ECHILD;
49110 + } else
49111 +#endif
49112 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49113 + if (!err)
49114 + path_put(&nd->path);
49115 + err = -ENOENT;
49116 + }
49117 + }
49118 +
49119 if (!err && nd->flags & LOOKUP_DIRECTORY) {
49120 if (!nd->inode->i_op->lookup) {
49121 path_put(&nd->path);
49122 @@ -2027,8 +2060,17 @@ static int filename_lookup(int dfd, struct filename *name,
49123 retval = path_lookupat(dfd, name->name,
49124 flags | LOOKUP_REVAL, nd);
49125
49126 - if (likely(!retval))
49127 + if (likely(!retval)) {
49128 + if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
49129 +#ifdef CONFIG_GRKERNSEC
49130 + if (flags & LOOKUP_RCU)
49131 + return -ECHILD;
49132 +#endif
49133 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
49134 + return -ENOENT;
49135 + }
49136 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
49137 + }
49138 return retval;
49139 }
49140
49141 @@ -2402,6 +2444,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
49142 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
49143 return -EPERM;
49144
49145 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
49146 + return -EPERM;
49147 + if (gr_handle_rawio(inode))
49148 + return -EPERM;
49149 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
49150 + return -EACCES;
49151 +
49152 return 0;
49153 }
49154
49155 @@ -2623,7 +2672,7 @@ looked_up:
49156 * cleared otherwise prior to returning.
49157 */
49158 static int lookup_open(struct nameidata *nd, struct path *path,
49159 - struct file *file,
49160 + struct path *link, struct file *file,
49161 const struct open_flags *op,
49162 bool got_write, int *opened)
49163 {
49164 @@ -2658,6 +2707,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
49165 /* Negative dentry, just create the file */
49166 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
49167 umode_t mode = op->mode;
49168 +
49169 + if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
49170 + error = -EACCES;
49171 + goto out_dput;
49172 + }
49173 +
49174 + if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
49175 + error = -EACCES;
49176 + goto out_dput;
49177 + }
49178 +
49179 if (!IS_POSIXACL(dir->d_inode))
49180 mode &= ~current_umask();
49181 /*
49182 @@ -2679,6 +2739,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
49183 nd->flags & LOOKUP_EXCL);
49184 if (error)
49185 goto out_dput;
49186 + else
49187 + gr_handle_create(dentry, nd->path.mnt);
49188 }
49189 out_no_open:
49190 path->dentry = dentry;
49191 @@ -2693,7 +2755,7 @@ out_dput:
49192 /*
49193 * Handle the last step of open()
49194 */
49195 -static int do_last(struct nameidata *nd, struct path *path,
49196 +static int do_last(struct nameidata *nd, struct path *path, struct path *link,
49197 struct file *file, const struct open_flags *op,
49198 int *opened, struct filename *name)
49199 {
49200 @@ -2722,16 +2784,44 @@ static int do_last(struct nameidata *nd, struct path *path,
49201 error = complete_walk(nd);
49202 if (error)
49203 return error;
49204 +#ifdef CONFIG_GRKERNSEC
49205 + if (nd->flags & LOOKUP_RCU) {
49206 + error = -ECHILD;
49207 + goto out;
49208 + }
49209 +#endif
49210 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49211 + error = -ENOENT;
49212 + goto out;
49213 + }
49214 audit_inode(name, nd->path.dentry, 0);
49215 if (open_flag & O_CREAT) {
49216 error = -EISDIR;
49217 goto out;
49218 }
49219 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
49220 + error = -EACCES;
49221 + goto out;
49222 + }
49223 goto finish_open;
49224 case LAST_BIND:
49225 error = complete_walk(nd);
49226 if (error)
49227 return error;
49228 +#ifdef CONFIG_GRKERNSEC
49229 + if (nd->flags & LOOKUP_RCU) {
49230 + error = -ECHILD;
49231 + goto out;
49232 + }
49233 +#endif
49234 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
49235 + error = -ENOENT;
49236 + goto out;
49237 + }
49238 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
49239 + error = -EACCES;
49240 + goto out;
49241 + }
49242 audit_inode(name, dir, 0);
49243 goto finish_open;
49244 }
49245 @@ -2780,7 +2870,7 @@ retry_lookup:
49246 */
49247 }
49248 mutex_lock(&dir->d_inode->i_mutex);
49249 - error = lookup_open(nd, path, file, op, got_write, opened);
49250 + error = lookup_open(nd, path, link, file, op, got_write, opened);
49251 mutex_unlock(&dir->d_inode->i_mutex);
49252
49253 if (error <= 0) {
49254 @@ -2804,11 +2894,28 @@ retry_lookup:
49255 goto finish_open_created;
49256 }
49257
49258 + if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
49259 + error = -ENOENT;
49260 + goto exit_dput;
49261 + }
49262 + if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
49263 + error = -EACCES;
49264 + goto exit_dput;
49265 + }
49266 +
49267 /*
49268 * create/update audit record if it already exists.
49269 */
49270 - if (path->dentry->d_inode)
49271 + if (path->dentry->d_inode) {
49272 + /* only check if O_CREAT is specified, all other checks need to go
49273 + into may_open */
49274 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
49275 + error = -EACCES;
49276 + goto exit_dput;
49277 + }
49278 +
49279 audit_inode(name, path->dentry, 0);
49280 + }
49281
49282 /*
49283 * If atomic_open() acquired write access it is dropped now due to
49284 @@ -2849,6 +2956,11 @@ finish_lookup:
49285 }
49286 }
49287 BUG_ON(inode != path->dentry->d_inode);
49288 + /* if we're resolving a symlink to another symlink */
49289 + if (link && gr_handle_symlink_owner(link, inode)) {
49290 + error = -EACCES;
49291 + goto out;
49292 + }
49293 return 1;
49294 }
49295
49296 @@ -2858,7 +2970,6 @@ finish_lookup:
49297 save_parent.dentry = nd->path.dentry;
49298 save_parent.mnt = mntget(path->mnt);
49299 nd->path.dentry = path->dentry;
49300 -
49301 }
49302 nd->inode = inode;
49303 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
49304 @@ -2867,6 +2978,22 @@ finish_lookup:
49305 path_put(&save_parent);
49306 return error;
49307 }
49308 +
49309 +#ifdef CONFIG_GRKERNSEC
49310 + if (nd->flags & LOOKUP_RCU) {
49311 + error = -ECHILD;
49312 + goto out;
49313 + }
49314 +#endif
49315 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49316 + error = -ENOENT;
49317 + goto out;
49318 + }
49319 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
49320 + error = -EACCES;
49321 + goto out;
49322 + }
49323 +
49324 error = -EISDIR;
49325 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
49326 goto out;
49327 @@ -2965,7 +3092,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
49328 if (unlikely(error))
49329 goto out;
49330
49331 - error = do_last(nd, &path, file, op, &opened, pathname);
49332 + error = do_last(nd, &path, NULL, file, op, &opened, pathname);
49333 while (unlikely(error > 0)) { /* trailing symlink */
49334 struct path link = path;
49335 void *cookie;
49336 @@ -2983,7 +3110,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
49337 error = follow_link(&link, nd, &cookie);
49338 if (unlikely(error))
49339 break;
49340 - error = do_last(nd, &path, file, op, &opened, pathname);
49341 + error = do_last(nd, &path, &link, file, op, &opened, pathname);
49342 put_link(nd, &link, cookie);
49343 }
49344 out:
49345 @@ -3073,8 +3200,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
49346 goto unlock;
49347
49348 error = -EEXIST;
49349 - if (dentry->d_inode)
49350 + if (dentry->d_inode) {
49351 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
49352 + error = -ENOENT;
49353 + }
49354 goto fail;
49355 + }
49356 /*
49357 * Special case - lookup gave negative, but... we had foo/bar/
49358 * From the vfs_mknod() POV we just have a negative dentry -
49359 @@ -3125,6 +3256,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
49360 }
49361 EXPORT_SYMBOL(user_path_create);
49362
49363 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, int is_dir)
49364 +{
49365 + struct filename *tmp = getname(pathname);
49366 + struct dentry *res;
49367 + if (IS_ERR(tmp))
49368 + return ERR_CAST(tmp);
49369 + res = kern_path_create(dfd, tmp->name, path, is_dir);
49370 + if (IS_ERR(res))
49371 + putname(tmp);
49372 + else
49373 + *to = tmp;
49374 + return res;
49375 +}
49376 +
49377 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
49378 {
49379 int error = may_create(dir, dentry);
49380 @@ -3186,6 +3331,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
49381
49382 if (!IS_POSIXACL(path.dentry->d_inode))
49383 mode &= ~current_umask();
49384 +
49385 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
49386 + error = -EPERM;
49387 + goto out;
49388 + }
49389 +
49390 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
49391 + error = -EACCES;
49392 + goto out;
49393 + }
49394 +
49395 error = security_path_mknod(&path, dentry, mode, dev);
49396 if (error)
49397 goto out;
49398 @@ -3202,6 +3358,8 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
49399 break;
49400 }
49401 out:
49402 + if (!error)
49403 + gr_handle_create(dentry, path.mnt);
49404 done_path_create(&path, dentry);
49405 return error;
49406 }
49407 @@ -3248,9 +3406,18 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
49408
49409 if (!IS_POSIXACL(path.dentry->d_inode))
49410 mode &= ~current_umask();
49411 +
49412 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
49413 + error = -EACCES;
49414 + goto out;
49415 + }
49416 +
49417 error = security_path_mkdir(&path, dentry, mode);
49418 if (!error)
49419 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
49420 + if (!error)
49421 + gr_handle_create(dentry, path.mnt);
49422 +out:
49423 done_path_create(&path, dentry);
49424 return error;
49425 }
49426 @@ -3327,6 +3494,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
49427 struct filename *name;
49428 struct dentry *dentry;
49429 struct nameidata nd;
49430 + ino_t saved_ino = 0;
49431 + dev_t saved_dev = 0;
49432
49433 name = user_path_parent(dfd, pathname, &nd);
49434 if (IS_ERR(name))
49435 @@ -3358,10 +3527,21 @@ static long do_rmdir(int dfd, const char __user *pathname)
49436 error = -ENOENT;
49437 goto exit3;
49438 }
49439 +
49440 + saved_ino = dentry->d_inode->i_ino;
49441 + saved_dev = gr_get_dev_from_dentry(dentry);
49442 +
49443 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
49444 + error = -EACCES;
49445 + goto exit3;
49446 + }
49447 +
49448 error = security_path_rmdir(&nd.path, dentry);
49449 if (error)
49450 goto exit3;
49451 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
49452 + if (!error && (saved_dev || saved_ino))
49453 + gr_handle_delete(saved_ino, saved_dev);
49454 exit3:
49455 dput(dentry);
49456 exit2:
49457 @@ -3423,6 +3603,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
49458 struct dentry *dentry;
49459 struct nameidata nd;
49460 struct inode *inode = NULL;
49461 + ino_t saved_ino = 0;
49462 + dev_t saved_dev = 0;
49463
49464 name = user_path_parent(dfd, pathname, &nd);
49465 if (IS_ERR(name))
49466 @@ -3448,10 +3630,22 @@ static long do_unlinkat(int dfd, const char __user *pathname)
49467 if (!inode)
49468 goto slashes;
49469 ihold(inode);
49470 +
49471 + if (inode->i_nlink <= 1) {
49472 + saved_ino = inode->i_ino;
49473 + saved_dev = gr_get_dev_from_dentry(dentry);
49474 + }
49475 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
49476 + error = -EACCES;
49477 + goto exit2;
49478 + }
49479 +
49480 error = security_path_unlink(&nd.path, dentry);
49481 if (error)
49482 goto exit2;
49483 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
49484 + if (!error && (saved_ino || saved_dev))
49485 + gr_handle_delete(saved_ino, saved_dev);
49486 exit2:
49487 dput(dentry);
49488 }
49489 @@ -3523,9 +3717,17 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
49490 if (IS_ERR(dentry))
49491 goto out_putname;
49492
49493 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
49494 + error = -EACCES;
49495 + goto out;
49496 + }
49497 +
49498 error = security_path_symlink(&path, dentry, from->name);
49499 if (!error)
49500 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
49501 + if (!error)
49502 + gr_handle_create(dentry, path.mnt);
49503 +out:
49504 done_path_create(&path, dentry);
49505 out_putname:
49506 putname(from);
49507 @@ -3595,6 +3797,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49508 {
49509 struct dentry *new_dentry;
49510 struct path old_path, new_path;
49511 + struct filename *to = NULL;
49512 int how = 0;
49513 int error;
49514
49515 @@ -3618,7 +3821,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49516 if (error)
49517 return error;
49518
49519 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
49520 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
49521 error = PTR_ERR(new_dentry);
49522 if (IS_ERR(new_dentry))
49523 goto out;
49524 @@ -3629,11 +3832,28 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49525 error = may_linkat(&old_path);
49526 if (unlikely(error))
49527 goto out_dput;
49528 +
49529 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
49530 + old_path.dentry->d_inode,
49531 + old_path.dentry->d_inode->i_mode, to)) {
49532 + error = -EACCES;
49533 + goto out_dput;
49534 + }
49535 +
49536 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
49537 + old_path.dentry, old_path.mnt, to)) {
49538 + error = -EACCES;
49539 + goto out_dput;
49540 + }
49541 +
49542 error = security_path_link(old_path.dentry, &new_path, new_dentry);
49543 if (error)
49544 goto out_dput;
49545 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
49546 + if (!error)
49547 + gr_handle_create(new_dentry, new_path.mnt);
49548 out_dput:
49549 + putname(to);
49550 done_path_create(&new_path, new_dentry);
49551 out:
49552 path_put(&old_path);
49553 @@ -3873,12 +4093,21 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
49554 if (new_dentry == trap)
49555 goto exit5;
49556
49557 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
49558 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
49559 + to);
49560 + if (error)
49561 + goto exit5;
49562 +
49563 error = security_path_rename(&oldnd.path, old_dentry,
49564 &newnd.path, new_dentry);
49565 if (error)
49566 goto exit5;
49567 error = vfs_rename(old_dir->d_inode, old_dentry,
49568 new_dir->d_inode, new_dentry);
49569 + if (!error)
49570 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
49571 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
49572 exit5:
49573 dput(new_dentry);
49574 exit4:
49575 @@ -3903,6 +4132,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
49576
49577 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
49578 {
49579 + char tmpbuf[64];
49580 + const char *newlink;
49581 int len;
49582
49583 len = PTR_ERR(link);
49584 @@ -3912,7 +4143,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
49585 len = strlen(link);
49586 if (len > (unsigned) buflen)
49587 len = buflen;
49588 - if (copy_to_user(buffer, link, len))
49589 +
49590 + if (len < sizeof(tmpbuf)) {
49591 + memcpy(tmpbuf, link, len);
49592 + newlink = tmpbuf;
49593 + } else
49594 + newlink = link;
49595 +
49596 + if (copy_to_user(buffer, newlink, len))
49597 len = -EFAULT;
49598 out:
49599 return len;
49600 diff --git a/fs/namespace.c b/fs/namespace.c
49601 index 2496062..e26f6d6 100644
49602 --- a/fs/namespace.c
49603 +++ b/fs/namespace.c
49604 @@ -1212,6 +1212,9 @@ static int do_umount(struct mount *mnt, int flags)
49605 if (!(sb->s_flags & MS_RDONLY))
49606 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
49607 up_write(&sb->s_umount);
49608 +
49609 + gr_log_remount(mnt->mnt_devname, retval);
49610 +
49611 return retval;
49612 }
49613
49614 @@ -1231,6 +1234,9 @@ static int do_umount(struct mount *mnt, int flags)
49615 br_write_unlock(&vfsmount_lock);
49616 up_write(&namespace_sem);
49617 release_mounts(&umount_list);
49618 +
49619 + gr_log_unmount(mnt->mnt_devname, retval);
49620 +
49621 return retval;
49622 }
49623
49624 @@ -2244,6 +2250,16 @@ long do_mount(const char *dev_name, const char *dir_name,
49625 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
49626 MS_STRICTATIME);
49627
49628 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
49629 + retval = -EPERM;
49630 + goto dput_out;
49631 + }
49632 +
49633 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
49634 + retval = -EPERM;
49635 + goto dput_out;
49636 + }
49637 +
49638 if (flags & MS_REMOUNT)
49639 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
49640 data_page);
49641 @@ -2258,6 +2274,9 @@ long do_mount(const char *dev_name, const char *dir_name,
49642 dev_name, data_page);
49643 dput_out:
49644 path_put(&path);
49645 +
49646 + gr_log_mount(dev_name, dir_name, retval);
49647 +
49648 return retval;
49649 }
49650
49651 @@ -2516,6 +2535,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
49652 if (error)
49653 goto out2;
49654
49655 + if (gr_handle_chroot_pivot()) {
49656 + error = -EPERM;
49657 + goto out2;
49658 + }
49659 +
49660 get_fs_root(current->fs, &root);
49661 error = lock_mount(&old);
49662 if (error)
49663 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
49664 index 6fa01ae..2790820 100644
49665 --- a/fs/nfs/inode.c
49666 +++ b/fs/nfs/inode.c
49667 @@ -1029,16 +1029,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
49668 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
49669 }
49670
49671 -static atomic_long_t nfs_attr_generation_counter;
49672 +static atomic_long_unchecked_t nfs_attr_generation_counter;
49673
49674 static unsigned long nfs_read_attr_generation_counter(void)
49675 {
49676 - return atomic_long_read(&nfs_attr_generation_counter);
49677 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
49678 }
49679
49680 unsigned long nfs_inc_attr_generation_counter(void)
49681 {
49682 - return atomic_long_inc_return(&nfs_attr_generation_counter);
49683 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
49684 }
49685
49686 void nfs_fattr_init(struct nfs_fattr *fattr)
49687 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
49688 index c120b48..8ac4140 100644
49689 --- a/fs/nfsd/vfs.c
49690 +++ b/fs/nfsd/vfs.c
49691 @@ -941,7 +941,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
49692 } else {
49693 oldfs = get_fs();
49694 set_fs(KERNEL_DS);
49695 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
49696 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
49697 set_fs(oldfs);
49698 }
49699
49700 @@ -1045,7 +1045,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
49701
49702 /* Write the data. */
49703 oldfs = get_fs(); set_fs(KERNEL_DS);
49704 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
49705 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
49706 set_fs(oldfs);
49707 if (host_err < 0)
49708 goto out_nfserr;
49709 @@ -1581,7 +1581,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
49710 */
49711
49712 oldfs = get_fs(); set_fs(KERNEL_DS);
49713 - host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
49714 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
49715 set_fs(oldfs);
49716
49717 if (host_err < 0)
49718 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
49719 index 6fcaeb8..9d16d04 100644
49720 --- a/fs/notify/fanotify/fanotify_user.c
49721 +++ b/fs/notify/fanotify/fanotify_user.c
49722 @@ -250,8 +250,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
49723
49724 fd = fanotify_event_metadata.fd;
49725 ret = -EFAULT;
49726 - if (copy_to_user(buf, &fanotify_event_metadata,
49727 - fanotify_event_metadata.event_len))
49728 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
49729 + copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
49730 goto out_close_fd;
49731
49732 ret = prepare_for_access_response(group, event, fd);
49733 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
49734 index c887b13..0fdf472 100644
49735 --- a/fs/notify/notification.c
49736 +++ b/fs/notify/notification.c
49737 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
49738 * get set to 0 so it will never get 'freed'
49739 */
49740 static struct fsnotify_event *q_overflow_event;
49741 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
49742 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
49743
49744 /**
49745 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
49746 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
49747 */
49748 u32 fsnotify_get_cookie(void)
49749 {
49750 - return atomic_inc_return(&fsnotify_sync_cookie);
49751 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
49752 }
49753 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
49754
49755 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
49756 index 99e3610..02c1068 100644
49757 --- a/fs/ntfs/dir.c
49758 +++ b/fs/ntfs/dir.c
49759 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
49760 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
49761 ~(s64)(ndir->itype.index.block_size - 1)));
49762 /* Bounds checks. */
49763 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
49764 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
49765 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
49766 "inode 0x%lx or driver bug.", vdir->i_ino);
49767 goto err_out;
49768 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
49769 index 1ecf464..e1ff8bf 100644
49770 --- a/fs/ntfs/file.c
49771 +++ b/fs/ntfs/file.c
49772 @@ -2232,6 +2232,6 @@ const struct inode_operations ntfs_file_inode_ops = {
49773 #endif /* NTFS_RW */
49774 };
49775
49776 -const struct file_operations ntfs_empty_file_ops = {};
49777 +const struct file_operations ntfs_empty_file_ops __read_only;
49778
49779 -const struct inode_operations ntfs_empty_inode_ops = {};
49780 +const struct inode_operations ntfs_empty_inode_ops __read_only;
49781 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
49782 index a9f78c7..ed8a381 100644
49783 --- a/fs/ocfs2/localalloc.c
49784 +++ b/fs/ocfs2/localalloc.c
49785 @@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
49786 goto bail;
49787 }
49788
49789 - atomic_inc(&osb->alloc_stats.moves);
49790 + atomic_inc_unchecked(&osb->alloc_stats.moves);
49791
49792 bail:
49793 if (handle)
49794 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
49795 index d355e6e..578d905 100644
49796 --- a/fs/ocfs2/ocfs2.h
49797 +++ b/fs/ocfs2/ocfs2.h
49798 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
49799
49800 struct ocfs2_alloc_stats
49801 {
49802 - atomic_t moves;
49803 - atomic_t local_data;
49804 - atomic_t bitmap_data;
49805 - atomic_t bg_allocs;
49806 - atomic_t bg_extends;
49807 + atomic_unchecked_t moves;
49808 + atomic_unchecked_t local_data;
49809 + atomic_unchecked_t bitmap_data;
49810 + atomic_unchecked_t bg_allocs;
49811 + atomic_unchecked_t bg_extends;
49812 };
49813
49814 enum ocfs2_local_alloc_state
49815 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
49816 index f169da4..9112253 100644
49817 --- a/fs/ocfs2/suballoc.c
49818 +++ b/fs/ocfs2/suballoc.c
49819 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
49820 mlog_errno(status);
49821 goto bail;
49822 }
49823 - atomic_inc(&osb->alloc_stats.bg_extends);
49824 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
49825
49826 /* You should never ask for this much metadata */
49827 BUG_ON(bits_wanted >
49828 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
49829 mlog_errno(status);
49830 goto bail;
49831 }
49832 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49833 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49834
49835 *suballoc_loc = res.sr_bg_blkno;
49836 *suballoc_bit_start = res.sr_bit_offset;
49837 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
49838 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
49839 res->sr_bits);
49840
49841 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49842 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49843
49844 BUG_ON(res->sr_bits != 1);
49845
49846 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
49847 mlog_errno(status);
49848 goto bail;
49849 }
49850 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49851 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
49852
49853 BUG_ON(res.sr_bits != 1);
49854
49855 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
49856 cluster_start,
49857 num_clusters);
49858 if (!status)
49859 - atomic_inc(&osb->alloc_stats.local_data);
49860 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
49861 } else {
49862 if (min_clusters > (osb->bitmap_cpg - 1)) {
49863 /* The only paths asking for contiguousness
49864 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
49865 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
49866 res.sr_bg_blkno,
49867 res.sr_bit_offset);
49868 - atomic_inc(&osb->alloc_stats.bitmap_data);
49869 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
49870 *num_clusters = res.sr_bits;
49871 }
49872 }
49873 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
49874 index 0e91ec2..f4b3fc6 100644
49875 --- a/fs/ocfs2/super.c
49876 +++ b/fs/ocfs2/super.c
49877 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
49878 "%10s => GlobalAllocs: %d LocalAllocs: %d "
49879 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
49880 "Stats",
49881 - atomic_read(&osb->alloc_stats.bitmap_data),
49882 - atomic_read(&osb->alloc_stats.local_data),
49883 - atomic_read(&osb->alloc_stats.bg_allocs),
49884 - atomic_read(&osb->alloc_stats.moves),
49885 - atomic_read(&osb->alloc_stats.bg_extends));
49886 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
49887 + atomic_read_unchecked(&osb->alloc_stats.local_data),
49888 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
49889 + atomic_read_unchecked(&osb->alloc_stats.moves),
49890 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
49891
49892 out += snprintf(buf + out, len - out,
49893 "%10s => State: %u Descriptor: %llu Size: %u bits "
49894 @@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
49895 spin_lock_init(&osb->osb_xattr_lock);
49896 ocfs2_init_steal_slots(osb);
49897
49898 - atomic_set(&osb->alloc_stats.moves, 0);
49899 - atomic_set(&osb->alloc_stats.local_data, 0);
49900 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
49901 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
49902 - atomic_set(&osb->alloc_stats.bg_extends, 0);
49903 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
49904 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
49905 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
49906 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
49907 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
49908
49909 /* Copy the blockcheck stats from the superblock probe */
49910 osb->osb_ecc_stats = *stats;
49911 diff --git a/fs/open.c b/fs/open.c
49912 index 59071f5..c6229a0 100644
49913 --- a/fs/open.c
49914 +++ b/fs/open.c
49915 @@ -31,6 +31,8 @@
49916 #include <linux/ima.h>
49917 #include <linux/dnotify.h>
49918
49919 +#define CREATE_TRACE_POINTS
49920 +#include <trace/events/fs.h>
49921 #include "internal.h"
49922
49923 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
49924 @@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
49925 error = locks_verify_truncate(inode, NULL, length);
49926 if (!error)
49927 error = security_path_truncate(&path);
49928 +
49929 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
49930 + error = -EACCES;
49931 +
49932 if (!error)
49933 error = do_truncate(path.dentry, length, 0, NULL);
49934
49935 @@ -362,6 +368,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
49936 if (__mnt_is_readonly(path.mnt))
49937 res = -EROFS;
49938
49939 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
49940 + res = -EACCES;
49941 +
49942 out_path_release:
49943 path_put(&path);
49944 out:
49945 @@ -388,6 +397,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
49946 if (error)
49947 goto dput_and_out;
49948
49949 + gr_log_chdir(path.dentry, path.mnt);
49950 +
49951 set_fs_pwd(current->fs, &path);
49952
49953 dput_and_out:
49954 @@ -413,6 +424,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
49955 goto out_putf;
49956
49957 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
49958 +
49959 + if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
49960 + error = -EPERM;
49961 +
49962 + if (!error)
49963 + gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
49964 +
49965 if (!error)
49966 set_fs_pwd(current->fs, &f.file->f_path);
49967 out_putf:
49968 @@ -441,7 +459,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
49969 if (error)
49970 goto dput_and_out;
49971
49972 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
49973 + goto dput_and_out;
49974 +
49975 set_fs_root(current->fs, &path);
49976 +
49977 + gr_handle_chroot_chdir(&path);
49978 +
49979 error = 0;
49980 dput_and_out:
49981 path_put(&path);
49982 @@ -459,6 +483,16 @@ static int chmod_common(struct path *path, umode_t mode)
49983 if (error)
49984 return error;
49985 mutex_lock(&inode->i_mutex);
49986 +
49987 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
49988 + error = -EACCES;
49989 + goto out_unlock;
49990 + }
49991 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
49992 + error = -EACCES;
49993 + goto out_unlock;
49994 + }
49995 +
49996 error = security_path_chmod(path, mode);
49997 if (error)
49998 goto out_unlock;
49999 @@ -514,6 +548,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
50000 uid = make_kuid(current_user_ns(), user);
50001 gid = make_kgid(current_user_ns(), group);
50002
50003 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
50004 + return -EACCES;
50005 +
50006 newattrs.ia_valid = ATTR_CTIME;
50007 if (user != (uid_t) -1) {
50008 if (!uid_valid(uid))
50009 @@ -925,6 +962,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
50010 } else {
50011 fsnotify_open(f);
50012 fd_install(fd, f);
50013 + trace_do_sys_open(tmp->name, flags, mode);
50014 }
50015 }
50016 putname(tmp);
50017 diff --git a/fs/pipe.c b/fs/pipe.c
50018 index bd3479d..fb92c4d 100644
50019 --- a/fs/pipe.c
50020 +++ b/fs/pipe.c
50021 @@ -438,9 +438,9 @@ redo:
50022 }
50023 if (bufs) /* More to do? */
50024 continue;
50025 - if (!pipe->writers)
50026 + if (!atomic_read(&pipe->writers))
50027 break;
50028 - if (!pipe->waiting_writers) {
50029 + if (!atomic_read(&pipe->waiting_writers)) {
50030 /* syscall merging: Usually we must not sleep
50031 * if O_NONBLOCK is set, or if we got some data.
50032 * But if a writer sleeps in kernel space, then
50033 @@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
50034 mutex_lock(&inode->i_mutex);
50035 pipe = inode->i_pipe;
50036
50037 - if (!pipe->readers) {
50038 + if (!atomic_read(&pipe->readers)) {
50039 send_sig(SIGPIPE, current, 0);
50040 ret = -EPIPE;
50041 goto out;
50042 @@ -553,7 +553,7 @@ redo1:
50043 for (;;) {
50044 int bufs;
50045
50046 - if (!pipe->readers) {
50047 + if (!atomic_read(&pipe->readers)) {
50048 send_sig(SIGPIPE, current, 0);
50049 if (!ret)
50050 ret = -EPIPE;
50051 @@ -644,9 +644,9 @@ redo2:
50052 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50053 do_wakeup = 0;
50054 }
50055 - pipe->waiting_writers++;
50056 + atomic_inc(&pipe->waiting_writers);
50057 pipe_wait(pipe);
50058 - pipe->waiting_writers--;
50059 + atomic_dec(&pipe->waiting_writers);
50060 }
50061 out:
50062 mutex_unlock(&inode->i_mutex);
50063 @@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
50064 mask = 0;
50065 if (filp->f_mode & FMODE_READ) {
50066 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
50067 - if (!pipe->writers && filp->f_version != pipe->w_counter)
50068 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
50069 mask |= POLLHUP;
50070 }
50071
50072 @@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
50073 * Most Unices do not set POLLERR for FIFOs but on Linux they
50074 * behave exactly like pipes for poll().
50075 */
50076 - if (!pipe->readers)
50077 + if (!atomic_read(&pipe->readers))
50078 mask |= POLLERR;
50079 }
50080
50081 @@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
50082
50083 mutex_lock(&inode->i_mutex);
50084 pipe = inode->i_pipe;
50085 - pipe->readers -= decr;
50086 - pipe->writers -= decw;
50087 + atomic_sub(decr, &pipe->readers);
50088 + atomic_sub(decw, &pipe->writers);
50089
50090 - if (!pipe->readers && !pipe->writers) {
50091 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
50092 free_pipe_info(inode);
50093 } else {
50094 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
50095 @@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
50096
50097 if (inode->i_pipe) {
50098 ret = 0;
50099 - inode->i_pipe->readers++;
50100 + atomic_inc(&inode->i_pipe->readers);
50101 }
50102
50103 mutex_unlock(&inode->i_mutex);
50104 @@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
50105
50106 if (inode->i_pipe) {
50107 ret = 0;
50108 - inode->i_pipe->writers++;
50109 + atomic_inc(&inode->i_pipe->writers);
50110 }
50111
50112 mutex_unlock(&inode->i_mutex);
50113 @@ -868,9 +868,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
50114 if (inode->i_pipe) {
50115 ret = 0;
50116 if (filp->f_mode & FMODE_READ)
50117 - inode->i_pipe->readers++;
50118 + atomic_inc(&inode->i_pipe->readers);
50119 if (filp->f_mode & FMODE_WRITE)
50120 - inode->i_pipe->writers++;
50121 + atomic_inc(&inode->i_pipe->writers);
50122 }
50123
50124 mutex_unlock(&inode->i_mutex);
50125 @@ -962,7 +962,7 @@ void free_pipe_info(struct inode *inode)
50126 inode->i_pipe = NULL;
50127 }
50128
50129 -static struct vfsmount *pipe_mnt __read_mostly;
50130 +struct vfsmount *pipe_mnt __read_mostly;
50131
50132 /*
50133 * pipefs_dname() is called from d_path().
50134 @@ -992,7 +992,8 @@ static struct inode * get_pipe_inode(void)
50135 goto fail_iput;
50136 inode->i_pipe = pipe;
50137
50138 - pipe->readers = pipe->writers = 1;
50139 + atomic_set(&pipe->readers, 1);
50140 + atomic_set(&pipe->writers, 1);
50141 inode->i_fop = &rdwr_pipefifo_fops;
50142
50143 /*
50144 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
50145 index 15af622..0e9f4467 100644
50146 --- a/fs/proc/Kconfig
50147 +++ b/fs/proc/Kconfig
50148 @@ -30,12 +30,12 @@ config PROC_FS
50149
50150 config PROC_KCORE
50151 bool "/proc/kcore support" if !ARM
50152 - depends on PROC_FS && MMU
50153 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
50154
50155 config PROC_VMCORE
50156 bool "/proc/vmcore support"
50157 - depends on PROC_FS && CRASH_DUMP
50158 - default y
50159 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
50160 + default n
50161 help
50162 Exports the dump image of crashed kernel in ELF format.
50163
50164 @@ -59,8 +59,8 @@ config PROC_SYSCTL
50165 limited in memory.
50166
50167 config PROC_PAGE_MONITOR
50168 - default y
50169 - depends on PROC_FS && MMU
50170 + default n
50171 + depends on PROC_FS && MMU && !GRKERNSEC
50172 bool "Enable /proc page monitoring" if EXPERT
50173 help
50174 Various /proc files exist to monitor process memory utilization:
50175 diff --git a/fs/proc/array.c b/fs/proc/array.c
50176 index c1c207c..01ce725 100644
50177 --- a/fs/proc/array.c
50178 +++ b/fs/proc/array.c
50179 @@ -60,6 +60,7 @@
50180 #include <linux/tty.h>
50181 #include <linux/string.h>
50182 #include <linux/mman.h>
50183 +#include <linux/grsecurity.h>
50184 #include <linux/proc_fs.h>
50185 #include <linux/ioport.h>
50186 #include <linux/uaccess.h>
50187 @@ -346,6 +347,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
50188 seq_putc(m, '\n');
50189 }
50190
50191 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50192 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
50193 +{
50194 + if (p->mm)
50195 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
50196 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
50197 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
50198 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
50199 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
50200 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
50201 + else
50202 + seq_printf(m, "PaX:\t-----\n");
50203 +}
50204 +#endif
50205 +
50206 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50207 struct pid *pid, struct task_struct *task)
50208 {
50209 @@ -363,9 +379,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50210 task_cpus_allowed(m, task);
50211 cpuset_task_status_allowed(m, task);
50212 task_context_switch_counts(m, task);
50213 +
50214 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50215 + task_pax(m, task);
50216 +#endif
50217 +
50218 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
50219 + task_grsec_rbac(m, task);
50220 +#endif
50221 +
50222 return 0;
50223 }
50224
50225 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50226 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
50227 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
50228 + _mm->pax_flags & MF_PAX_SEGMEXEC))
50229 +#endif
50230 +
50231 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50232 struct pid *pid, struct task_struct *task, int whole)
50233 {
50234 @@ -387,6 +418,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50235 char tcomm[sizeof(task->comm)];
50236 unsigned long flags;
50237
50238 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50239 + if (current->exec_id != m->exec_id) {
50240 + gr_log_badprocpid("stat");
50241 + return 0;
50242 + }
50243 +#endif
50244 +
50245 state = *get_task_state(task);
50246 vsize = eip = esp = 0;
50247 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
50248 @@ -458,6 +496,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50249 gtime = task->gtime;
50250 }
50251
50252 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50253 + if (PAX_RAND_FLAGS(mm)) {
50254 + eip = 0;
50255 + esp = 0;
50256 + wchan = 0;
50257 + }
50258 +#endif
50259 +#ifdef CONFIG_GRKERNSEC_HIDESYM
50260 + wchan = 0;
50261 + eip =0;
50262 + esp =0;
50263 +#endif
50264 +
50265 /* scale priority and nice values from timeslices to -20..20 */
50266 /* to make it look like a "normal" Unix priority/nice value */
50267 priority = task_prio(task);
50268 @@ -494,9 +545,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50269 seq_put_decimal_ull(m, ' ', vsize);
50270 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
50271 seq_put_decimal_ull(m, ' ', rsslim);
50272 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50273 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
50274 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
50275 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
50276 +#else
50277 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
50278 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
50279 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
50280 +#endif
50281 seq_put_decimal_ull(m, ' ', esp);
50282 seq_put_decimal_ull(m, ' ', eip);
50283 /* The signal information here is obsolete.
50284 @@ -518,7 +575,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50285 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
50286 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
50287
50288 - if (mm && permitted) {
50289 + if (mm && permitted
50290 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50291 + && !PAX_RAND_FLAGS(mm)
50292 +#endif
50293 + ) {
50294 seq_put_decimal_ull(m, ' ', mm->start_data);
50295 seq_put_decimal_ull(m, ' ', mm->end_data);
50296 seq_put_decimal_ull(m, ' ', mm->start_brk);
50297 @@ -556,8 +617,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50298 struct pid *pid, struct task_struct *task)
50299 {
50300 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
50301 - struct mm_struct *mm = get_task_mm(task);
50302 + struct mm_struct *mm;
50303
50304 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50305 + if (current->exec_id != m->exec_id) {
50306 + gr_log_badprocpid("statm");
50307 + return 0;
50308 + }
50309 +#endif
50310 + mm = get_task_mm(task);
50311 if (mm) {
50312 size = task_statm(mm, &shared, &text, &data, &resident);
50313 mmput(mm);
50314 @@ -580,6 +648,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50315 return 0;
50316 }
50317
50318 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50319 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
50320 +{
50321 + return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
50322 +}
50323 +#endif
50324 +
50325 #ifdef CONFIG_CHECKPOINT_RESTORE
50326 static struct pid *
50327 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
50328 diff --git a/fs/proc/base.c b/fs/proc/base.c
50329 index 9e28356..c485b3c 100644
50330 --- a/fs/proc/base.c
50331 +++ b/fs/proc/base.c
50332 @@ -111,6 +111,14 @@ struct pid_entry {
50333 union proc_op op;
50334 };
50335
50336 +struct getdents_callback {
50337 + struct linux_dirent __user * current_dir;
50338 + struct linux_dirent __user * previous;
50339 + struct file * file;
50340 + int count;
50341 + int error;
50342 +};
50343 +
50344 #define NOD(NAME, MODE, IOP, FOP, OP) { \
50345 .name = (NAME), \
50346 .len = sizeof(NAME) - 1, \
50347 @@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
50348 if (!mm->arg_end)
50349 goto out_mm; /* Shh! No looking before we're done */
50350
50351 + if (gr_acl_handle_procpidmem(task))
50352 + goto out_mm;
50353 +
50354 len = mm->arg_end - mm->arg_start;
50355
50356 if (len > PAGE_SIZE)
50357 @@ -235,12 +246,28 @@ out:
50358 return res;
50359 }
50360
50361 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50362 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
50363 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
50364 + _mm->pax_flags & MF_PAX_SEGMEXEC))
50365 +#endif
50366 +
50367 static int proc_pid_auxv(struct task_struct *task, char *buffer)
50368 {
50369 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
50370 int res = PTR_ERR(mm);
50371 if (mm && !IS_ERR(mm)) {
50372 unsigned int nwords = 0;
50373 +
50374 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50375 + /* allow if we're currently ptracing this task */
50376 + if (PAX_RAND_FLAGS(mm) &&
50377 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
50378 + mmput(mm);
50379 + return 0;
50380 + }
50381 +#endif
50382 +
50383 do {
50384 nwords += 2;
50385 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
50386 @@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
50387 }
50388
50389
50390 -#ifdef CONFIG_KALLSYMS
50391 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50392 /*
50393 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
50394 * Returns the resolved symbol. If that fails, simply return the address.
50395 @@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
50396 mutex_unlock(&task->signal->cred_guard_mutex);
50397 }
50398
50399 -#ifdef CONFIG_STACKTRACE
50400 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50401
50402 #define MAX_STACK_TRACE_DEPTH 64
50403
50404 @@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
50405 return count;
50406 }
50407
50408 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50409 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50410 static int proc_pid_syscall(struct task_struct *task, char *buffer)
50411 {
50412 long nr;
50413 @@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
50414 /************************************************************************/
50415
50416 /* permission checks */
50417 -static int proc_fd_access_allowed(struct inode *inode)
50418 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
50419 {
50420 struct task_struct *task;
50421 int allowed = 0;
50422 @@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
50423 */
50424 task = get_proc_task(inode);
50425 if (task) {
50426 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
50427 + if (log)
50428 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
50429 + else
50430 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
50431 put_task_struct(task);
50432 }
50433 return allowed;
50434 @@ -562,10 +592,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
50435 struct task_struct *task,
50436 int hide_pid_min)
50437 {
50438 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50439 + return false;
50440 +
50441 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50442 + rcu_read_lock();
50443 + {
50444 + const struct cred *tmpcred = current_cred();
50445 + const struct cred *cred = __task_cred(task);
50446 +
50447 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
50448 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50449 + || in_group_p(grsec_proc_gid)
50450 +#endif
50451 + ) {
50452 + rcu_read_unlock();
50453 + return true;
50454 + }
50455 + }
50456 + rcu_read_unlock();
50457 +
50458 + if (!pid->hide_pid)
50459 + return false;
50460 +#endif
50461 +
50462 if (pid->hide_pid < hide_pid_min)
50463 return true;
50464 if (in_group_p(pid->pid_gid))
50465 return true;
50466 +
50467 return ptrace_may_access(task, PTRACE_MODE_READ);
50468 }
50469
50470 @@ -583,7 +638,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
50471 put_task_struct(task);
50472
50473 if (!has_perms) {
50474 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50475 + {
50476 +#else
50477 if (pid->hide_pid == 2) {
50478 +#endif
50479 /*
50480 * Let's make getdents(), stat(), and open()
50481 * consistent with each other. If a process
50482 @@ -681,6 +740,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
50483 if (!task)
50484 return -ESRCH;
50485
50486 + if (gr_acl_handle_procpidmem(task)) {
50487 + put_task_struct(task);
50488 + return -EPERM;
50489 + }
50490 +
50491 mm = mm_access(task, mode);
50492 put_task_struct(task);
50493
50494 @@ -696,6 +760,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
50495
50496 file->private_data = mm;
50497
50498 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50499 + file->f_version = current->exec_id;
50500 +#endif
50501 +
50502 return 0;
50503 }
50504
50505 @@ -717,6 +785,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
50506 ssize_t copied;
50507 char *page;
50508
50509 +#ifdef CONFIG_GRKERNSEC
50510 + if (write)
50511 + return -EPERM;
50512 +#endif
50513 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50514 + if (file->f_version != current->exec_id) {
50515 + gr_log_badprocpid("mem");
50516 + return 0;
50517 + }
50518 +#endif
50519 +
50520 if (!mm)
50521 return 0;
50522
50523 @@ -821,6 +900,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
50524 if (!mm)
50525 return 0;
50526
50527 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50528 + if (file->f_version != current->exec_id) {
50529 + gr_log_badprocpid("environ");
50530 + return 0;
50531 + }
50532 +#endif
50533 +
50534 page = (char *)__get_free_page(GFP_TEMPORARY);
50535 if (!page)
50536 return -ENOMEM;
50537 @@ -1436,7 +1522,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
50538 int error = -EACCES;
50539
50540 /* Are we allowed to snoop on the tasks file descriptors? */
50541 - if (!proc_fd_access_allowed(inode))
50542 + if (!proc_fd_access_allowed(inode, 0))
50543 goto out;
50544
50545 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
50546 @@ -1480,8 +1566,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
50547 struct path path;
50548
50549 /* Are we allowed to snoop on the tasks file descriptors? */
50550 - if (!proc_fd_access_allowed(inode))
50551 - goto out;
50552 + /* logging this is needed for learning on chromium to work properly,
50553 + but we don't want to flood the logs from 'ps' which does a readlink
50554 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
50555 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
50556 + */
50557 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
50558 + if (!proc_fd_access_allowed(inode,0))
50559 + goto out;
50560 + } else {
50561 + if (!proc_fd_access_allowed(inode,1))
50562 + goto out;
50563 + }
50564
50565 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
50566 if (error)
50567 @@ -1531,7 +1627,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
50568 rcu_read_lock();
50569 cred = __task_cred(task);
50570 inode->i_uid = cred->euid;
50571 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50572 + inode->i_gid = grsec_proc_gid;
50573 +#else
50574 inode->i_gid = cred->egid;
50575 +#endif
50576 rcu_read_unlock();
50577 }
50578 security_task_to_inode(task, inode);
50579 @@ -1567,10 +1667,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
50580 return -ENOENT;
50581 }
50582 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
50583 +#ifdef CONFIG_GRKERNSEC_PROC_USER
50584 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
50585 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50586 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
50587 +#endif
50588 task_dumpable(task)) {
50589 cred = __task_cred(task);
50590 stat->uid = cred->euid;
50591 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50592 + stat->gid = grsec_proc_gid;
50593 +#else
50594 stat->gid = cred->egid;
50595 +#endif
50596 }
50597 }
50598 rcu_read_unlock();
50599 @@ -1608,11 +1717,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
50600
50601 if (task) {
50602 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
50603 +#ifdef CONFIG_GRKERNSEC_PROC_USER
50604 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
50605 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50606 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
50607 +#endif
50608 task_dumpable(task)) {
50609 rcu_read_lock();
50610 cred = __task_cred(task);
50611 inode->i_uid = cred->euid;
50612 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50613 + inode->i_gid = grsec_proc_gid;
50614 +#else
50615 inode->i_gid = cred->egid;
50616 +#endif
50617 rcu_read_unlock();
50618 } else {
50619 inode->i_uid = GLOBAL_ROOT_UID;
50620 @@ -2065,6 +2183,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
50621 if (!task)
50622 goto out_no_task;
50623
50624 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50625 + goto out;
50626 +
50627 /*
50628 * Yes, it does not scale. And it should not. Don't add
50629 * new entries into /proc/<tgid>/ without very good reasons.
50630 @@ -2109,6 +2230,9 @@ static int proc_pident_readdir(struct file *filp,
50631 if (!task)
50632 goto out_no_task;
50633
50634 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50635 + goto out;
50636 +
50637 ret = 0;
50638 i = filp->f_pos;
50639 switch (i) {
50640 @@ -2380,7 +2504,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
50641 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
50642 void *cookie)
50643 {
50644 - char *s = nd_get_link(nd);
50645 + const char *s = nd_get_link(nd);
50646 if (!IS_ERR(s))
50647 kfree(s);
50648 }
50649 @@ -2662,7 +2786,7 @@ static const struct pid_entry tgid_base_stuff[] = {
50650 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
50651 #endif
50652 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
50653 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50654 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50655 INF("syscall", S_IRUGO, proc_pid_syscall),
50656 #endif
50657 INF("cmdline", S_IRUGO, proc_pid_cmdline),
50658 @@ -2687,10 +2811,10 @@ static const struct pid_entry tgid_base_stuff[] = {
50659 #ifdef CONFIG_SECURITY
50660 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
50661 #endif
50662 -#ifdef CONFIG_KALLSYMS
50663 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50664 INF("wchan", S_IRUGO, proc_pid_wchan),
50665 #endif
50666 -#ifdef CONFIG_STACKTRACE
50667 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50668 ONE("stack", S_IRUGO, proc_pid_stack),
50669 #endif
50670 #ifdef CONFIG_SCHEDSTATS
50671 @@ -2724,6 +2848,9 @@ static const struct pid_entry tgid_base_stuff[] = {
50672 #ifdef CONFIG_HARDWALL
50673 INF("hardwall", S_IRUGO, proc_pid_hardwall),
50674 #endif
50675 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50676 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
50677 +#endif
50678 #ifdef CONFIG_USER_NS
50679 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
50680 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
50681 @@ -2856,7 +2983,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
50682 if (!inode)
50683 goto out;
50684
50685 +#ifdef CONFIG_GRKERNSEC_PROC_USER
50686 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
50687 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50688 + inode->i_gid = grsec_proc_gid;
50689 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
50690 +#else
50691 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
50692 +#endif
50693 inode->i_op = &proc_tgid_base_inode_operations;
50694 inode->i_fop = &proc_tgid_base_operations;
50695 inode->i_flags|=S_IMMUTABLE;
50696 @@ -2898,7 +3032,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
50697 if (!task)
50698 goto out;
50699
50700 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50701 + goto out_put_task;
50702 +
50703 result = proc_pid_instantiate(dir, dentry, task, NULL);
50704 +out_put_task:
50705 put_task_struct(task);
50706 out:
50707 return result;
50708 @@ -2961,6 +3099,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
50709 static int fake_filldir(void *buf, const char *name, int namelen,
50710 loff_t offset, u64 ino, unsigned d_type)
50711 {
50712 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
50713 + __buf->error = -EINVAL;
50714 return 0;
50715 }
50716
50717 @@ -3027,7 +3167,7 @@ static const struct pid_entry tid_base_stuff[] = {
50718 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
50719 #endif
50720 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
50721 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50722 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50723 INF("syscall", S_IRUGO, proc_pid_syscall),
50724 #endif
50725 INF("cmdline", S_IRUGO, proc_pid_cmdline),
50726 @@ -3054,10 +3194,10 @@ static const struct pid_entry tid_base_stuff[] = {
50727 #ifdef CONFIG_SECURITY
50728 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
50729 #endif
50730 -#ifdef CONFIG_KALLSYMS
50731 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50732 INF("wchan", S_IRUGO, proc_pid_wchan),
50733 #endif
50734 -#ifdef CONFIG_STACKTRACE
50735 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50736 ONE("stack", S_IRUGO, proc_pid_stack),
50737 #endif
50738 #ifdef CONFIG_SCHEDSTATS
50739 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
50740 index 82676e3..5f8518a 100644
50741 --- a/fs/proc/cmdline.c
50742 +++ b/fs/proc/cmdline.c
50743 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
50744
50745 static int __init proc_cmdline_init(void)
50746 {
50747 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
50748 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
50749 +#else
50750 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
50751 +#endif
50752 return 0;
50753 }
50754 module_init(proc_cmdline_init);
50755 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
50756 index b143471..bb105e5 100644
50757 --- a/fs/proc/devices.c
50758 +++ b/fs/proc/devices.c
50759 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
50760
50761 static int __init proc_devices_init(void)
50762 {
50763 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
50764 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
50765 +#else
50766 proc_create("devices", 0, NULL, &proc_devinfo_operations);
50767 +#endif
50768 return 0;
50769 }
50770 module_init(proc_devices_init);
50771 diff --git a/fs/proc/fd.c b/fs/proc/fd.c
50772 index f28a875..c467953 100644
50773 --- a/fs/proc/fd.c
50774 +++ b/fs/proc/fd.c
50775 @@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
50776 if (!task)
50777 return -ENOENT;
50778
50779 - files = get_files_struct(task);
50780 + if (!gr_acl_handle_procpidmem(task))
50781 + files = get_files_struct(task);
50782 put_task_struct(task);
50783
50784 if (files) {
50785 @@ -300,11 +301,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
50786 */
50787 int proc_fd_permission(struct inode *inode, int mask)
50788 {
50789 + struct task_struct *task;
50790 int rv = generic_permission(inode, mask);
50791 - if (rv == 0)
50792 - return 0;
50793 +
50794 if (task_pid(current) == proc_pid(inode))
50795 rv = 0;
50796 +
50797 + task = get_proc_task(inode);
50798 + if (task == NULL)
50799 + return rv;
50800 +
50801 + if (gr_acl_handle_procpidmem(task))
50802 + rv = -EACCES;
50803 +
50804 + put_task_struct(task);
50805 +
50806 return rv;
50807 }
50808
50809 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
50810 index 3b22bbd..895b58c 100644
50811 --- a/fs/proc/inode.c
50812 +++ b/fs/proc/inode.c
50813 @@ -21,11 +21,17 @@
50814 #include <linux/seq_file.h>
50815 #include <linux/slab.h>
50816 #include <linux/mount.h>
50817 +#include <linux/grsecurity.h>
50818
50819 #include <asm/uaccess.h>
50820
50821 #include "internal.h"
50822
50823 +#ifdef CONFIG_PROC_SYSCTL
50824 +extern const struct inode_operations proc_sys_inode_operations;
50825 +extern const struct inode_operations proc_sys_dir_operations;
50826 +#endif
50827 +
50828 static void proc_evict_inode(struct inode *inode)
50829 {
50830 struct proc_dir_entry *de;
50831 @@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
50832 ns_ops = PROC_I(inode)->ns_ops;
50833 if (ns_ops && ns_ops->put)
50834 ns_ops->put(PROC_I(inode)->ns);
50835 +
50836 +#ifdef CONFIG_PROC_SYSCTL
50837 + if (inode->i_op == &proc_sys_inode_operations ||
50838 + inode->i_op == &proc_sys_dir_operations)
50839 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
50840 +#endif
50841 +
50842 }
50843
50844 static struct kmem_cache * proc_inode_cachep;
50845 @@ -455,7 +468,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
50846 if (de->mode) {
50847 inode->i_mode = de->mode;
50848 inode->i_uid = de->uid;
50849 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50850 + inode->i_gid = grsec_proc_gid;
50851 +#else
50852 inode->i_gid = de->gid;
50853 +#endif
50854 }
50855 if (de->size)
50856 inode->i_size = de->size;
50857 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
50858 index 43973b0..a20e704 100644
50859 --- a/fs/proc/internal.h
50860 +++ b/fs/proc/internal.h
50861 @@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50862 struct pid *pid, struct task_struct *task);
50863 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50864 struct pid *pid, struct task_struct *task);
50865 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50866 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
50867 +#endif
50868 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
50869
50870 extern const struct file_operations proc_tid_children_operations;
50871 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
50872 index 86c67ee..cdca321 100644
50873 --- a/fs/proc/kcore.c
50874 +++ b/fs/proc/kcore.c
50875 @@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
50876 * the addresses in the elf_phdr on our list.
50877 */
50878 start = kc_offset_to_vaddr(*fpos - elf_buflen);
50879 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
50880 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
50881 + if (tsz > buflen)
50882 tsz = buflen;
50883 -
50884 +
50885 while (buflen) {
50886 struct kcore_list *m;
50887
50888 @@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
50889 kfree(elf_buf);
50890 } else {
50891 if (kern_addr_valid(start)) {
50892 - unsigned long n;
50893 + char *elf_buf;
50894 + mm_segment_t oldfs;
50895
50896 - n = copy_to_user(buffer, (char *)start, tsz);
50897 - /*
50898 - * We cannot distinguish between fault on source
50899 - * and fault on destination. When this happens
50900 - * we clear too and hope it will trigger the
50901 - * EFAULT again.
50902 - */
50903 - if (n) {
50904 - if (clear_user(buffer + tsz - n,
50905 - n))
50906 + elf_buf = kmalloc(tsz, GFP_KERNEL);
50907 + if (!elf_buf)
50908 + return -ENOMEM;
50909 + oldfs = get_fs();
50910 + set_fs(KERNEL_DS);
50911 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
50912 + set_fs(oldfs);
50913 + if (copy_to_user(buffer, elf_buf, tsz)) {
50914 + kfree(elf_buf);
50915 return -EFAULT;
50916 + }
50917 }
50918 + set_fs(oldfs);
50919 + kfree(elf_buf);
50920 } else {
50921 if (clear_user(buffer, tsz))
50922 return -EFAULT;
50923 @@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
50924
50925 static int open_kcore(struct inode *inode, struct file *filp)
50926 {
50927 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
50928 + return -EPERM;
50929 +#endif
50930 if (!capable(CAP_SYS_RAWIO))
50931 return -EPERM;
50932 if (kcore_need_update)
50933 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
50934 index 80e4645..53e5fcf 100644
50935 --- a/fs/proc/meminfo.c
50936 +++ b/fs/proc/meminfo.c
50937 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
50938 vmi.used >> 10,
50939 vmi.largest_chunk >> 10
50940 #ifdef CONFIG_MEMORY_FAILURE
50941 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
50942 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
50943 #endif
50944 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
50945 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
50946 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
50947 index b1822dd..df622cb 100644
50948 --- a/fs/proc/nommu.c
50949 +++ b/fs/proc/nommu.c
50950 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
50951 if (len < 1)
50952 len = 1;
50953 seq_printf(m, "%*c", len, ' ');
50954 - seq_path(m, &file->f_path, "");
50955 + seq_path(m, &file->f_path, "\n\\");
50956 }
50957
50958 seq_putc(m, '\n');
50959 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
50960 index fe72cd0..cb9b67d 100644
50961 --- a/fs/proc/proc_net.c
50962 +++ b/fs/proc/proc_net.c
50963 @@ -23,6 +23,7 @@
50964 #include <linux/nsproxy.h>
50965 #include <net/net_namespace.h>
50966 #include <linux/seq_file.h>
50967 +#include <linux/grsecurity.h>
50968
50969 #include "internal.h"
50970
50971 @@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
50972 struct task_struct *task;
50973 struct nsproxy *ns;
50974 struct net *net = NULL;
50975 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50976 + const struct cred *cred = current_cred();
50977 +#endif
50978 +
50979 +#ifdef CONFIG_GRKERNSEC_PROC_USER
50980 + if (cred->fsuid)
50981 + return net;
50982 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50983 + if (cred->fsuid && !in_group_p(grsec_proc_gid))
50984 + return net;
50985 +#endif
50986
50987 rcu_read_lock();
50988 task = pid_task(proc_pid(dir), PIDTYPE_PID);
50989 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
50990 index a781bdf..6665284 100644
50991 --- a/fs/proc/proc_sysctl.c
50992 +++ b/fs/proc/proc_sysctl.c
50993 @@ -12,11 +12,15 @@
50994 #include <linux/module.h>
50995 #include "internal.h"
50996
50997 +extern int gr_handle_chroot_sysctl(const int op);
50998 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
50999 + const int op);
51000 +
51001 static const struct dentry_operations proc_sys_dentry_operations;
51002 static const struct file_operations proc_sys_file_operations;
51003 -static const struct inode_operations proc_sys_inode_operations;
51004 +const struct inode_operations proc_sys_inode_operations;
51005 static const struct file_operations proc_sys_dir_file_operations;
51006 -static const struct inode_operations proc_sys_dir_operations;
51007 +const struct inode_operations proc_sys_dir_operations;
51008
51009 void proc_sys_poll_notify(struct ctl_table_poll *poll)
51010 {
51011 @@ -465,6 +469,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
51012
51013 err = NULL;
51014 d_set_d_op(dentry, &proc_sys_dentry_operations);
51015 +
51016 + gr_handle_proc_create(dentry, inode);
51017 +
51018 d_add(dentry, inode);
51019
51020 out:
51021 @@ -480,18 +487,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
51022 struct inode *inode = filp->f_path.dentry->d_inode;
51023 struct ctl_table_header *head = grab_header(inode);
51024 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
51025 + int op = write ? MAY_WRITE : MAY_READ;
51026 ssize_t error;
51027 size_t res;
51028
51029 if (IS_ERR(head))
51030 return PTR_ERR(head);
51031
51032 +
51033 /*
51034 * At this point we know that the sysctl was not unregistered
51035 * and won't be until we finish.
51036 */
51037 error = -EPERM;
51038 - if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
51039 + if (sysctl_perm(head->root, table, op))
51040 goto out;
51041
51042 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
51043 @@ -499,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
51044 if (!table->proc_handler)
51045 goto out;
51046
51047 +#ifdef CONFIG_GRKERNSEC
51048 + error = -EPERM;
51049 + if (gr_handle_chroot_sysctl(op))
51050 + goto out;
51051 + dget(filp->f_path.dentry);
51052 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
51053 + dput(filp->f_path.dentry);
51054 + goto out;
51055 + }
51056 + dput(filp->f_path.dentry);
51057 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
51058 + goto out;
51059 + if (write && !capable(CAP_SYS_ADMIN))
51060 + goto out;
51061 +#endif
51062 +
51063 /* careful: calling conventions are nasty here */
51064 res = count;
51065 error = table->proc_handler(table, write, buf, &res, ppos);
51066 @@ -596,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
51067 return -ENOMEM;
51068 } else {
51069 d_set_d_op(child, &proc_sys_dentry_operations);
51070 +
51071 + gr_handle_proc_create(child, inode);
51072 +
51073 d_add(child, inode);
51074 }
51075 } else {
51076 @@ -639,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
51077 if ((*pos)++ < file->f_pos)
51078 return 0;
51079
51080 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
51081 + return 0;
51082 +
51083 if (unlikely(S_ISLNK(table->mode)))
51084 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
51085 else
51086 @@ -756,6 +787,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
51087 if (IS_ERR(head))
51088 return PTR_ERR(head);
51089
51090 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
51091 + return -ENOENT;
51092 +
51093 generic_fillattr(inode, stat);
51094 if (table)
51095 stat->mode = (stat->mode & S_IFMT) | table->mode;
51096 @@ -778,13 +812,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
51097 .llseek = generic_file_llseek,
51098 };
51099
51100 -static const struct inode_operations proc_sys_inode_operations = {
51101 +const struct inode_operations proc_sys_inode_operations = {
51102 .permission = proc_sys_permission,
51103 .setattr = proc_sys_setattr,
51104 .getattr = proc_sys_getattr,
51105 };
51106
51107 -static const struct inode_operations proc_sys_dir_operations = {
51108 +const struct inode_operations proc_sys_dir_operations = {
51109 .lookup = proc_sys_lookup,
51110 .permission = proc_sys_permission,
51111 .setattr = proc_sys_setattr,
51112 diff --git a/fs/proc/root.c b/fs/proc/root.c
51113 index 9889a92..2613b48 100644
51114 --- a/fs/proc/root.c
51115 +++ b/fs/proc/root.c
51116 @@ -187,7 +187,15 @@ void __init proc_root_init(void)
51117 #ifdef CONFIG_PROC_DEVICETREE
51118 proc_device_tree_init();
51119 #endif
51120 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
51121 +#ifdef CONFIG_GRKERNSEC_PROC_USER
51122 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
51123 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51124 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51125 +#endif
51126 +#else
51127 proc_mkdir("bus", NULL);
51128 +#endif
51129 proc_sys_init();
51130 }
51131
51132 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
51133 index 90c63f9..e662cfc 100644
51134 --- a/fs/proc/task_mmu.c
51135 +++ b/fs/proc/task_mmu.c
51136 @@ -11,12 +11,19 @@
51137 #include <linux/rmap.h>
51138 #include <linux/swap.h>
51139 #include <linux/swapops.h>
51140 +#include <linux/grsecurity.h>
51141
51142 #include <asm/elf.h>
51143 #include <asm/uaccess.h>
51144 #include <asm/tlbflush.h>
51145 #include "internal.h"
51146
51147 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51148 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
51149 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
51150 + _mm->pax_flags & MF_PAX_SEGMEXEC))
51151 +#endif
51152 +
51153 void task_mem(struct seq_file *m, struct mm_struct *mm)
51154 {
51155 unsigned long data, text, lib, swap;
51156 @@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51157 "VmExe:\t%8lu kB\n"
51158 "VmLib:\t%8lu kB\n"
51159 "VmPTE:\t%8lu kB\n"
51160 - "VmSwap:\t%8lu kB\n",
51161 - hiwater_vm << (PAGE_SHIFT-10),
51162 + "VmSwap:\t%8lu kB\n"
51163 +
51164 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51165 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
51166 +#endif
51167 +
51168 + ,hiwater_vm << (PAGE_SHIFT-10),
51169 total_vm << (PAGE_SHIFT-10),
51170 mm->locked_vm << (PAGE_SHIFT-10),
51171 mm->pinned_vm << (PAGE_SHIFT-10),
51172 @@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51173 data << (PAGE_SHIFT-10),
51174 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
51175 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
51176 - swap << (PAGE_SHIFT-10));
51177 + swap << (PAGE_SHIFT-10)
51178 +
51179 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51180 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51181 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
51182 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
51183 +#else
51184 + , mm->context.user_cs_base
51185 + , mm->context.user_cs_limit
51186 +#endif
51187 +#endif
51188 +
51189 + );
51190 }
51191
51192 unsigned long task_vsize(struct mm_struct *mm)
51193 @@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51194 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
51195 }
51196
51197 - /* We don't show the stack guard page in /proc/maps */
51198 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51199 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
51200 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
51201 +#else
51202 start = vma->vm_start;
51203 - if (stack_guard_page_start(vma, start))
51204 - start += PAGE_SIZE;
51205 end = vma->vm_end;
51206 - if (stack_guard_page_end(vma, end))
51207 - end -= PAGE_SIZE;
51208 +#endif
51209
51210 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
51211 start,
51212 @@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51213 flags & VM_WRITE ? 'w' : '-',
51214 flags & VM_EXEC ? 'x' : '-',
51215 flags & VM_MAYSHARE ? 's' : 'p',
51216 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51217 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
51218 +#else
51219 pgoff,
51220 +#endif
51221 MAJOR(dev), MINOR(dev), ino, &len);
51222
51223 /*
51224 @@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51225 */
51226 if (file) {
51227 pad_len_spaces(m, len);
51228 - seq_path(m, &file->f_path, "\n");
51229 + seq_path(m, &file->f_path, "\n\\");
51230 goto done;
51231 }
51232
51233 @@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51234 * Thread stack in /proc/PID/task/TID/maps or
51235 * the main process stack.
51236 */
51237 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
51238 - vma->vm_end >= mm->start_stack)) {
51239 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
51240 + (vma->vm_start <= mm->start_stack &&
51241 + vma->vm_end >= mm->start_stack)) {
51242 name = "[stack]";
51243 } else {
51244 /* Thread stack in /proc/PID/maps */
51245 @@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
51246 struct proc_maps_private *priv = m->private;
51247 struct task_struct *task = priv->task;
51248
51249 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51250 + if (current->exec_id != m->exec_id) {
51251 + gr_log_badprocpid("maps");
51252 + return 0;
51253 + }
51254 +#endif
51255 +
51256 show_map_vma(m, vma, is_pid);
51257
51258 if (m->count < m->size) /* vma is copied successfully */
51259 @@ -538,12 +574,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
51260 .private = &mss,
51261 };
51262
51263 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51264 + if (current->exec_id != m->exec_id) {
51265 + gr_log_badprocpid("smaps");
51266 + return 0;
51267 + }
51268 +#endif
51269 memset(&mss, 0, sizeof mss);
51270 - mss.vma = vma;
51271 - /* mmap_sem is held in m_start */
51272 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
51273 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
51274 -
51275 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51276 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
51277 +#endif
51278 + mss.vma = vma;
51279 + /* mmap_sem is held in m_start */
51280 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
51281 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
51282 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51283 + }
51284 +#endif
51285 show_map_vma(m, vma, is_pid);
51286
51287 seq_printf(m,
51288 @@ -561,7 +608,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
51289 "KernelPageSize: %8lu kB\n"
51290 "MMUPageSize: %8lu kB\n"
51291 "Locked: %8lu kB\n",
51292 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51293 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
51294 +#else
51295 (vma->vm_end - vma->vm_start) >> 10,
51296 +#endif
51297 mss.resident >> 10,
51298 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
51299 mss.shared_clean >> 10,
51300 @@ -1211,6 +1262,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
51301 int n;
51302 char buffer[50];
51303
51304 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51305 + if (current->exec_id != m->exec_id) {
51306 + gr_log_badprocpid("numa_maps");
51307 + return 0;
51308 + }
51309 +#endif
51310 +
51311 if (!mm)
51312 return 0;
51313
51314 @@ -1228,11 +1286,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
51315 mpol_to_str(buffer, sizeof(buffer), pol, 0);
51316 mpol_cond_put(pol);
51317
51318 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51319 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
51320 +#else
51321 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
51322 +#endif
51323
51324 if (file) {
51325 seq_printf(m, " file=");
51326 - seq_path(m, &file->f_path, "\n\t= ");
51327 + seq_path(m, &file->f_path, "\n\t\\= ");
51328 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
51329 seq_printf(m, " heap");
51330 } else {
51331 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
51332 index 1ccfa53..0848f95 100644
51333 --- a/fs/proc/task_nommu.c
51334 +++ b/fs/proc/task_nommu.c
51335 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51336 else
51337 bytes += kobjsize(mm);
51338
51339 - if (current->fs && current->fs->users > 1)
51340 + if (current->fs && atomic_read(&current->fs->users) > 1)
51341 sbytes += kobjsize(current->fs);
51342 else
51343 bytes += kobjsize(current->fs);
51344 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
51345
51346 if (file) {
51347 pad_len_spaces(m, len);
51348 - seq_path(m, &file->f_path, "");
51349 + seq_path(m, &file->f_path, "\n\\");
51350 } else if (mm) {
51351 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
51352
51353 diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
51354 index 2d57e1a..43b1280 100644
51355 --- a/fs/pstore/ftrace.c
51356 +++ b/fs/pstore/ftrace.c
51357 @@ -28,7 +28,9 @@
51358 #include "internal.h"
51359
51360 static void notrace pstore_ftrace_call(unsigned long ip,
51361 - unsigned long parent_ip)
51362 + unsigned long parent_ip,
51363 + struct ftrace_ops *op,
51364 + struct pt_regs *regs)
51365 {
51366 unsigned long flags;
51367 struct pstore_ftrace_record rec = {};
51368 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
51369 index 16e8abb..2dcf914 100644
51370 --- a/fs/quota/netlink.c
51371 +++ b/fs/quota/netlink.c
51372 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
51373 void quota_send_warning(struct kqid qid, dev_t dev,
51374 const char warntype)
51375 {
51376 - static atomic_t seq;
51377 + static atomic_unchecked_t seq;
51378 struct sk_buff *skb;
51379 void *msg_head;
51380 int ret;
51381 @@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
51382 "VFS: Not enough memory to send quota warning.\n");
51383 return;
51384 }
51385 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
51386 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
51387 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
51388 if (!msg_head) {
51389 printk(KERN_ERR
51390 diff --git a/fs/read_write.c b/fs/read_write.c
51391 index d065348..8e2b43d 100644
51392 --- a/fs/read_write.c
51393 +++ b/fs/read_write.c
51394 @@ -935,6 +935,8 @@ ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
51395 if (retval > 0) {
51396 add_rchar(current, retval);
51397 add_wchar(current, retval);
51398 + fsnotify_access(in.file);
51399 + fsnotify_modify(out.file);
51400 }
51401
51402 inc_syscr(current);
51403 diff --git a/fs/readdir.c b/fs/readdir.c
51404 index 5e69ef5..e5d9099 100644
51405 --- a/fs/readdir.c
51406 +++ b/fs/readdir.c
51407 @@ -17,6 +17,7 @@
51408 #include <linux/security.h>
51409 #include <linux/syscalls.h>
51410 #include <linux/unistd.h>
51411 +#include <linux/namei.h>
51412
51413 #include <asm/uaccess.h>
51414
51415 @@ -67,6 +68,7 @@ struct old_linux_dirent {
51416
51417 struct readdir_callback {
51418 struct old_linux_dirent __user * dirent;
51419 + struct file * file;
51420 int result;
51421 };
51422
51423 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
51424 buf->result = -EOVERFLOW;
51425 return -EOVERFLOW;
51426 }
51427 +
51428 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51429 + return 0;
51430 +
51431 buf->result++;
51432 dirent = buf->dirent;
51433 if (!access_ok(VERIFY_WRITE, dirent,
51434 @@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
51435
51436 buf.result = 0;
51437 buf.dirent = dirent;
51438 + buf.file = f.file;
51439
51440 error = vfs_readdir(f.file, fillonedir, &buf);
51441 if (buf.result)
51442 @@ -139,6 +146,7 @@ struct linux_dirent {
51443 struct getdents_callback {
51444 struct linux_dirent __user * current_dir;
51445 struct linux_dirent __user * previous;
51446 + struct file * file;
51447 int count;
51448 int error;
51449 };
51450 @@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
51451 buf->error = -EOVERFLOW;
51452 return -EOVERFLOW;
51453 }
51454 +
51455 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51456 + return 0;
51457 +
51458 dirent = buf->previous;
51459 if (dirent) {
51460 if (__put_user(offset, &dirent->d_off))
51461 @@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
51462 buf.previous = NULL;
51463 buf.count = count;
51464 buf.error = 0;
51465 + buf.file = f.file;
51466
51467 error = vfs_readdir(f.file, filldir, &buf);
51468 if (error >= 0)
51469 @@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
51470 struct getdents_callback64 {
51471 struct linux_dirent64 __user * current_dir;
51472 struct linux_dirent64 __user * previous;
51473 + struct file *file;
51474 int count;
51475 int error;
51476 };
51477 @@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
51478 buf->error = -EINVAL; /* only used if we fail.. */
51479 if (reclen > buf->count)
51480 return -EINVAL;
51481 +
51482 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51483 + return 0;
51484 +
51485 dirent = buf->previous;
51486 if (dirent) {
51487 if (__put_user(offset, &dirent->d_off))
51488 @@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
51489
51490 buf.current_dir = dirent;
51491 buf.previous = NULL;
51492 + buf.file = f.file;
51493 buf.count = count;
51494 buf.error = 0;
51495
51496 @@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
51497 error = buf.error;
51498 lastdirent = buf.previous;
51499 if (lastdirent) {
51500 - typeof(lastdirent->d_off) d_off = f.file->f_pos;
51501 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
51502 if (__put_user(d_off, &lastdirent->d_off))
51503 error = -EFAULT;
51504 else
51505 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
51506 index 2b7882b..1c5ef48 100644
51507 --- a/fs/reiserfs/do_balan.c
51508 +++ b/fs/reiserfs/do_balan.c
51509 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
51510 return;
51511 }
51512
51513 - atomic_inc(&(fs_generation(tb->tb_sb)));
51514 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
51515 do_balance_starts(tb);
51516
51517 /* balance leaf returns 0 except if combining L R and S into
51518 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
51519 index e60e870..f40ac16 100644
51520 --- a/fs/reiserfs/procfs.c
51521 +++ b/fs/reiserfs/procfs.c
51522 @@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
51523 "SMALL_TAILS " : "NO_TAILS ",
51524 replay_only(sb) ? "REPLAY_ONLY " : "",
51525 convert_reiserfs(sb) ? "CONV " : "",
51526 - atomic_read(&r->s_generation_counter),
51527 + atomic_read_unchecked(&r->s_generation_counter),
51528 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
51529 SF(s_do_balance), SF(s_unneeded_left_neighbor),
51530 SF(s_good_search_by_key_reada), SF(s_bmaps),
51531 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
51532 index 33215f5..c5d427a 100644
51533 --- a/fs/reiserfs/reiserfs.h
51534 +++ b/fs/reiserfs/reiserfs.h
51535 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
51536 /* Comment? -Hans */
51537 wait_queue_head_t s_wait;
51538 /* To be obsoleted soon by per buffer seals.. -Hans */
51539 - atomic_t s_generation_counter; // increased by one every time the
51540 + atomic_unchecked_t s_generation_counter; // increased by one every time the
51541 // tree gets re-balanced
51542 unsigned long s_properties; /* File system properties. Currently holds
51543 on-disk FS format */
51544 @@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
51545 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
51546
51547 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
51548 -#define get_generation(s) atomic_read (&fs_generation(s))
51549 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
51550 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
51551 #define __fs_changed(gen,s) (gen != get_generation (s))
51552 #define fs_changed(gen,s) \
51553 diff --git a/fs/select.c b/fs/select.c
51554 index 2ef72d9..f213b17 100644
51555 --- a/fs/select.c
51556 +++ b/fs/select.c
51557 @@ -20,6 +20,7 @@
51558 #include <linux/export.h>
51559 #include <linux/slab.h>
51560 #include <linux/poll.h>
51561 +#include <linux/security.h>
51562 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
51563 #include <linux/file.h>
51564 #include <linux/fdtable.h>
51565 @@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
51566 struct poll_list *walk = head;
51567 unsigned long todo = nfds;
51568
51569 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
51570 if (nfds > rlimit(RLIMIT_NOFILE))
51571 return -EINVAL;
51572
51573 diff --git a/fs/seq_file.c b/fs/seq_file.c
51574 index 99dffab..884a1eb 100644
51575 --- a/fs/seq_file.c
51576 +++ b/fs/seq_file.c
51577 @@ -10,6 +10,7 @@
51578 #include <linux/seq_file.h>
51579 #include <linux/slab.h>
51580 #include <linux/cred.h>
51581 +#include <linux/sched.h>
51582
51583 #include <asm/uaccess.h>
51584 #include <asm/page.h>
51585 @@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
51586 #ifdef CONFIG_USER_NS
51587 p->user_ns = file->f_cred->user_ns;
51588 #endif
51589 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51590 + p->exec_id = current->exec_id;
51591 +#endif
51592
51593 /*
51594 * Wrappers around seq_open(e.g. swaps_open) need to be
51595 @@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
51596 return 0;
51597 }
51598 if (!m->buf) {
51599 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
51600 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
51601 if (!m->buf)
51602 return -ENOMEM;
51603 }
51604 @@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
51605 Eoverflow:
51606 m->op->stop(m, p);
51607 kfree(m->buf);
51608 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
51609 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
51610 return !m->buf ? -ENOMEM : -EAGAIN;
51611 }
51612
51613 @@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
51614
51615 /* grab buffer if we didn't have one */
51616 if (!m->buf) {
51617 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
51618 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
51619 if (!m->buf)
51620 goto Enomem;
51621 }
51622 @@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
51623 goto Fill;
51624 m->op->stop(m, p);
51625 kfree(m->buf);
51626 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
51627 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
51628 if (!m->buf)
51629 goto Enomem;
51630 m->count = 0;
51631 @@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
51632 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
51633 void *data)
51634 {
51635 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
51636 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
51637 int res = -ENOMEM;
51638
51639 if (op) {
51640 diff --git a/fs/splice.c b/fs/splice.c
51641 index 13e5b47..2262998 100644
51642 --- a/fs/splice.c
51643 +++ b/fs/splice.c
51644 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
51645 pipe_lock(pipe);
51646
51647 for (;;) {
51648 - if (!pipe->readers) {
51649 + if (!atomic_read(&pipe->readers)) {
51650 send_sig(SIGPIPE, current, 0);
51651 if (!ret)
51652 ret = -EPIPE;
51653 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
51654 do_wakeup = 0;
51655 }
51656
51657 - pipe->waiting_writers++;
51658 + atomic_inc(&pipe->waiting_writers);
51659 pipe_wait(pipe);
51660 - pipe->waiting_writers--;
51661 + atomic_dec(&pipe->waiting_writers);
51662 }
51663
51664 pipe_unlock(pipe);
51665 @@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
51666 old_fs = get_fs();
51667 set_fs(get_ds());
51668 /* The cast to a user pointer is valid due to the set_fs() */
51669 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
51670 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
51671 set_fs(old_fs);
51672
51673 return res;
51674 @@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
51675 old_fs = get_fs();
51676 set_fs(get_ds());
51677 /* The cast to a user pointer is valid due to the set_fs() */
51678 - res = vfs_write(file, (const char __user *)buf, count, &pos);
51679 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
51680 set_fs(old_fs);
51681
51682 return res;
51683 @@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
51684 goto err;
51685
51686 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
51687 - vec[i].iov_base = (void __user *) page_address(page);
51688 + vec[i].iov_base = (void __force_user *) page_address(page);
51689 vec[i].iov_len = this_len;
51690 spd.pages[i] = page;
51691 spd.nr_pages++;
51692 @@ -849,10 +849,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
51693 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
51694 {
51695 while (!pipe->nrbufs) {
51696 - if (!pipe->writers)
51697 + if (!atomic_read(&pipe->writers))
51698 return 0;
51699
51700 - if (!pipe->waiting_writers && sd->num_spliced)
51701 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
51702 return 0;
51703
51704 if (sd->flags & SPLICE_F_NONBLOCK)
51705 @@ -1190,7 +1190,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
51706 * out of the pipe right after the splice_to_pipe(). So set
51707 * PIPE_READERS appropriately.
51708 */
51709 - pipe->readers = 1;
51710 + atomic_set(&pipe->readers, 1);
51711
51712 current->splice_pipe = pipe;
51713 }
51714 @@ -1739,9 +1739,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
51715 ret = -ERESTARTSYS;
51716 break;
51717 }
51718 - if (!pipe->writers)
51719 + if (!atomic_read(&pipe->writers))
51720 break;
51721 - if (!pipe->waiting_writers) {
51722 + if (!atomic_read(&pipe->waiting_writers)) {
51723 if (flags & SPLICE_F_NONBLOCK) {
51724 ret = -EAGAIN;
51725 break;
51726 @@ -1773,7 +1773,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
51727 pipe_lock(pipe);
51728
51729 while (pipe->nrbufs >= pipe->buffers) {
51730 - if (!pipe->readers) {
51731 + if (!atomic_read(&pipe->readers)) {
51732 send_sig(SIGPIPE, current, 0);
51733 ret = -EPIPE;
51734 break;
51735 @@ -1786,9 +1786,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
51736 ret = -ERESTARTSYS;
51737 break;
51738 }
51739 - pipe->waiting_writers++;
51740 + atomic_inc(&pipe->waiting_writers);
51741 pipe_wait(pipe);
51742 - pipe->waiting_writers--;
51743 + atomic_dec(&pipe->waiting_writers);
51744 }
51745
51746 pipe_unlock(pipe);
51747 @@ -1824,14 +1824,14 @@ retry:
51748 pipe_double_lock(ipipe, opipe);
51749
51750 do {
51751 - if (!opipe->readers) {
51752 + if (!atomic_read(&opipe->readers)) {
51753 send_sig(SIGPIPE, current, 0);
51754 if (!ret)
51755 ret = -EPIPE;
51756 break;
51757 }
51758
51759 - if (!ipipe->nrbufs && !ipipe->writers)
51760 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
51761 break;
51762
51763 /*
51764 @@ -1928,7 +1928,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
51765 pipe_double_lock(ipipe, opipe);
51766
51767 do {
51768 - if (!opipe->readers) {
51769 + if (!atomic_read(&opipe->readers)) {
51770 send_sig(SIGPIPE, current, 0);
51771 if (!ret)
51772 ret = -EPIPE;
51773 @@ -1973,7 +1973,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
51774 * return EAGAIN if we have the potential of some data in the
51775 * future, otherwise just return 0
51776 */
51777 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
51778 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
51779 ret = -EAGAIN;
51780
51781 pipe_unlock(ipipe);
51782 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
51783 index 2fbdff6..5530a61 100644
51784 --- a/fs/sysfs/dir.c
51785 +++ b/fs/sysfs/dir.c
51786 @@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
51787 struct sysfs_dirent *sd;
51788 int rc;
51789
51790 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
51791 + const char *parent_name = parent_sd->s_name;
51792 +
51793 + mode = S_IFDIR | S_IRWXU;
51794 +
51795 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
51796 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
51797 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
51798 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
51799 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
51800 +#endif
51801 +
51802 /* allocate */
51803 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
51804 if (!sd)
51805 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
51806 index 00012e3..8392349 100644
51807 --- a/fs/sysfs/file.c
51808 +++ b/fs/sysfs/file.c
51809 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
51810
51811 struct sysfs_open_dirent {
51812 atomic_t refcnt;
51813 - atomic_t event;
51814 + atomic_unchecked_t event;
51815 wait_queue_head_t poll;
51816 struct list_head buffers; /* goes through sysfs_buffer.list */
51817 };
51818 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
51819 if (!sysfs_get_active(attr_sd))
51820 return -ENODEV;
51821
51822 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
51823 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
51824 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
51825
51826 sysfs_put_active(attr_sd);
51827 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
51828 return -ENOMEM;
51829
51830 atomic_set(&new_od->refcnt, 0);
51831 - atomic_set(&new_od->event, 1);
51832 + atomic_set_unchecked(&new_od->event, 1);
51833 init_waitqueue_head(&new_od->poll);
51834 INIT_LIST_HEAD(&new_od->buffers);
51835 goto retry;
51836 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
51837
51838 sysfs_put_active(attr_sd);
51839
51840 - if (buffer->event != atomic_read(&od->event))
51841 + if (buffer->event != atomic_read_unchecked(&od->event))
51842 goto trigger;
51843
51844 return DEFAULT_POLLMASK;
51845 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
51846
51847 od = sd->s_attr.open;
51848 if (od) {
51849 - atomic_inc(&od->event);
51850 + atomic_inc_unchecked(&od->event);
51851 wake_up_interruptible(&od->poll);
51852 }
51853
51854 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
51855 index 3c9eb56..9dea5be 100644
51856 --- a/fs/sysfs/symlink.c
51857 +++ b/fs/sysfs/symlink.c
51858 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
51859
51860 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
51861 {
51862 - char *page = nd_get_link(nd);
51863 + const char *page = nd_get_link(nd);
51864 if (!IS_ERR(page))
51865 free_page((unsigned long)page);
51866 }
51867 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
51868 index c175b4d..8f36a16 100644
51869 --- a/fs/udf/misc.c
51870 +++ b/fs/udf/misc.c
51871 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
51872
51873 u8 udf_tag_checksum(const struct tag *t)
51874 {
51875 - u8 *data = (u8 *)t;
51876 + const u8 *data = (const u8 *)t;
51877 u8 checksum = 0;
51878 int i;
51879 for (i = 0; i < sizeof(struct tag); ++i)
51880 diff --git a/fs/utimes.c b/fs/utimes.c
51881 index bb0696a..552054b 100644
51882 --- a/fs/utimes.c
51883 +++ b/fs/utimes.c
51884 @@ -1,6 +1,7 @@
51885 #include <linux/compiler.h>
51886 #include <linux/file.h>
51887 #include <linux/fs.h>
51888 +#include <linux/security.h>
51889 #include <linux/linkage.h>
51890 #include <linux/mount.h>
51891 #include <linux/namei.h>
51892 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
51893 goto mnt_drop_write_and_out;
51894 }
51895 }
51896 +
51897 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
51898 + error = -EACCES;
51899 + goto mnt_drop_write_and_out;
51900 + }
51901 +
51902 mutex_lock(&inode->i_mutex);
51903 error = notify_change(path->dentry, &newattrs);
51904 mutex_unlock(&inode->i_mutex);
51905 diff --git a/fs/xattr.c b/fs/xattr.c
51906 index e21c119..21dfc7c 100644
51907 --- a/fs/xattr.c
51908 +++ b/fs/xattr.c
51909 @@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
51910 * Extended attribute SET operations
51911 */
51912 static long
51913 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
51914 +setxattr(struct path *path, const char __user *name, const void __user *value,
51915 size_t size, int flags)
51916 {
51917 int error;
51918 @@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
51919 posix_acl_fix_xattr_from_user(kvalue, size);
51920 }
51921
51922 - error = vfs_setxattr(d, kname, kvalue, size, flags);
51923 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
51924 + error = -EACCES;
51925 + goto out;
51926 + }
51927 +
51928 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
51929 out:
51930 if (vvalue)
51931 vfree(vvalue);
51932 @@ -376,7 +381,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
51933 return error;
51934 error = mnt_want_write(path.mnt);
51935 if (!error) {
51936 - error = setxattr(path.dentry, name, value, size, flags);
51937 + error = setxattr(&path, name, value, size, flags);
51938 mnt_drop_write(path.mnt);
51939 }
51940 path_put(&path);
51941 @@ -395,7 +400,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
51942 return error;
51943 error = mnt_want_write(path.mnt);
51944 if (!error) {
51945 - error = setxattr(path.dentry, name, value, size, flags);
51946 + error = setxattr(&path, name, value, size, flags);
51947 mnt_drop_write(path.mnt);
51948 }
51949 path_put(&path);
51950 @@ -406,16 +411,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
51951 const void __user *,value, size_t, size, int, flags)
51952 {
51953 struct fd f = fdget(fd);
51954 - struct dentry *dentry;
51955 int error = -EBADF;
51956
51957 if (!f.file)
51958 return error;
51959 - dentry = f.file->f_path.dentry;
51960 - audit_inode(NULL, dentry, 0);
51961 + audit_inode(NULL, f.file->f_path.dentry, 0);
51962 error = mnt_want_write_file(f.file);
51963 if (!error) {
51964 - error = setxattr(dentry, name, value, size, flags);
51965 + error = setxattr(&f.file->f_path, name, value, size, flags);
51966 mnt_drop_write_file(f.file);
51967 }
51968 fdput(f);
51969 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
51970 index 9fbea87..6b19972 100644
51971 --- a/fs/xattr_acl.c
51972 +++ b/fs/xattr_acl.c
51973 @@ -76,8 +76,8 @@ struct posix_acl *
51974 posix_acl_from_xattr(struct user_namespace *user_ns,
51975 const void *value, size_t size)
51976 {
51977 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
51978 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
51979 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
51980 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
51981 int count;
51982 struct posix_acl *acl;
51983 struct posix_acl_entry *acl_e;
51984 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
51985 index 83d0cf3..2ef526b 100644
51986 --- a/fs/xfs/xfs_bmap.c
51987 +++ b/fs/xfs/xfs_bmap.c
51988 @@ -189,7 +189,7 @@ xfs_bmap_validate_ret(
51989 int nmap,
51990 int ret_nmap);
51991 #else
51992 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
51993 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
51994 #endif /* DEBUG */
51995
51996 STATIC int
51997 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
51998 index 1b9fc3e..e1bdde0 100644
51999 --- a/fs/xfs/xfs_dir2_sf.c
52000 +++ b/fs/xfs/xfs_dir2_sf.c
52001 @@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
52002 }
52003
52004 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
52005 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
52006 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
52007 + char name[sfep->namelen];
52008 + memcpy(name, sfep->name, sfep->namelen);
52009 + if (filldir(dirent, name, sfep->namelen,
52010 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
52011 + *offset = off & 0x7fffffff;
52012 + return 0;
52013 + }
52014 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
52015 off & 0x7fffffff, ino, DT_UNKNOWN)) {
52016 *offset = off & 0x7fffffff;
52017 return 0;
52018 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
52019 index c1df3c6..f987db6 100644
52020 --- a/fs/xfs/xfs_ioctl.c
52021 +++ b/fs/xfs/xfs_ioctl.c
52022 @@ -126,7 +126,7 @@ xfs_find_handle(
52023 }
52024
52025 error = -EFAULT;
52026 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
52027 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
52028 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
52029 goto out_put;
52030
52031 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
52032 index 4e00cf0..3374374 100644
52033 --- a/fs/xfs/xfs_iops.c
52034 +++ b/fs/xfs/xfs_iops.c
52035 @@ -394,7 +394,7 @@ xfs_vn_put_link(
52036 struct nameidata *nd,
52037 void *p)
52038 {
52039 - char *s = nd_get_link(nd);
52040 + const char *s = nd_get_link(nd);
52041
52042 if (!IS_ERR(s))
52043 kfree(s);
52044 diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
52045 index deee09e..1c74028 100644
52046 --- a/fs/xfs/xfs_mount.h
52047 +++ b/fs/xfs/xfs_mount.h
52048 @@ -193,7 +193,7 @@ typedef struct xfs_mount {
52049 #ifdef HAVE_PERCPU_SB
52050 xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */
52051 unsigned long m_icsb_counters; /* disabled per-cpu counters */
52052 - struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */
52053 + notifier_block_no_const m_icsb_notifier; /* hotplug cpu notifier */
52054 struct mutex m_icsb_mutex; /* balancer sync lock */
52055 #endif
52056 struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
52057 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
52058 new file mode 100644
52059 index 0000000..3900064
52060 --- /dev/null
52061 +++ b/grsecurity/Kconfig
52062 @@ -0,0 +1,964 @@
52063 +#
52064 +# grsecurity configuration
52065 +#
52066 +menu "Memory Protections"
52067 +depends on GRKERNSEC
52068 +
52069 +config GRKERNSEC_KMEM
52070 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
52071 + default y if GRKERNSEC_CONFIG_AUTO
52072 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52073 + help
52074 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52075 + be written to or read from to modify or leak the contents of the running
52076 + kernel. /dev/port will also not be allowed to be opened. If you have module
52077 + support disabled, enabling this will close up four ways that are
52078 + currently used to insert malicious code into the running kernel.
52079 + Even with all these features enabled, we still highly recommend that
52080 + you use the RBAC system, as it is still possible for an attacker to
52081 + modify the running kernel through privileged I/O granted by ioperm/iopl.
52082 + If you are not using XFree86, you may be able to stop this additional
52083 + case by enabling the 'Disable privileged I/O' option. Though nothing
52084 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52085 + but only to video memory, which is the only writing we allow in this
52086 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
52087 + not be allowed to mprotect it with PROT_WRITE later.
52088 + It is highly recommended that you say Y here if you meet all the
52089 + conditions above.
52090 +
52091 +config GRKERNSEC_VM86
52092 + bool "Restrict VM86 mode"
52093 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52094 + depends on X86_32
52095 +
52096 + help
52097 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52098 + make use of a special execution mode on 32bit x86 processors called
52099 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52100 + video cards and will still work with this option enabled. The purpose
52101 + of the option is to prevent exploitation of emulation errors in
52102 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
52103 + Nearly all users should be able to enable this option.
52104 +
52105 +config GRKERNSEC_IO
52106 + bool "Disable privileged I/O"
52107 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52108 + depends on X86
52109 + select RTC_CLASS
52110 + select RTC_INTF_DEV
52111 + select RTC_DRV_CMOS
52112 +
52113 + help
52114 + If you say Y here, all ioperm and iopl calls will return an error.
52115 + Ioperm and iopl can be used to modify the running kernel.
52116 + Unfortunately, some programs need this access to operate properly,
52117 + the most notable of which are XFree86 and hwclock. hwclock can be
52118 + remedied by having RTC support in the kernel, so real-time
52119 + clock support is enabled if this option is enabled, to ensure
52120 + that hwclock operates correctly. XFree86 still will not
52121 + operate correctly with this option enabled, so DO NOT CHOOSE Y
52122 + IF YOU USE XFree86. If you use XFree86 and you still want to
52123 + protect your kernel against modification, use the RBAC system.
52124 +
52125 +config GRKERNSEC_JIT_HARDEN
52126 + bool "Harden BPF JIT against spray attacks"
52127 + default y if GRKERNSEC_CONFIG_AUTO
52128 + depends on BPF_JIT
52129 + help
52130 + If you say Y here, the native code generated by the kernel's Berkeley
52131 + Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
52132 + attacks that attempt to fit attacker-beneficial instructions in
52133 + 32bit immediate fields of JIT-generated native instructions. The
52134 + attacker will generally aim to cause an unintended instruction sequence
52135 + of JIT-generated native code to execute by jumping into the middle of
52136 + a generated instruction. This feature effectively randomizes the 32bit
52137 + immediate constants present in the generated code to thwart such attacks.
52138 +
52139 + If you're using KERNEXEC, it's recommended that you enable this option
52140 + to supplement the hardening of the kernel.
52141 +
52142 +config GRKERNSEC_PROC_MEMMAP
52143 + bool "Harden ASLR against information leaks and entropy reduction"
52144 + default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
52145 + depends on PAX_NOEXEC || PAX_ASLR
52146 + help
52147 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52148 + give no information about the addresses of its mappings if
52149 + PaX features that rely on random addresses are enabled on the task.
52150 + In addition to sanitizing this information and disabling other
52151 + dangerous sources of information, this option causes reads of sensitive
52152 + /proc/<pid> entries to be restricted where the file descriptor was opened
52153 + in a different task than the one performing the read. Such attempts are logged.
52154 + This option also limits argv/env strings for suid/sgid binaries
52155 + to 512KB to prevent a complete exhaustion of the stack entropy provided
52156 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
52157 + binaries to prevent alternative mmap layouts from being abused.
52158 +
52159 + If you use PaX it is essential that you say Y here as it closes up
52160 + several holes that make full ASLR useless locally.
52161 +
52162 +config GRKERNSEC_BRUTE
52163 + bool "Deter exploit bruteforcing"
52164 + default y if GRKERNSEC_CONFIG_AUTO
52165 + help
52166 + If you say Y here, attempts to bruteforce exploits against forking
52167 + daemons such as apache or sshd, as well as against suid/sgid binaries
52168 + will be deterred. When a child of a forking daemon is killed by PaX
52169 + or crashes due to an illegal instruction or other suspicious signal,
52170 + the parent process will be delayed 30 seconds upon every subsequent
52171 + fork until the administrator is able to assess the situation and
52172 + restart the daemon.
52173 + In the suid/sgid case, the attempt is logged, the user has all their
52174 + processes terminated, and they are prevented from executing any further
52175 + processes for 15 minutes.
52176 + It is recommended that you also enable signal logging in the auditing
52177 + section so that logs are generated when a process triggers a suspicious
52178 + signal.
52179 + If the sysctl option is enabled, a sysctl option with name
52180 + "deter_bruteforce" is created.
52181 +
52182 +
52183 +config GRKERNSEC_MODHARDEN
52184 + bool "Harden module auto-loading"
52185 + default y if GRKERNSEC_CONFIG_AUTO
52186 + depends on MODULES
52187 + help
52188 + If you say Y here, module auto-loading in response to use of some
52189 + feature implemented by an unloaded module will be restricted to
52190 + root users. Enabling this option helps defend against attacks
52191 + by unprivileged users who abuse the auto-loading behavior to
52192 + cause a vulnerable module to load that is then exploited.
52193 +
52194 + If this option prevents a legitimate use of auto-loading for a
52195 + non-root user, the administrator can execute modprobe manually
52196 + with the exact name of the module mentioned in the alert log.
52197 + Alternatively, the administrator can add the module to the list
52198 + of modules loaded at boot by modifying init scripts.
52199 +
52200 + Modification of init scripts will most likely be needed on
52201 + Ubuntu servers with encrypted home directory support enabled,
52202 + as the first non-root user logging in will cause the ecb(aes),
52203 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52204 +
52205 +config GRKERNSEC_HIDESYM
52206 + bool "Hide kernel symbols"
52207 + default y if GRKERNSEC_CONFIG_AUTO
52208 + select PAX_USERCOPY_SLABS
52209 + help
52210 + If you say Y here, getting information on loaded modules, and
52211 + displaying all kernel symbols through a syscall will be restricted
52212 + to users with CAP_SYS_MODULE. For software compatibility reasons,
52213 + /proc/kallsyms will be restricted to the root user. The RBAC
52214 + system can hide that entry even from root.
52215 +
52216 + This option also prevents leaking of kernel addresses through
52217 + several /proc entries.
52218 +
52219 + Note that this option is only effective provided the following
52220 + conditions are met:
52221 + 1) The kernel using grsecurity is not precompiled by some distribution
52222 + 2) You have also enabled GRKERNSEC_DMESG
52223 + 3) You are using the RBAC system and hiding other files such as your
52224 + kernel image and System.map. Alternatively, enabling this option
52225 + causes the permissions on /boot, /lib/modules, and the kernel
52226 + source directory to change at compile time to prevent
52227 + reading by non-root users.
52228 + If the above conditions are met, this option will aid in providing a
52229 + useful protection against local kernel exploitation of overflows
52230 + and arbitrary read/write vulnerabilities.
52231 +
52232 +config GRKERNSEC_KERN_LOCKOUT
52233 + bool "Active kernel exploit response"
52234 + default y if GRKERNSEC_CONFIG_AUTO
52235 + depends on X86 || ARM || PPC || SPARC
52236 + help
52237 + If you say Y here, when a PaX alert is triggered due to suspicious
52238 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52239 + or an OOPS occurs due to bad memory accesses, instead of just
52240 + terminating the offending process (and potentially allowing
52241 + a subsequent exploit from the same user), we will take one of two
52242 + actions:
52243 + If the user was root, we will panic the system
52244 + If the user was non-root, we will log the attempt, terminate
52245 + all processes owned by the user, then prevent them from creating
52246 + any new processes until the system is restarted
52247 + This deters repeated kernel exploitation/bruteforcing attempts
52248 + and is useful for later forensics.
52249 +
52250 +endmenu
52251 +menu "Role Based Access Control Options"
52252 +depends on GRKERNSEC
52253 +
52254 +config GRKERNSEC_RBAC_DEBUG
52255 + bool
52256 +
52257 +config GRKERNSEC_NO_RBAC
52258 + bool "Disable RBAC system"
52259 + help
52260 + If you say Y here, the /dev/grsec device will be removed from the kernel,
52261 + preventing the RBAC system from being enabled. You should only say Y
52262 + here if you have no intention of using the RBAC system, so as to prevent
52263 + an attacker with root access from misusing the RBAC system to hide files
52264 + and processes when loadable module support and /dev/[k]mem have been
52265 + locked down.
52266 +
52267 +config GRKERNSEC_ACL_HIDEKERN
52268 + bool "Hide kernel processes"
52269 + help
52270 + If you say Y here, all kernel threads will be hidden to all
52271 + processes but those whose subject has the "view hidden processes"
52272 + flag.
52273 +
52274 +config GRKERNSEC_ACL_MAXTRIES
52275 + int "Maximum tries before password lockout"
52276 + default 3
52277 + help
52278 + This option enforces the maximum number of times a user can attempt
52279 + to authorize themselves with the grsecurity RBAC system before being
52280 + denied the ability to attempt authorization again for a specified time.
52281 + The lower the number, the harder it will be to brute-force a password.
52282 +
52283 +config GRKERNSEC_ACL_TIMEOUT
52284 + int "Time to wait after max password tries, in seconds"
52285 + default 30
52286 + help
52287 + This option specifies the time the user must wait after attempting to
52288 + authorize to the RBAC system with the maximum number of invalid
52289 + passwords. The higher the number, the harder it will be to brute-force
52290 + a password.
52291 +
52292 +endmenu
52293 +menu "Filesystem Protections"
52294 +depends on GRKERNSEC
52295 +
52296 +config GRKERNSEC_PROC
52297 + bool "Proc restrictions"
52298 + default y if GRKERNSEC_CONFIG_AUTO
52299 + help
52300 + If you say Y here, the permissions of the /proc filesystem
52301 + will be altered to enhance system security and privacy. You MUST
52302 + choose either a user only restriction or a user and group restriction.
52303 + Depending upon the option you choose, you can either restrict users to
52304 + see only the processes they themselves run, or choose a group that can
52305 + view all processes and files normally restricted to root if you choose
52306 + the "restrict to user only" option. NOTE: If you're running identd or
52307 + ntpd as a non-root user, you will have to run it as the group you
52308 + specify here.
52309 +
52310 +config GRKERNSEC_PROC_USER
52311 + bool "Restrict /proc to user only"
52312 + depends on GRKERNSEC_PROC
52313 + help
52314 + If you say Y here, non-root users will only be able to view their own
52315 + processes, and restricts them from viewing network-related information,
52316 + and viewing kernel symbol and module information.
52317 +
52318 +config GRKERNSEC_PROC_USERGROUP
52319 + bool "Allow special group"
52320 + default y if GRKERNSEC_CONFIG_AUTO
52321 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
52322 + help
52323 + If you say Y here, you will be able to select a group that will be
52324 + able to view all processes and network-related information. If you've
52325 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
52326 + remain hidden. This option is useful if you want to run identd as
52327 + a non-root user. The group you select may also be chosen at boot time
52328 + via "grsec_proc_gid=" on the kernel commandline.
52329 +
52330 +config GRKERNSEC_PROC_GID
52331 + int "GID for special group"
52332 + depends on GRKERNSEC_PROC_USERGROUP
52333 + default 1001
52334 +
52335 +config GRKERNSEC_PROC_ADD
52336 + bool "Additional restrictions"
52337 + default y if GRKERNSEC_CONFIG_AUTO
52338 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
52339 + help
52340 + If you say Y here, additional restrictions will be placed on
52341 + /proc that keep normal users from viewing device information and
52342 + slabinfo information that could be useful for exploits.
52343 +
52344 +config GRKERNSEC_LINK
52345 + bool "Linking restrictions"
52346 + default y if GRKERNSEC_CONFIG_AUTO
52347 + help
52348 + If you say Y here, /tmp race exploits will be prevented, since users
52349 + will no longer be able to follow symlinks owned by other users in
52350 + world-writable +t directories (e.g. /tmp), unless the owner of the
52351 + symlink is the owner of the directory. Users will also not be
52352 + able to hardlink to files they do not own. If the sysctl option is
52353 + enabled, a sysctl option with name "linking_restrictions" is created.
52354 +
52355 +config GRKERNSEC_SYMLINKOWN
52356 + bool "Kernel-enforced SymlinksIfOwnerMatch"
52357 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
52358 + help
52359 + Apache's SymlinksIfOwnerMatch option has an inherent race condition
52360 + that prevents it from being used as a security feature. As Apache
52361 + verifies the symlink by performing a stat() against the target of
52362 + the symlink before it is followed, an attacker can setup a symlink
52363 + to point to a same-owned file, then replace the symlink with one
52364 + that targets another user's file just after Apache "validates" the
52365 + symlink -- a classic TOCTOU race. If you say Y here, a complete,
52366 + race-free replacement for Apache's "SymlinksIfOwnerMatch" option
52367 + will be in place for the group you specify. If the sysctl option
52368 + is enabled, a sysctl option with name "enforce_symlinksifowner" is
52369 + created.
52370 +
52371 +config GRKERNSEC_SYMLINKOWN_GID
52372 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
52373 + depends on GRKERNSEC_SYMLINKOWN
52374 + default 1006
52375 + help
52376 + Setting this GID determines what group kernel-enforced
52377 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
52378 + is enabled, a sysctl option with name "symlinkown_gid" is created.
52379 +
52380 +config GRKERNSEC_FIFO
52381 + bool "FIFO restrictions"
52382 + default y if GRKERNSEC_CONFIG_AUTO
52383 + help
52384 + If you say Y here, users will not be able to write to FIFOs they don't
52385 + own in world-writable +t directories (e.g. /tmp), unless the owner of
52386 + the FIFO is the same owner of the directory it's held in. If the sysctl
52387 + option is enabled, a sysctl option with name "fifo_restrictions" is
52388 + created.
52389 +
52390 +config GRKERNSEC_SYSFS_RESTRICT
52391 + bool "Sysfs/debugfs restriction"
52392 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52393 + depends on SYSFS
52394 + help
52395 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
52396 + any filesystem normally mounted under it (e.g. debugfs) will be
52397 + mostly accessible only by root. These filesystems generally provide access
52398 + to hardware and debug information that isn't appropriate for unprivileged
52399 + users of the system. Sysfs and debugfs have also become a large source
52400 + of new vulnerabilities, ranging from infoleaks to local compromise.
52401 + There has been very little oversight with an eye toward security involved
52402 + in adding new exporters of information to these filesystems, so their
52403 + use is discouraged.
52404 + For reasons of compatibility, a few directories have been whitelisted
52405 + for access by non-root users:
52406 + /sys/fs/selinux
52407 + /sys/fs/fuse
52408 + /sys/devices/system/cpu
52409 +
52410 +config GRKERNSEC_ROFS
52411 + bool "Runtime read-only mount protection"
52412 + help
52413 + If you say Y here, a sysctl option with name "romount_protect" will
52414 + be created. By setting this option to 1 at runtime, filesystems
52415 + will be protected in the following ways:
52416 + * No new writable mounts will be allowed
52417 + * Existing read-only mounts won't be able to be remounted read/write
52418 + * Write operations will be denied on all block devices
52419 + This option acts independently of grsec_lock: once it is set to 1,
52420 + it cannot be turned off. Therefore, please be mindful of the resulting
52421 + behavior if this option is enabled in an init script on a read-only
52422 + filesystem. This feature is mainly intended for secure embedded systems.
52423 +
52424 +config GRKERNSEC_CHROOT
52425 + bool "Chroot jail restrictions"
52426 + default y if GRKERNSEC_CONFIG_AUTO
52427 + help
52428 + If you say Y here, you will be able to choose several options that will
52429 + make breaking out of a chrooted jail much more difficult. If you
52430 + encounter no software incompatibilities with the following options, it
52431 + is recommended that you enable each one.
52432 +
52433 +config GRKERNSEC_CHROOT_MOUNT
52434 + bool "Deny mounts"
52435 + default y if GRKERNSEC_CONFIG_AUTO
52436 + depends on GRKERNSEC_CHROOT
52437 + help
52438 + If you say Y here, processes inside a chroot will not be able to
52439 + mount or remount filesystems. If the sysctl option is enabled, a
52440 + sysctl option with name "chroot_deny_mount" is created.
52441 +
52442 +config GRKERNSEC_CHROOT_DOUBLE
52443 + bool "Deny double-chroots"
52444 + default y if GRKERNSEC_CONFIG_AUTO
52445 + depends on GRKERNSEC_CHROOT
52446 + help
52447 + If you say Y here, processes inside a chroot will not be able to chroot
52448 + again outside the chroot. This is a widely used method of breaking
52449 + out of a chroot jail and should not be allowed. If the sysctl
52450 + option is enabled, a sysctl option with name
52451 + "chroot_deny_chroot" is created.
52452 +
52453 +config GRKERNSEC_CHROOT_PIVOT
52454 + bool "Deny pivot_root in chroot"
52455 + default y if GRKERNSEC_CONFIG_AUTO
52456 + depends on GRKERNSEC_CHROOT
52457 + help
52458 + If you say Y here, processes inside a chroot will not be able to use
52459 + a function called pivot_root() that was introduced in Linux 2.3.41. It
52460 + works similar to chroot in that it changes the root filesystem. This
52461 + function could be misused in a chrooted process to attempt to break out
52462 + of the chroot, and therefore should not be allowed. If the sysctl
52463 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
52464 + created.
52465 +
52466 +config GRKERNSEC_CHROOT_CHDIR
52467 + bool "Enforce chdir(\"/\") on all chroots"
52468 + default y if GRKERNSEC_CONFIG_AUTO
52469 + depends on GRKERNSEC_CHROOT
52470 + help
52471 + If you say Y here, the current working directory of all newly-chrooted
52472 + applications will be set to the root directory of the chroot.
52473 + The man page on chroot(2) states:
52474 + Note that this call does not change the current working
52475 + directory, so that `.' can be outside the tree rooted at
52476 + `/'. In particular, the super-user can escape from a
52477 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
52478 +
52479 + It is recommended that you say Y here, since it's not known to break
52480 + any software. If the sysctl option is enabled, a sysctl option with
52481 + name "chroot_enforce_chdir" is created.
52482 +
52483 +config GRKERNSEC_CHROOT_CHMOD
52484 + bool "Deny (f)chmod +s"
52485 + default y if GRKERNSEC_CONFIG_AUTO
52486 + depends on GRKERNSEC_CHROOT
52487 + help
52488 + If you say Y here, processes inside a chroot will not be able to chmod
52489 + or fchmod files to make them have suid or sgid bits. This protects
52490 + against another published method of breaking a chroot. If the sysctl
52491 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
52492 + created.
52493 +
52494 +config GRKERNSEC_CHROOT_FCHDIR
52495 + bool "Deny fchdir out of chroot"
52496 + default y if GRKERNSEC_CONFIG_AUTO
52497 + depends on GRKERNSEC_CHROOT
52498 + help
52499 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
52500 + to a file descriptor of the chrooting process that points to a directory
52501 + outside the filesystem will be stopped. If the sysctl option
52502 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
52503 +
52504 +config GRKERNSEC_CHROOT_MKNOD
52505 + bool "Deny mknod"
52506 + default y if GRKERNSEC_CONFIG_AUTO
52507 + depends on GRKERNSEC_CHROOT
52508 + help
52509 + If you say Y here, processes inside a chroot will not be allowed to
52510 + mknod. The problem with using mknod inside a chroot is that it
52511 + would allow an attacker to create a device entry that is the same
52512 + as one on the physical root of your system, which could range from
52513 + anything from the console device to a device for your harddrive (which
52514 + they could then use to wipe the drive or steal data). It is recommended
52515 + that you say Y here, unless you run into software incompatibilities.
52516 + If the sysctl option is enabled, a sysctl option with name
52517 + "chroot_deny_mknod" is created.
52518 +
52519 +config GRKERNSEC_CHROOT_SHMAT
52520 + bool "Deny shmat() out of chroot"
52521 + default y if GRKERNSEC_CONFIG_AUTO
52522 + depends on GRKERNSEC_CHROOT
52523 + help
52524 + If you say Y here, processes inside a chroot will not be able to attach
52525 + to shared memory segments that were created outside of the chroot jail.
52526 + It is recommended that you say Y here. If the sysctl option is enabled,
52527 + a sysctl option with name "chroot_deny_shmat" is created.
52528 +
52529 +config GRKERNSEC_CHROOT_UNIX
52530 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
52531 + default y if GRKERNSEC_CONFIG_AUTO
52532 + depends on GRKERNSEC_CHROOT
52533 + help
52534 + If you say Y here, processes inside a chroot will not be able to
52535 + connect to abstract (meaning not belonging to a filesystem) Unix
52536 + domain sockets that were bound outside of a chroot. It is recommended
52537 + that you say Y here. If the sysctl option is enabled, a sysctl option
52538 + with name "chroot_deny_unix" is created.
52539 +
52540 +config GRKERNSEC_CHROOT_FINDTASK
52541 + bool "Protect outside processes"
52542 + default y if GRKERNSEC_CONFIG_AUTO
52543 + depends on GRKERNSEC_CHROOT
52544 + help
52545 + If you say Y here, processes inside a chroot will not be able to
52546 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
52547 + getsid, or view any process outside of the chroot. If the sysctl
52548 + option is enabled, a sysctl option with name "chroot_findtask" is
52549 + created.
52550 +
52551 +config GRKERNSEC_CHROOT_NICE
52552 + bool "Restrict priority changes"
52553 + default y if GRKERNSEC_CONFIG_AUTO
52554 + depends on GRKERNSEC_CHROOT
52555 + help
52556 + If you say Y here, processes inside a chroot will not be able to raise
52557 + the priority of processes in the chroot, or alter the priority of
52558 + processes outside the chroot. This provides more security than simply
52559 + removing CAP_SYS_NICE from the process' capability set. If the
52560 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
52561 + is created.
52562 +
52563 +config GRKERNSEC_CHROOT_SYSCTL
52564 + bool "Deny sysctl writes"
52565 + default y if GRKERNSEC_CONFIG_AUTO
52566 + depends on GRKERNSEC_CHROOT
52567 + help
52568 + If you say Y here, an attacker in a chroot will not be able to
52569 + write to sysctl entries, either by sysctl(2) or through a /proc
52570 + interface. It is strongly recommended that you say Y here. If the
52571 + sysctl option is enabled, a sysctl option with name
52572 + "chroot_deny_sysctl" is created.
52573 +
52574 +config GRKERNSEC_CHROOT_CAPS
52575 + bool "Capability restrictions"
52576 + default y if GRKERNSEC_CONFIG_AUTO
52577 + depends on GRKERNSEC_CHROOT
52578 + help
52579 + If you say Y here, the capabilities on all processes within a
52580 + chroot jail will be lowered to stop module insertion, raw i/o,
52581 + system and net admin tasks, rebooting the system, modifying immutable
52582 + files, modifying IPC owned by another, and changing the system time.
52583 + This is left an option because it can break some apps. Disable this
52584 + if your chrooted apps are having problems performing those kinds of
52585 + tasks. If the sysctl option is enabled, a sysctl option with
52586 + name "chroot_caps" is created.
52587 +
52588 +endmenu
52589 +menu "Kernel Auditing"
52590 +depends on GRKERNSEC
52591 +
52592 +config GRKERNSEC_AUDIT_GROUP
52593 + bool "Single group for auditing"
52594 + help
52595 + If you say Y here, the exec, chdir, and (un)mount logging features
52596 + will only operate on a group you specify. This option is recommended
52597 + if you only want to watch certain users instead of having a large
52598 + amount of logs from the entire system. If the sysctl option is enabled,
52599 + a sysctl option with name "audit_group" is created.
52600 +
52601 +config GRKERNSEC_AUDIT_GID
52602 + int "GID for auditing"
52603 + depends on GRKERNSEC_AUDIT_GROUP
52604 + default 1007
52605 +
52606 +config GRKERNSEC_EXECLOG
52607 + bool "Exec logging"
52608 + help
52609 + If you say Y here, all execve() calls will be logged (since the
52610 + other exec*() calls are frontends to execve(), all execution
52611 + will be logged). Useful for shell-servers that like to keep track
52612 + of their users. If the sysctl option is enabled, a sysctl option with
52613 + name "exec_logging" is created.
52614 + WARNING: This option when enabled will produce a LOT of logs, especially
52615 + on an active system.
52616 +
52617 +config GRKERNSEC_RESLOG
52618 + bool "Resource logging"
52619 + default y if GRKERNSEC_CONFIG_AUTO
52620 + help
52621 + If you say Y here, all attempts to overstep resource limits will
52622 + be logged with the resource name, the requested size, and the current
52623 + limit. It is highly recommended that you say Y here. If the sysctl
52624 + option is enabled, a sysctl option with name "resource_logging" is
52625 + created. If the RBAC system is enabled, the sysctl value is ignored.
52626 +
52627 +config GRKERNSEC_CHROOT_EXECLOG
52628 + bool "Log execs within chroot"
52629 + help
52630 + If you say Y here, all executions inside a chroot jail will be logged
52631 + to syslog. This can cause a large amount of logs if certain
52632 + applications (eg. djb's daemontools) are installed on the system, and
52633 + is therefore left as an option. If the sysctl option is enabled, a
52634 + sysctl option with name "chroot_execlog" is created.
52635 +
52636 +config GRKERNSEC_AUDIT_PTRACE
52637 + bool "Ptrace logging"
52638 + help
52639 + If you say Y here, all attempts to attach to a process via ptrace
52640 + will be logged. If the sysctl option is enabled, a sysctl option
52641 + with name "audit_ptrace" is created.
52642 +
52643 +config GRKERNSEC_AUDIT_CHDIR
52644 + bool "Chdir logging"
52645 + help
52646 + If you say Y here, all chdir() calls will be logged. If the sysctl
52647 + option is enabled, a sysctl option with name "audit_chdir" is created.
52648 +
52649 +config GRKERNSEC_AUDIT_MOUNT
52650 + bool "(Un)Mount logging"
52651 + help
52652 + If you say Y here, all mounts and unmounts will be logged. If the
52653 + sysctl option is enabled, a sysctl option with name "audit_mount" is
52654 + created.
52655 +
52656 +config GRKERNSEC_SIGNAL
52657 + bool "Signal logging"
52658 + default y if GRKERNSEC_CONFIG_AUTO
52659 + help
52660 + If you say Y here, certain important signals will be logged, such as
52661 + SIGSEGV, which will as a result inform you of when a error in a program
52662 + occurred, which in some cases could mean a possible exploit attempt.
52663 + If the sysctl option is enabled, a sysctl option with name
52664 + "signal_logging" is created.
52665 +
52666 +config GRKERNSEC_FORKFAIL
52667 + bool "Fork failure logging"
52668 + help
52669 + If you say Y here, all failed fork() attempts will be logged.
52670 + This could suggest a fork bomb, or someone attempting to overstep
52671 + their process limit. If the sysctl option is enabled, a sysctl option
52672 + with name "forkfail_logging" is created.
52673 +
52674 +config GRKERNSEC_TIME
52675 + bool "Time change logging"
52676 + default y if GRKERNSEC_CONFIG_AUTO
52677 + help
52678 + If you say Y here, any changes of the system clock will be logged.
52679 + If the sysctl option is enabled, a sysctl option with name
52680 + "timechange_logging" is created.
52681 +
52682 +config GRKERNSEC_PROC_IPADDR
52683 + bool "/proc/<pid>/ipaddr support"
52684 + default y if GRKERNSEC_CONFIG_AUTO
52685 + help
52686 + If you say Y here, a new entry will be added to each /proc/<pid>
52687 + directory that contains the IP address of the person using the task.
52688 + The IP is carried across local TCP and AF_UNIX stream sockets.
52689 + This information can be useful for IDS/IPSes to perform remote response
52690 + to a local attack. The entry is readable by only the owner of the
52691 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
52692 + the RBAC system), and thus does not create privacy concerns.
52693 +
52694 +config GRKERNSEC_RWXMAP_LOG
52695 + bool 'Denied RWX mmap/mprotect logging'
52696 + default y if GRKERNSEC_CONFIG_AUTO
52697 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
52698 + help
52699 + If you say Y here, calls to mmap() and mprotect() with explicit
52700 + usage of PROT_WRITE and PROT_EXEC together will be logged when
52701 + denied by the PAX_MPROTECT feature. If the sysctl option is
52702 + enabled, a sysctl option with name "rwxmap_logging" is created.
52703 +
52704 +config GRKERNSEC_AUDIT_TEXTREL
52705 + bool 'ELF text relocations logging (READ HELP)'
52706 + depends on PAX_MPROTECT
52707 + help
52708 + If you say Y here, text relocations will be logged with the filename
52709 + of the offending library or binary. The purpose of the feature is
52710 + to help Linux distribution developers get rid of libraries and
52711 + binaries that need text relocations which hinder the future progress
52712 + of PaX. Only Linux distribution developers should say Y here, and
52713 + never on a production machine, as this option creates an information
52714 + leak that could aid an attacker in defeating the randomization of
52715 + a single memory region. If the sysctl option is enabled, a sysctl
52716 + option with name "audit_textrel" is created.
52717 +
52718 +endmenu
52719 +
52720 +menu "Executable Protections"
52721 +depends on GRKERNSEC
52722 +
52723 +config GRKERNSEC_DMESG
52724 + bool "Dmesg(8) restriction"
52725 + default y if GRKERNSEC_CONFIG_AUTO
52726 + help
52727 + If you say Y here, non-root users will not be able to use dmesg(8)
52728 + to view the contents of the kernel's circular log buffer.
52729 + The kernel's log buffer often contains kernel addresses and other
52730 + identifying information useful to an attacker in fingerprinting a
52731 + system for a targeted exploit.
52732 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
52733 + created.
52734 +
52735 +config GRKERNSEC_HARDEN_PTRACE
52736 + bool "Deter ptrace-based process snooping"
52737 + default y if GRKERNSEC_CONFIG_AUTO
52738 + help
52739 + If you say Y here, TTY sniffers and other malicious monitoring
52740 + programs implemented through ptrace will be defeated. If you
52741 + have been using the RBAC system, this option has already been
52742 + enabled for several years for all users, with the ability to make
52743 + fine-grained exceptions.
52744 +
52745 + This option only affects the ability of non-root users to ptrace
52746 + processes that are not a descendent of the ptracing process.
52747 + This means that strace ./binary and gdb ./binary will still work,
52748 + but attaching to arbitrary processes will not. If the sysctl
52749 + option is enabled, a sysctl option with name "harden_ptrace" is
52750 + created.
52751 +
52752 +config GRKERNSEC_PTRACE_READEXEC
52753 + bool "Require read access to ptrace sensitive binaries"
52754 + default y if GRKERNSEC_CONFIG_AUTO
52755 + help
52756 + If you say Y here, unprivileged users will not be able to ptrace unreadable
52757 + binaries. This option is useful in environments that
52758 + remove the read bits (e.g. file mode 4711) from suid binaries to
52759 + prevent infoleaking of their contents. This option adds
52760 + consistency to the use of that file mode, as the binary could normally
52761 + be read out when run without privileges while ptracing.
52762 +
52763 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
52764 + is created.
52765 +
52766 +config GRKERNSEC_SETXID
52767 + bool "Enforce consistent multithreaded privileges"
52768 + default y if GRKERNSEC_CONFIG_AUTO
52769 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
52770 + help
52771 + If you say Y here, a change from a root uid to a non-root uid
52772 + in a multithreaded application will cause the resulting uids,
52773 + gids, supplementary groups, and capabilities in that thread
52774 + to be propagated to the other threads of the process. In most
52775 + cases this is unnecessary, as glibc will emulate this behavior
52776 + on behalf of the application. Other libcs do not act in the
52777 + same way, allowing the other threads of the process to continue
52778 + running with root privileges. If the sysctl option is enabled,
52779 + a sysctl option with name "consistent_setxid" is created.
52780 +
52781 +config GRKERNSEC_TPE
52782 + bool "Trusted Path Execution (TPE)"
52783 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
52784 + help
52785 + If you say Y here, you will be able to choose a gid to add to the
52786 + supplementary groups of users you want to mark as "untrusted."
52787 + These users will not be able to execute any files that are not in
52788 + root-owned directories writable only by root. If the sysctl option
52789 + is enabled, a sysctl option with name "tpe" is created.
52790 +
52791 +config GRKERNSEC_TPE_ALL
52792 + bool "Partially restrict all non-root users"
52793 + depends on GRKERNSEC_TPE
52794 + help
52795 + If you say Y here, all non-root users will be covered under
52796 + a weaker TPE restriction. This is separate from, and in addition to,
52797 + the main TPE options that you have selected elsewhere. Thus, if a
52798 + "trusted" GID is chosen, this restriction applies to even that GID.
52799 + Under this restriction, all non-root users will only be allowed to
52800 + execute files in directories they own that are not group or
52801 + world-writable, or in directories owned by root and writable only by
52802 + root. If the sysctl option is enabled, a sysctl option with name
52803 + "tpe_restrict_all" is created.
52804 +
52805 +config GRKERNSEC_TPE_INVERT
52806 + bool "Invert GID option"
52807 + depends on GRKERNSEC_TPE
52808 + help
52809 + If you say Y here, the group you specify in the TPE configuration will
52810 + decide what group TPE restrictions will be *disabled* for. This
52811 + option is useful if you want TPE restrictions to be applied to most
52812 + users on the system. If the sysctl option is enabled, a sysctl option
52813 + with name "tpe_invert" is created. Unlike other sysctl options, this
52814 + entry will default to on for backward-compatibility.
52815 +
52816 +config GRKERNSEC_TPE_GID
52817 + int
52818 + default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
52819 + default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
52820 +
52821 +config GRKERNSEC_TPE_UNTRUSTED_GID
52822 + int "GID for TPE-untrusted users"
52823 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
52824 + default 1005
52825 + help
52826 + Setting this GID determines what group TPE restrictions will be
52827 + *enabled* for. If the sysctl option is enabled, a sysctl option
52828 + with name "tpe_gid" is created.
52829 +
52830 +config GRKERNSEC_TPE_TRUSTED_GID
52831 + int "GID for TPE-trusted users"
52832 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
52833 + default 1005
52834 + help
52835 + Setting this GID determines what group TPE restrictions will be
52836 + *disabled* for. If the sysctl option is enabled, a sysctl option
52837 + with name "tpe_gid" is created.
52838 +
52839 +endmenu
52840 +menu "Network Protections"
52841 +depends on GRKERNSEC
52842 +
52843 +config GRKERNSEC_RANDNET
52844 + bool "Larger entropy pools"
52845 + default y if GRKERNSEC_CONFIG_AUTO
52846 + help
52847 + If you say Y here, the entropy pools used for many features of Linux
52848 + and grsecurity will be doubled in size. Since several grsecurity
52849 + features use additional randomness, it is recommended that you say Y
52850 + here. Saying Y here has a similar effect as modifying
52851 + /proc/sys/kernel/random/poolsize.
52852 +
52853 +config GRKERNSEC_BLACKHOLE
52854 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
52855 + default y if GRKERNSEC_CONFIG_AUTO
52856 + depends on NET
52857 + help
52858 + If you say Y here, neither TCP resets nor ICMP
52859 + destination-unreachable packets will be sent in response to packets
52860 + sent to ports for which no associated listening process exists.
52861 + This feature supports both IPV4 and IPV6 and exempts the
52862 + loopback interface from blackholing. Enabling this feature
52863 + makes a host more resilient to DoS attacks and reduces network
52864 + visibility against scanners.
52865 +
52866 + The blackhole feature as-implemented is equivalent to the FreeBSD
52867 + blackhole feature, as it prevents RST responses to all packets, not
52868 + just SYNs. Under most application behavior this causes no
52869 + problems, but applications (like haproxy) may not close certain
52870 + connections in a way that cleanly terminates them on the remote
52871 + end, leaving the remote host in LAST_ACK state. Because of this
52872 + side-effect and to prevent intentional LAST_ACK DoSes, this
52873 + feature also adds automatic mitigation against such attacks.
52874 + The mitigation drastically reduces the amount of time a socket
52875 + can spend in LAST_ACK state. If you're using haproxy and not
52876 + all servers it connects to have this option enabled, consider
52877 + disabling this feature on the haproxy host.
52878 +
52879 + If the sysctl option is enabled, two sysctl options with names
52880 + "ip_blackhole" and "lastack_retries" will be created.
52881 + While "ip_blackhole" takes the standard zero/non-zero on/off
52882 + toggle, "lastack_retries" uses the same kinds of values as
52883 + "tcp_retries1" and "tcp_retries2". The default value of 4
52884 + prevents a socket from lasting more than 45 seconds in LAST_ACK
52885 + state.
52886 +
52887 +config GRKERNSEC_SOCKET
52888 + bool "Socket restrictions"
52889 + depends on NET
52890 + help
52891 + If you say Y here, you will be able to choose from several options.
52892 + If you assign a GID on your system and add it to the supplementary
52893 + groups of users you want to restrict socket access to, this patch
52894 + will perform up to three things, based on the option(s) you choose.
52895 +
52896 +config GRKERNSEC_SOCKET_ALL
52897 + bool "Deny any sockets to group"
52898 + depends on GRKERNSEC_SOCKET
52899 + help
52900 + If you say Y here, you will be able to choose a GID of whose users will
52901 + be unable to connect to other hosts from your machine or run server
52902 + applications from your machine. If the sysctl option is enabled, a
52903 + sysctl option with name "socket_all" is created.
52904 +
52905 +config GRKERNSEC_SOCKET_ALL_GID
52906 + int "GID to deny all sockets for"
52907 + depends on GRKERNSEC_SOCKET_ALL
52908 + default 1004
52909 + help
52910 + Here you can choose the GID to disable socket access for. Remember to
52911 + add the users you want socket access disabled for to the GID
52912 + specified here. If the sysctl option is enabled, a sysctl option
52913 + with name "socket_all_gid" is created.
52914 +
52915 +config GRKERNSEC_SOCKET_CLIENT
52916 + bool "Deny client sockets to group"
52917 + depends on GRKERNSEC_SOCKET
52918 + help
52919 + If you say Y here, you will be able to choose a GID of whose users will
52920 + be unable to connect to other hosts from your machine, but will be
52921 + able to run servers. If this option is enabled, all users in the group
52922 + you specify will have to use passive mode when initiating ftp transfers
52923 + from the shell on your machine. If the sysctl option is enabled, a
52924 + sysctl option with name "socket_client" is created.
52925 +
52926 +config GRKERNSEC_SOCKET_CLIENT_GID
52927 + int "GID to deny client sockets for"
52928 + depends on GRKERNSEC_SOCKET_CLIENT
52929 + default 1003
52930 + help
52931 + Here you can choose the GID to disable client socket access for.
52932 + Remember to add the users you want client socket access disabled for to
52933 + the GID specified here. If the sysctl option is enabled, a sysctl
52934 + option with name "socket_client_gid" is created.
52935 +
52936 +config GRKERNSEC_SOCKET_SERVER
52937 + bool "Deny server sockets to group"
52938 + depends on GRKERNSEC_SOCKET
52939 + help
52940 + If you say Y here, you will be able to choose a GID of whose users will
52941 + be unable to run server applications from your machine. If the sysctl
52942 + option is enabled, a sysctl option with name "socket_server" is created.
52943 +
52944 +config GRKERNSEC_SOCKET_SERVER_GID
52945 + int "GID to deny server sockets for"
52946 + depends on GRKERNSEC_SOCKET_SERVER
52947 + default 1002
52948 + help
52949 + Here you can choose the GID to disable server socket access for.
52950 + Remember to add the users you want server socket access disabled for to
52951 + the GID specified here. If the sysctl option is enabled, a sysctl
52952 + option with name "socket_server_gid" is created.
52953 +
52954 +endmenu
52955 +menu "Sysctl Support"
52956 +depends on GRKERNSEC && SYSCTL
52957 +
52958 +config GRKERNSEC_SYSCTL
52959 + bool "Sysctl support"
52960 + default y if GRKERNSEC_CONFIG_AUTO
52961 + help
52962 + If you say Y here, you will be able to change the options that
52963 + grsecurity runs with at bootup, without having to recompile your
52964 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
52965 + to enable (1) or disable (0) various features. All the sysctl entries
52966 + are mutable until the "grsec_lock" entry is set to a non-zero value.
52967 + All features enabled in the kernel configuration are disabled at boot
52968 + if you do not say Y to the "Turn on features by default" option.
52969 + All options should be set at startup, and the grsec_lock entry should
52970 + be set to a non-zero value after all the options are set.
52971 + *THIS IS EXTREMELY IMPORTANT*
52972 +
52973 +config GRKERNSEC_SYSCTL_DISTRO
52974 + bool "Extra sysctl support for distro makers (READ HELP)"
52975 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
52976 + help
52977 + If you say Y here, additional sysctl options will be created
52978 + for features that affect processes running as root. Therefore,
52979 + it is critical when using this option that the grsec_lock entry be
52980 + enabled after boot. Only distros with prebuilt kernel packages
52981 + with this option enabled that can ensure grsec_lock is enabled
52982 + after boot should use this option.
52983 + *Failure to set grsec_lock after boot makes all grsec features
52984 + this option covers useless*
52985 +
52986 + Currently this option creates the following sysctl entries:
52987 + "Disable Privileged I/O": "disable_priv_io"
52988 +
52989 +config GRKERNSEC_SYSCTL_ON
52990 + bool "Turn on features by default"
52991 + default y if GRKERNSEC_CONFIG_AUTO
52992 + depends on GRKERNSEC_SYSCTL
52993 + help
52994 + If you say Y here, instead of having all features enabled in the
52995 + kernel configuration disabled at boot time, the features will be
52996 + enabled at boot time. It is recommended you say Y here unless
52997 + there is some reason you would want all sysctl-tunable features to
52998 + be disabled by default. As mentioned elsewhere, it is important
52999 + to enable the grsec_lock entry once you have finished modifying
53000 + the sysctl entries.
53001 +
53002 +endmenu
53003 +menu "Logging Options"
53004 +depends on GRKERNSEC
53005 +
53006 +config GRKERNSEC_FLOODTIME
53007 + int "Seconds in between log messages (minimum)"
53008 + default 10
53009 + help
53010 + This option allows you to enforce the number of seconds between
53011 + grsecurity log messages. The default should be suitable for most
53012 + people, however, if you choose to change it, choose a value small enough
53013 + to allow informative logs to be produced, but large enough to
53014 + prevent flooding.
53015 +
53016 +config GRKERNSEC_FLOODBURST
53017 + int "Number of messages in a burst (maximum)"
53018 + default 6
53019 + help
53020 + This option allows you to choose the maximum number of messages allowed
53021 + within the flood time interval you chose in a separate option. The
53022 + default should be suitable for most people, however if you find that
53023 + many of your logs are being interpreted as flooding, you may want to
53024 + raise this value.
53025 +
53026 +endmenu
53027 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
53028 new file mode 100644
53029 index 0000000..1b9afa9
53030 --- /dev/null
53031 +++ b/grsecurity/Makefile
53032 @@ -0,0 +1,38 @@
53033 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53034 +# during 2001-2009 it has been completely redesigned by Brad Spengler
53035 +# into an RBAC system
53036 +#
53037 +# All code in this directory and various hooks inserted throughout the kernel
53038 +# are copyright Brad Spengler - Open Source Security, Inc., and released
53039 +# under the GPL v2 or higher
53040 +
53041 +KBUILD_CFLAGS += -Werror
53042 +
53043 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53044 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
53045 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53046 +
53047 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53048 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53049 + gracl_learn.o grsec_log.o
53050 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53051 +
53052 +ifdef CONFIG_NET
53053 +obj-y += grsec_sock.o
53054 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53055 +endif
53056 +
53057 +ifndef CONFIG_GRKERNSEC
53058 +obj-y += grsec_disabled.o
53059 +endif
53060 +
53061 +ifdef CONFIG_GRKERNSEC_HIDESYM
53062 +extra-y := grsec_hidesym.o
53063 +$(obj)/grsec_hidesym.o:
53064 + @-chmod -f 500 /boot
53065 + @-chmod -f 500 /lib/modules
53066 + @-chmod -f 500 /lib64/modules
53067 + @-chmod -f 500 /lib32/modules
53068 + @-chmod -f 700 .
53069 + @echo ' grsec: protected kernel image paths'
53070 +endif
53071 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
53072 new file mode 100644
53073 index 0000000..b1810d9
53074 --- /dev/null
53075 +++ b/grsecurity/gracl.c
53076 @@ -0,0 +1,4056 @@
53077 +#include <linux/kernel.h>
53078 +#include <linux/module.h>
53079 +#include <linux/sched.h>
53080 +#include <linux/mm.h>
53081 +#include <linux/file.h>
53082 +#include <linux/fs.h>
53083 +#include <linux/namei.h>
53084 +#include <linux/mount.h>
53085 +#include <linux/tty.h>
53086 +#include <linux/proc_fs.h>
53087 +#include <linux/lglock.h>
53088 +#include <linux/slab.h>
53089 +#include <linux/vmalloc.h>
53090 +#include <linux/types.h>
53091 +#include <linux/sysctl.h>
53092 +#include <linux/netdevice.h>
53093 +#include <linux/ptrace.h>
53094 +#include <linux/gracl.h>
53095 +#include <linux/gralloc.h>
53096 +#include <linux/security.h>
53097 +#include <linux/grinternal.h>
53098 +#include <linux/pid_namespace.h>
53099 +#include <linux/stop_machine.h>
53100 +#include <linux/fdtable.h>
53101 +#include <linux/percpu.h>
53102 +#include <linux/lglock.h>
53103 +#include "../fs/mount.h"
53104 +
53105 +#include <asm/uaccess.h>
53106 +#include <asm/errno.h>
53107 +#include <asm/mman.h>
53108 +
53109 +extern struct lglock vfsmount_lock;
53110 +
53111 +static struct acl_role_db acl_role_set;
53112 +static struct name_db name_set;
53113 +static struct inodev_db inodev_set;
53114 +
53115 +/* for keeping track of userspace pointers used for subjects, so we
53116 + can share references in the kernel as well
53117 +*/
53118 +
53119 +static struct path real_root;
53120 +
53121 +static struct acl_subj_map_db subj_map_set;
53122 +
53123 +static struct acl_role_label *default_role;
53124 +
53125 +static struct acl_role_label *role_list;
53126 +
53127 +static u16 acl_sp_role_value;
53128 +
53129 +extern char *gr_shared_page[4];
53130 +static DEFINE_MUTEX(gr_dev_mutex);
53131 +DEFINE_RWLOCK(gr_inode_lock);
53132 +
53133 +struct gr_arg *gr_usermode;
53134 +
53135 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
53136 +
53137 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
53138 +extern void gr_clear_learn_entries(void);
53139 +
53140 +#ifdef CONFIG_GRKERNSEC_RESLOG
53141 +extern void gr_log_resource(const struct task_struct *task,
53142 + const int res, const unsigned long wanted, const int gt);
53143 +#endif
53144 +
53145 +unsigned char *gr_system_salt;
53146 +unsigned char *gr_system_sum;
53147 +
53148 +static struct sprole_pw **acl_special_roles = NULL;
53149 +static __u16 num_sprole_pws = 0;
53150 +
53151 +static struct acl_role_label *kernel_role = NULL;
53152 +
53153 +static unsigned int gr_auth_attempts = 0;
53154 +static unsigned long gr_auth_expires = 0UL;
53155 +
53156 +#ifdef CONFIG_NET
53157 +extern struct vfsmount *sock_mnt;
53158 +#endif
53159 +
53160 +extern struct vfsmount *pipe_mnt;
53161 +extern struct vfsmount *shm_mnt;
53162 +#ifdef CONFIG_HUGETLBFS
53163 +extern struct vfsmount *hugetlbfs_vfsmount;
53164 +#endif
53165 +
53166 +static struct acl_object_label *fakefs_obj_rw;
53167 +static struct acl_object_label *fakefs_obj_rwx;
53168 +
53169 +extern int gr_init_uidset(void);
53170 +extern void gr_free_uidset(void);
53171 +extern void gr_remove_uid(uid_t uid);
53172 +extern int gr_find_uid(uid_t uid);
53173 +
53174 +__inline__ int
53175 +gr_acl_is_enabled(void)
53176 +{
53177 + return (gr_status & GR_READY);
53178 +}
53179 +
53180 +#ifdef CONFIG_BTRFS_FS
53181 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53182 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53183 +#endif
53184 +
53185 +static inline dev_t __get_dev(const struct dentry *dentry)
53186 +{
53187 +#ifdef CONFIG_BTRFS_FS
53188 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53189 + return get_btrfs_dev_from_inode(dentry->d_inode);
53190 + else
53191 +#endif
53192 + return dentry->d_inode->i_sb->s_dev;
53193 +}
53194 +
53195 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53196 +{
53197 + return __get_dev(dentry);
53198 +}
53199 +
53200 +static char gr_task_roletype_to_char(struct task_struct *task)
53201 +{
53202 + switch (task->role->roletype &
53203 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
53204 + GR_ROLE_SPECIAL)) {
53205 + case GR_ROLE_DEFAULT:
53206 + return 'D';
53207 + case GR_ROLE_USER:
53208 + return 'U';
53209 + case GR_ROLE_GROUP:
53210 + return 'G';
53211 + case GR_ROLE_SPECIAL:
53212 + return 'S';
53213 + }
53214 +
53215 + return 'X';
53216 +}
53217 +
53218 +char gr_roletype_to_char(void)
53219 +{
53220 + return gr_task_roletype_to_char(current);
53221 +}
53222 +
53223 +__inline__ int
53224 +gr_acl_tpe_check(void)
53225 +{
53226 + if (unlikely(!(gr_status & GR_READY)))
53227 + return 0;
53228 + if (current->role->roletype & GR_ROLE_TPE)
53229 + return 1;
53230 + else
53231 + return 0;
53232 +}
53233 +
53234 +int
53235 +gr_handle_rawio(const struct inode *inode)
53236 +{
53237 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53238 + if (inode && S_ISBLK(inode->i_mode) &&
53239 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53240 + !capable(CAP_SYS_RAWIO))
53241 + return 1;
53242 +#endif
53243 + return 0;
53244 +}
53245 +
53246 +static int
53247 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
53248 +{
53249 + if (likely(lena != lenb))
53250 + return 0;
53251 +
53252 + return !memcmp(a, b, lena);
53253 +}
53254 +
53255 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
53256 +{
53257 + *buflen -= namelen;
53258 + if (*buflen < 0)
53259 + return -ENAMETOOLONG;
53260 + *buffer -= namelen;
53261 + memcpy(*buffer, str, namelen);
53262 + return 0;
53263 +}
53264 +
53265 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
53266 +{
53267 + return prepend(buffer, buflen, name->name, name->len);
53268 +}
53269 +
53270 +static int prepend_path(const struct path *path, struct path *root,
53271 + char **buffer, int *buflen)
53272 +{
53273 + struct dentry *dentry = path->dentry;
53274 + struct vfsmount *vfsmnt = path->mnt;
53275 + struct mount *mnt = real_mount(vfsmnt);
53276 + bool slash = false;
53277 + int error = 0;
53278 +
53279 + while (dentry != root->dentry || vfsmnt != root->mnt) {
53280 + struct dentry * parent;
53281 +
53282 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
53283 + /* Global root? */
53284 + if (!mnt_has_parent(mnt)) {
53285 + goto out;
53286 + }
53287 + dentry = mnt->mnt_mountpoint;
53288 + mnt = mnt->mnt_parent;
53289 + vfsmnt = &mnt->mnt;
53290 + continue;
53291 + }
53292 + parent = dentry->d_parent;
53293 + prefetch(parent);
53294 + spin_lock(&dentry->d_lock);
53295 + error = prepend_name(buffer, buflen, &dentry->d_name);
53296 + spin_unlock(&dentry->d_lock);
53297 + if (!error)
53298 + error = prepend(buffer, buflen, "/", 1);
53299 + if (error)
53300 + break;
53301 +
53302 + slash = true;
53303 + dentry = parent;
53304 + }
53305 +
53306 +out:
53307 + if (!error && !slash)
53308 + error = prepend(buffer, buflen, "/", 1);
53309 +
53310 + return error;
53311 +}
53312 +
53313 +/* this must be called with vfsmount_lock and rename_lock held */
53314 +
53315 +static char *__our_d_path(const struct path *path, struct path *root,
53316 + char *buf, int buflen)
53317 +{
53318 + char *res = buf + buflen;
53319 + int error;
53320 +
53321 + prepend(&res, &buflen, "\0", 1);
53322 + error = prepend_path(path, root, &res, &buflen);
53323 + if (error)
53324 + return ERR_PTR(error);
53325 +
53326 + return res;
53327 +}
53328 +
53329 +static char *
53330 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
53331 +{
53332 + char *retval;
53333 +
53334 + retval = __our_d_path(path, root, buf, buflen);
53335 + if (unlikely(IS_ERR(retval)))
53336 + retval = strcpy(buf, "<path too long>");
53337 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
53338 + retval[1] = '\0';
53339 +
53340 + return retval;
53341 +}
53342 +
53343 +static char *
53344 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
53345 + char *buf, int buflen)
53346 +{
53347 + struct path path;
53348 + char *res;
53349 +
53350 + path.dentry = (struct dentry *)dentry;
53351 + path.mnt = (struct vfsmount *)vfsmnt;
53352 +
53353 + /* we can use real_root.dentry, real_root.mnt, because this is only called
53354 + by the RBAC system */
53355 + res = gen_full_path(&path, &real_root, buf, buflen);
53356 +
53357 + return res;
53358 +}
53359 +
53360 +static char *
53361 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
53362 + char *buf, int buflen)
53363 +{
53364 + char *res;
53365 + struct path path;
53366 + struct path root;
53367 + struct task_struct *reaper = init_pid_ns.child_reaper;
53368 +
53369 + path.dentry = (struct dentry *)dentry;
53370 + path.mnt = (struct vfsmount *)vfsmnt;
53371 +
53372 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
53373 + get_fs_root(reaper->fs, &root);
53374 +
53375 + write_seqlock(&rename_lock);
53376 + br_read_lock(&vfsmount_lock);
53377 + res = gen_full_path(&path, &root, buf, buflen);
53378 + br_read_unlock(&vfsmount_lock);
53379 + write_sequnlock(&rename_lock);
53380 +
53381 + path_put(&root);
53382 + return res;
53383 +}
53384 +
53385 +static char *
53386 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
53387 +{
53388 + char *ret;
53389 + write_seqlock(&rename_lock);
53390 + br_read_lock(&vfsmount_lock);
53391 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
53392 + PAGE_SIZE);
53393 + br_read_unlock(&vfsmount_lock);
53394 + write_sequnlock(&rename_lock);
53395 + return ret;
53396 +}
53397 +
53398 +static char *
53399 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
53400 +{
53401 + char *ret;
53402 + char *buf;
53403 + int buflen;
53404 +
53405 + write_seqlock(&rename_lock);
53406 + br_read_lock(&vfsmount_lock);
53407 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
53408 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
53409 + buflen = (int)(ret - buf);
53410 + if (buflen >= 5)
53411 + prepend(&ret, &buflen, "/proc", 5);
53412 + else
53413 + ret = strcpy(buf, "<path too long>");
53414 + br_read_unlock(&vfsmount_lock);
53415 + write_sequnlock(&rename_lock);
53416 + return ret;
53417 +}
53418 +
53419 +char *
53420 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
53421 +{
53422 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
53423 + PAGE_SIZE);
53424 +}
53425 +
53426 +char *
53427 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
53428 +{
53429 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
53430 + PAGE_SIZE);
53431 +}
53432 +
53433 +char *
53434 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
53435 +{
53436 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
53437 + PAGE_SIZE);
53438 +}
53439 +
53440 +char *
53441 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
53442 +{
53443 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
53444 + PAGE_SIZE);
53445 +}
53446 +
53447 +char *
53448 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
53449 +{
53450 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
53451 + PAGE_SIZE);
53452 +}
53453 +
53454 +__inline__ __u32
53455 +to_gr_audit(const __u32 reqmode)
53456 +{
53457 + /* masks off auditable permission flags, then shifts them to create
53458 + auditing flags, and adds the special case of append auditing if
53459 + we're requesting write */
53460 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
53461 +}
53462 +
53463 +struct acl_subject_label *
53464 +lookup_subject_map(const struct acl_subject_label *userp)
53465 +{
53466 + unsigned int index = gr_shash(userp, subj_map_set.s_size);
53467 + struct subject_map *match;
53468 +
53469 + match = subj_map_set.s_hash[index];
53470 +
53471 + while (match && match->user != userp)
53472 + match = match->next;
53473 +
53474 + if (match != NULL)
53475 + return match->kernel;
53476 + else
53477 + return NULL;
53478 +}
53479 +
53480 +static void
53481 +insert_subj_map_entry(struct subject_map *subjmap)
53482 +{
53483 + unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
53484 + struct subject_map **curr;
53485 +
53486 + subjmap->prev = NULL;
53487 +
53488 + curr = &subj_map_set.s_hash[index];
53489 + if (*curr != NULL)
53490 + (*curr)->prev = subjmap;
53491 +
53492 + subjmap->next = *curr;
53493 + *curr = subjmap;
53494 +
53495 + return;
53496 +}
53497 +
53498 +static struct acl_role_label *
53499 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
53500 + const gid_t gid)
53501 +{
53502 + unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
53503 + struct acl_role_label *match;
53504 + struct role_allowed_ip *ipp;
53505 + unsigned int x;
53506 + u32 curr_ip = task->signal->curr_ip;
53507 +
53508 + task->signal->saved_ip = curr_ip;
53509 +
53510 + match = acl_role_set.r_hash[index];
53511 +
53512 + while (match) {
53513 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
53514 + for (x = 0; x < match->domain_child_num; x++) {
53515 + if (match->domain_children[x] == uid)
53516 + goto found;
53517 + }
53518 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
53519 + break;
53520 + match = match->next;
53521 + }
53522 +found:
53523 + if (match == NULL) {
53524 + try_group:
53525 + index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
53526 + match = acl_role_set.r_hash[index];
53527 +
53528 + while (match) {
53529 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
53530 + for (x = 0; x < match->domain_child_num; x++) {
53531 + if (match->domain_children[x] == gid)
53532 + goto found2;
53533 + }
53534 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
53535 + break;
53536 + match = match->next;
53537 + }
53538 +found2:
53539 + if (match == NULL)
53540 + match = default_role;
53541 + if (match->allowed_ips == NULL)
53542 + return match;
53543 + else {
53544 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
53545 + if (likely
53546 + ((ntohl(curr_ip) & ipp->netmask) ==
53547 + (ntohl(ipp->addr) & ipp->netmask)))
53548 + return match;
53549 + }
53550 + match = default_role;
53551 + }
53552 + } else if (match->allowed_ips == NULL) {
53553 + return match;
53554 + } else {
53555 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
53556 + if (likely
53557 + ((ntohl(curr_ip) & ipp->netmask) ==
53558 + (ntohl(ipp->addr) & ipp->netmask)))
53559 + return match;
53560 + }
53561 + goto try_group;
53562 + }
53563 +
53564 + return match;
53565 +}
53566 +
53567 +struct acl_subject_label *
53568 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
53569 + const struct acl_role_label *role)
53570 +{
53571 + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
53572 + struct acl_subject_label *match;
53573 +
53574 + match = role->subj_hash[index];
53575 +
53576 + while (match && (match->inode != ino || match->device != dev ||
53577 + (match->mode & GR_DELETED))) {
53578 + match = match->next;
53579 + }
53580 +
53581 + if (match && !(match->mode & GR_DELETED))
53582 + return match;
53583 + else
53584 + return NULL;
53585 +}
53586 +
53587 +struct acl_subject_label *
53588 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
53589 + const struct acl_role_label *role)
53590 +{
53591 + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
53592 + struct acl_subject_label *match;
53593 +
53594 + match = role->subj_hash[index];
53595 +
53596 + while (match && (match->inode != ino || match->device != dev ||
53597 + !(match->mode & GR_DELETED))) {
53598 + match = match->next;
53599 + }
53600 +
53601 + if (match && (match->mode & GR_DELETED))
53602 + return match;
53603 + else
53604 + return NULL;
53605 +}
53606 +
53607 +static struct acl_object_label *
53608 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
53609 + const struct acl_subject_label *subj)
53610 +{
53611 + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
53612 + struct acl_object_label *match;
53613 +
53614 + match = subj->obj_hash[index];
53615 +
53616 + while (match && (match->inode != ino || match->device != dev ||
53617 + (match->mode & GR_DELETED))) {
53618 + match = match->next;
53619 + }
53620 +
53621 + if (match && !(match->mode & GR_DELETED))
53622 + return match;
53623 + else
53624 + return NULL;
53625 +}
53626 +
53627 +static struct acl_object_label *
53628 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
53629 + const struct acl_subject_label *subj)
53630 +{
53631 + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
53632 + struct acl_object_label *match;
53633 +
53634 + match = subj->obj_hash[index];
53635 +
53636 + while (match && (match->inode != ino || match->device != dev ||
53637 + !(match->mode & GR_DELETED))) {
53638 + match = match->next;
53639 + }
53640 +
53641 + if (match && (match->mode & GR_DELETED))
53642 + return match;
53643 +
53644 + match = subj->obj_hash[index];
53645 +
53646 + while (match && (match->inode != ino || match->device != dev ||
53647 + (match->mode & GR_DELETED))) {
53648 + match = match->next;
53649 + }
53650 +
53651 + if (match && !(match->mode & GR_DELETED))
53652 + return match;
53653 + else
53654 + return NULL;
53655 +}
53656 +
53657 +static struct name_entry *
53658 +lookup_name_entry(const char *name)
53659 +{
53660 + unsigned int len = strlen(name);
53661 + unsigned int key = full_name_hash(name, len);
53662 + unsigned int index = key % name_set.n_size;
53663 + struct name_entry *match;
53664 +
53665 + match = name_set.n_hash[index];
53666 +
53667 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
53668 + match = match->next;
53669 +
53670 + return match;
53671 +}
53672 +
53673 +static struct name_entry *
53674 +lookup_name_entry_create(const char *name)
53675 +{
53676 + unsigned int len = strlen(name);
53677 + unsigned int key = full_name_hash(name, len);
53678 + unsigned int index = key % name_set.n_size;
53679 + struct name_entry *match;
53680 +
53681 + match = name_set.n_hash[index];
53682 +
53683 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
53684 + !match->deleted))
53685 + match = match->next;
53686 +
53687 + if (match && match->deleted)
53688 + return match;
53689 +
53690 + match = name_set.n_hash[index];
53691 +
53692 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
53693 + match->deleted))
53694 + match = match->next;
53695 +
53696 + if (match && !match->deleted)
53697 + return match;
53698 + else
53699 + return NULL;
53700 +}
53701 +
53702 +static struct inodev_entry *
53703 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
53704 +{
53705 + unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
53706 + struct inodev_entry *match;
53707 +
53708 + match = inodev_set.i_hash[index];
53709 +
53710 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
53711 + match = match->next;
53712 +
53713 + return match;
53714 +}
53715 +
53716 +static void
53717 +insert_inodev_entry(struct inodev_entry *entry)
53718 +{
53719 + unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
53720 + inodev_set.i_size);
53721 + struct inodev_entry **curr;
53722 +
53723 + entry->prev = NULL;
53724 +
53725 + curr = &inodev_set.i_hash[index];
53726 + if (*curr != NULL)
53727 + (*curr)->prev = entry;
53728 +
53729 + entry->next = *curr;
53730 + *curr = entry;
53731 +
53732 + return;
53733 +}
53734 +
53735 +static void
53736 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
53737 +{
53738 + unsigned int index =
53739 + gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
53740 + struct acl_role_label **curr;
53741 + struct acl_role_label *tmp, *tmp2;
53742 +
53743 + curr = &acl_role_set.r_hash[index];
53744 +
53745 + /* simple case, slot is empty, just set it to our role */
53746 + if (*curr == NULL) {
53747 + *curr = role;
53748 + } else {
53749 + /* example:
53750 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
53751 + 2 -> 3
53752 + */
53753 + /* first check to see if we can already be reached via this slot */
53754 + tmp = *curr;
53755 + while (tmp && tmp != role)
53756 + tmp = tmp->next;
53757 + if (tmp == role) {
53758 + /* we don't need to add ourselves to this slot's chain */
53759 + return;
53760 + }
53761 + /* we need to add ourselves to this chain, two cases */
53762 + if (role->next == NULL) {
53763 + /* simple case, append the current chain to our role */
53764 + role->next = *curr;
53765 + *curr = role;
53766 + } else {
53767 + /* 1 -> 2 -> 3 -> 4
53768 + 2 -> 3 -> 4
53769 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
53770 + */
53771 + /* trickier case: walk our role's chain until we find
53772 + the role for the start of the current slot's chain */
53773 + tmp = role;
53774 + tmp2 = *curr;
53775 + while (tmp->next && tmp->next != tmp2)
53776 + tmp = tmp->next;
53777 + if (tmp->next == tmp2) {
53778 + /* from example above, we found 3, so just
53779 + replace this slot's chain with ours */
53780 + *curr = role;
53781 + } else {
53782 + /* we didn't find a subset of our role's chain
53783 + in the current slot's chain, so append their
53784 + chain to ours, and set us as the first role in
53785 + the slot's chain
53786 +
53787 + we could fold this case with the case above,
53788 + but making it explicit for clarity
53789 + */
53790 + tmp->next = tmp2;
53791 + *curr = role;
53792 + }
53793 + }
53794 + }
53795 +
53796 + return;
53797 +}
53798 +
53799 +static void
53800 +insert_acl_role_label(struct acl_role_label *role)
53801 +{
53802 + int i;
53803 +
53804 + if (role_list == NULL) {
53805 + role_list = role;
53806 + role->prev = NULL;
53807 + } else {
53808 + role->prev = role_list;
53809 + role_list = role;
53810 + }
53811 +
53812 + /* used for hash chains */
53813 + role->next = NULL;
53814 +
53815 + if (role->roletype & GR_ROLE_DOMAIN) {
53816 + for (i = 0; i < role->domain_child_num; i++)
53817 + __insert_acl_role_label(role, role->domain_children[i]);
53818 + } else
53819 + __insert_acl_role_label(role, role->uidgid);
53820 +}
53821 +
53822 +static int
53823 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
53824 +{
53825 + struct name_entry **curr, *nentry;
53826 + struct inodev_entry *ientry;
53827 + unsigned int len = strlen(name);
53828 + unsigned int key = full_name_hash(name, len);
53829 + unsigned int index = key % name_set.n_size;
53830 +
53831 + curr = &name_set.n_hash[index];
53832 +
53833 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
53834 + curr = &((*curr)->next);
53835 +
53836 + if (*curr != NULL)
53837 + return 1;
53838 +
53839 + nentry = acl_alloc(sizeof (struct name_entry));
53840 + if (nentry == NULL)
53841 + return 0;
53842 + ientry = acl_alloc(sizeof (struct inodev_entry));
53843 + if (ientry == NULL)
53844 + return 0;
53845 + ientry->nentry = nentry;
53846 +
53847 + nentry->key = key;
53848 + nentry->name = name;
53849 + nentry->inode = inode;
53850 + nentry->device = device;
53851 + nentry->len = len;
53852 + nentry->deleted = deleted;
53853 +
53854 + nentry->prev = NULL;
53855 + curr = &name_set.n_hash[index];
53856 + if (*curr != NULL)
53857 + (*curr)->prev = nentry;
53858 + nentry->next = *curr;
53859 + *curr = nentry;
53860 +
53861 + /* insert us into the table searchable by inode/dev */
53862 + insert_inodev_entry(ientry);
53863 +
53864 + return 1;
53865 +}
53866 +
53867 +static void
53868 +insert_acl_obj_label(struct acl_object_label *obj,
53869 + struct acl_subject_label *subj)
53870 +{
53871 + unsigned int index =
53872 + gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
53873 + struct acl_object_label **curr;
53874 +
53875 +
53876 + obj->prev = NULL;
53877 +
53878 + curr = &subj->obj_hash[index];
53879 + if (*curr != NULL)
53880 + (*curr)->prev = obj;
53881 +
53882 + obj->next = *curr;
53883 + *curr = obj;
53884 +
53885 + return;
53886 +}
53887 +
53888 +static void
53889 +insert_acl_subj_label(struct acl_subject_label *obj,
53890 + struct acl_role_label *role)
53891 +{
53892 + unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
53893 + struct acl_subject_label **curr;
53894 +
53895 + obj->prev = NULL;
53896 +
53897 + curr = &role->subj_hash[index];
53898 + if (*curr != NULL)
53899 + (*curr)->prev = obj;
53900 +
53901 + obj->next = *curr;
53902 + *curr = obj;
53903 +
53904 + return;
53905 +}
53906 +
53907 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
53908 +
53909 +static void *
53910 +create_table(__u32 * len, int elementsize)
53911 +{
53912 + unsigned int table_sizes[] = {
53913 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
53914 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
53915 + 4194301, 8388593, 16777213, 33554393, 67108859
53916 + };
53917 + void *newtable = NULL;
53918 + unsigned int pwr = 0;
53919 +
53920 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
53921 + table_sizes[pwr] <= *len)
53922 + pwr++;
53923 +
53924 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
53925 + return newtable;
53926 +
53927 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
53928 + newtable =
53929 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
53930 + else
53931 + newtable = vmalloc(table_sizes[pwr] * elementsize);
53932 +
53933 + *len = table_sizes[pwr];
53934 +
53935 + return newtable;
53936 +}
53937 +
53938 +static int
53939 +init_variables(const struct gr_arg *arg)
53940 +{
53941 + struct task_struct *reaper = init_pid_ns.child_reaper;
53942 + unsigned int stacksize;
53943 +
53944 + subj_map_set.s_size = arg->role_db.num_subjects;
53945 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
53946 + name_set.n_size = arg->role_db.num_objects;
53947 + inodev_set.i_size = arg->role_db.num_objects;
53948 +
53949 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
53950 + !name_set.n_size || !inodev_set.i_size)
53951 + return 1;
53952 +
53953 + if (!gr_init_uidset())
53954 + return 1;
53955 +
53956 + /* set up the stack that holds allocation info */
53957 +
53958 + stacksize = arg->role_db.num_pointers + 5;
53959 +
53960 + if (!acl_alloc_stack_init(stacksize))
53961 + return 1;
53962 +
53963 + /* grab reference for the real root dentry and vfsmount */
53964 + get_fs_root(reaper->fs, &real_root);
53965 +
53966 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53967 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
53968 +#endif
53969 +
53970 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
53971 + if (fakefs_obj_rw == NULL)
53972 + return 1;
53973 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
53974 +
53975 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
53976 + if (fakefs_obj_rwx == NULL)
53977 + return 1;
53978 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
53979 +
53980 + subj_map_set.s_hash =
53981 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
53982 + acl_role_set.r_hash =
53983 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
53984 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
53985 + inodev_set.i_hash =
53986 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
53987 +
53988 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
53989 + !name_set.n_hash || !inodev_set.i_hash)
53990 + return 1;
53991 +
53992 + memset(subj_map_set.s_hash, 0,
53993 + sizeof(struct subject_map *) * subj_map_set.s_size);
53994 + memset(acl_role_set.r_hash, 0,
53995 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
53996 + memset(name_set.n_hash, 0,
53997 + sizeof (struct name_entry *) * name_set.n_size);
53998 + memset(inodev_set.i_hash, 0,
53999 + sizeof (struct inodev_entry *) * inodev_set.i_size);
54000 +
54001 + return 0;
54002 +}
54003 +
54004 +/* free information not needed after startup
54005 + currently contains user->kernel pointer mappings for subjects
54006 +*/
54007 +
54008 +static void
54009 +free_init_variables(void)
54010 +{
54011 + __u32 i;
54012 +
54013 + if (subj_map_set.s_hash) {
54014 + for (i = 0; i < subj_map_set.s_size; i++) {
54015 + if (subj_map_set.s_hash[i]) {
54016 + kfree(subj_map_set.s_hash[i]);
54017 + subj_map_set.s_hash[i] = NULL;
54018 + }
54019 + }
54020 +
54021 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
54022 + PAGE_SIZE)
54023 + kfree(subj_map_set.s_hash);
54024 + else
54025 + vfree(subj_map_set.s_hash);
54026 + }
54027 +
54028 + return;
54029 +}
54030 +
54031 +static void
54032 +free_variables(void)
54033 +{
54034 + struct acl_subject_label *s;
54035 + struct acl_role_label *r;
54036 + struct task_struct *task, *task2;
54037 + unsigned int x;
54038 +
54039 + gr_clear_learn_entries();
54040 +
54041 + read_lock(&tasklist_lock);
54042 + do_each_thread(task2, task) {
54043 + task->acl_sp_role = 0;
54044 + task->acl_role_id = 0;
54045 + task->acl = NULL;
54046 + task->role = NULL;
54047 + } while_each_thread(task2, task);
54048 + read_unlock(&tasklist_lock);
54049 +
54050 + /* release the reference to the real root dentry and vfsmount */
54051 + path_put(&real_root);
54052 + memset(&real_root, 0, sizeof(real_root));
54053 +
54054 + /* free all object hash tables */
54055 +
54056 + FOR_EACH_ROLE_START(r)
54057 + if (r->subj_hash == NULL)
54058 + goto next_role;
54059 + FOR_EACH_SUBJECT_START(r, s, x)
54060 + if (s->obj_hash == NULL)
54061 + break;
54062 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
54063 + kfree(s->obj_hash);
54064 + else
54065 + vfree(s->obj_hash);
54066 + FOR_EACH_SUBJECT_END(s, x)
54067 + FOR_EACH_NESTED_SUBJECT_START(r, s)
54068 + if (s->obj_hash == NULL)
54069 + break;
54070 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
54071 + kfree(s->obj_hash);
54072 + else
54073 + vfree(s->obj_hash);
54074 + FOR_EACH_NESTED_SUBJECT_END(s)
54075 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
54076 + kfree(r->subj_hash);
54077 + else
54078 + vfree(r->subj_hash);
54079 + r->subj_hash = NULL;
54080 +next_role:
54081 + FOR_EACH_ROLE_END(r)
54082 +
54083 + acl_free_all();
54084 +
54085 + if (acl_role_set.r_hash) {
54086 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
54087 + PAGE_SIZE)
54088 + kfree(acl_role_set.r_hash);
54089 + else
54090 + vfree(acl_role_set.r_hash);
54091 + }
54092 + if (name_set.n_hash) {
54093 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
54094 + PAGE_SIZE)
54095 + kfree(name_set.n_hash);
54096 + else
54097 + vfree(name_set.n_hash);
54098 + }
54099 +
54100 + if (inodev_set.i_hash) {
54101 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
54102 + PAGE_SIZE)
54103 + kfree(inodev_set.i_hash);
54104 + else
54105 + vfree(inodev_set.i_hash);
54106 + }
54107 +
54108 + gr_free_uidset();
54109 +
54110 + memset(&name_set, 0, sizeof (struct name_db));
54111 + memset(&inodev_set, 0, sizeof (struct inodev_db));
54112 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
54113 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
54114 +
54115 + default_role = NULL;
54116 + kernel_role = NULL;
54117 + role_list = NULL;
54118 +
54119 + return;
54120 +}
54121 +
54122 +static __u32
54123 +count_user_objs(struct acl_object_label *userp)
54124 +{
54125 + struct acl_object_label o_tmp;
54126 + __u32 num = 0;
54127 +
54128 + while (userp) {
54129 + if (copy_from_user(&o_tmp, userp,
54130 + sizeof (struct acl_object_label)))
54131 + break;
54132 +
54133 + userp = o_tmp.prev;
54134 + num++;
54135 + }
54136 +
54137 + return num;
54138 +}
54139 +
54140 +static struct acl_subject_label *
54141 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
54142 +
54143 +static int
54144 +copy_user_glob(struct acl_object_label *obj)
54145 +{
54146 + struct acl_object_label *g_tmp, **guser;
54147 + unsigned int len;
54148 + char *tmp;
54149 +
54150 + if (obj->globbed == NULL)
54151 + return 0;
54152 +
54153 + guser = &obj->globbed;
54154 + while (*guser) {
54155 + g_tmp = (struct acl_object_label *)
54156 + acl_alloc(sizeof (struct acl_object_label));
54157 + if (g_tmp == NULL)
54158 + return -ENOMEM;
54159 +
54160 + if (copy_from_user(g_tmp, *guser,
54161 + sizeof (struct acl_object_label)))
54162 + return -EFAULT;
54163 +
54164 + len = strnlen_user(g_tmp->filename, PATH_MAX);
54165 +
54166 + if (!len || len >= PATH_MAX)
54167 + return -EINVAL;
54168 +
54169 + if ((tmp = (char *) acl_alloc(len)) == NULL)
54170 + return -ENOMEM;
54171 +
54172 + if (copy_from_user(tmp, g_tmp->filename, len))
54173 + return -EFAULT;
54174 + tmp[len-1] = '\0';
54175 + g_tmp->filename = tmp;
54176 +
54177 + *guser = g_tmp;
54178 + guser = &(g_tmp->next);
54179 + }
54180 +
54181 + return 0;
54182 +}
54183 +
54184 +static int
54185 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
54186 + struct acl_role_label *role)
54187 +{
54188 + struct acl_object_label *o_tmp;
54189 + unsigned int len;
54190 + int ret;
54191 + char *tmp;
54192 +
54193 + while (userp) {
54194 + if ((o_tmp = (struct acl_object_label *)
54195 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
54196 + return -ENOMEM;
54197 +
54198 + if (copy_from_user(o_tmp, userp,
54199 + sizeof (struct acl_object_label)))
54200 + return -EFAULT;
54201 +
54202 + userp = o_tmp->prev;
54203 +
54204 + len = strnlen_user(o_tmp->filename, PATH_MAX);
54205 +
54206 + if (!len || len >= PATH_MAX)
54207 + return -EINVAL;
54208 +
54209 + if ((tmp = (char *) acl_alloc(len)) == NULL)
54210 + return -ENOMEM;
54211 +
54212 + if (copy_from_user(tmp, o_tmp->filename, len))
54213 + return -EFAULT;
54214 + tmp[len-1] = '\0';
54215 + o_tmp->filename = tmp;
54216 +
54217 + insert_acl_obj_label(o_tmp, subj);
54218 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
54219 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
54220 + return -ENOMEM;
54221 +
54222 + ret = copy_user_glob(o_tmp);
54223 + if (ret)
54224 + return ret;
54225 +
54226 + if (o_tmp->nested) {
54227 + int already_copied;
54228 +
54229 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
54230 + if (IS_ERR(o_tmp->nested))
54231 + return PTR_ERR(o_tmp->nested);
54232 +
54233 + /* insert into nested subject list if we haven't copied this one yet
54234 + to prevent duplicate entries */
54235 + if (!already_copied) {
54236 + o_tmp->nested->next = role->hash->first;
54237 + role->hash->first = o_tmp->nested;
54238 + }
54239 + }
54240 + }
54241 +
54242 + return 0;
54243 +}
54244 +
54245 +static __u32
54246 +count_user_subjs(struct acl_subject_label *userp)
54247 +{
54248 + struct acl_subject_label s_tmp;
54249 + __u32 num = 0;
54250 +
54251 + while (userp) {
54252 + if (copy_from_user(&s_tmp, userp,
54253 + sizeof (struct acl_subject_label)))
54254 + break;
54255 +
54256 + userp = s_tmp.prev;
54257 + /* do not count nested subjects against this count, since
54258 + they are not included in the hash table, but are
54259 + attached to objects. We have already counted
54260 + the subjects in userspace for the allocation
54261 + stack
54262 + */
54263 + if (!(s_tmp.mode & GR_NESTED))
54264 + num++;
54265 + }
54266 +
54267 + return num;
54268 +}
54269 +
54270 +static int
54271 +copy_user_allowedips(struct acl_role_label *rolep)
54272 +{
54273 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
54274 +
54275 + ruserip = rolep->allowed_ips;
54276 +
54277 + while (ruserip) {
54278 + rlast = rtmp;
54279 +
54280 + if ((rtmp = (struct role_allowed_ip *)
54281 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
54282 + return -ENOMEM;
54283 +
54284 + if (copy_from_user(rtmp, ruserip,
54285 + sizeof (struct role_allowed_ip)))
54286 + return -EFAULT;
54287 +
54288 + ruserip = rtmp->prev;
54289 +
54290 + if (!rlast) {
54291 + rtmp->prev = NULL;
54292 + rolep->allowed_ips = rtmp;
54293 + } else {
54294 + rlast->next = rtmp;
54295 + rtmp->prev = rlast;
54296 + }
54297 +
54298 + if (!ruserip)
54299 + rtmp->next = NULL;
54300 + }
54301 +
54302 + return 0;
54303 +}
54304 +
54305 +static int
54306 +copy_user_transitions(struct acl_role_label *rolep)
54307 +{
54308 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
54309 +
54310 + unsigned int len;
54311 + char *tmp;
54312 +
54313 + rusertp = rolep->transitions;
54314 +
54315 + while (rusertp) {
54316 + rlast = rtmp;
54317 +
54318 + if ((rtmp = (struct role_transition *)
54319 + acl_alloc(sizeof (struct role_transition))) == NULL)
54320 + return -ENOMEM;
54321 +
54322 + if (copy_from_user(rtmp, rusertp,
54323 + sizeof (struct role_transition)))
54324 + return -EFAULT;
54325 +
54326 + rusertp = rtmp->prev;
54327 +
54328 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
54329 +
54330 + if (!len || len >= GR_SPROLE_LEN)
54331 + return -EINVAL;
54332 +
54333 + if ((tmp = (char *) acl_alloc(len)) == NULL)
54334 + return -ENOMEM;
54335 +
54336 + if (copy_from_user(tmp, rtmp->rolename, len))
54337 + return -EFAULT;
54338 + tmp[len-1] = '\0';
54339 + rtmp->rolename = tmp;
54340 +
54341 + if (!rlast) {
54342 + rtmp->prev = NULL;
54343 + rolep->transitions = rtmp;
54344 + } else {
54345 + rlast->next = rtmp;
54346 + rtmp->prev = rlast;
54347 + }
54348 +
54349 + if (!rusertp)
54350 + rtmp->next = NULL;
54351 + }
54352 +
54353 + return 0;
54354 +}
54355 +
54356 +static struct acl_subject_label *
54357 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
54358 +{
54359 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
54360 + unsigned int len;
54361 + char *tmp;
54362 + __u32 num_objs;
54363 + struct acl_ip_label **i_tmp, *i_utmp2;
54364 + struct gr_hash_struct ghash;
54365 + struct subject_map *subjmap;
54366 + unsigned int i_num;
54367 + int err;
54368 +
54369 + if (already_copied != NULL)
54370 + *already_copied = 0;
54371 +
54372 + s_tmp = lookup_subject_map(userp);
54373 +
54374 + /* we've already copied this subject into the kernel, just return
54375 + the reference to it, and don't copy it over again
54376 + */
54377 + if (s_tmp) {
54378 + if (already_copied != NULL)
54379 + *already_copied = 1;
54380 + return(s_tmp);
54381 + }
54382 +
54383 + if ((s_tmp = (struct acl_subject_label *)
54384 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
54385 + return ERR_PTR(-ENOMEM);
54386 +
54387 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
54388 + if (subjmap == NULL)
54389 + return ERR_PTR(-ENOMEM);
54390 +
54391 + subjmap->user = userp;
54392 + subjmap->kernel = s_tmp;
54393 + insert_subj_map_entry(subjmap);
54394 +
54395 + if (copy_from_user(s_tmp, userp,
54396 + sizeof (struct acl_subject_label)))
54397 + return ERR_PTR(-EFAULT);
54398 +
54399 + len = strnlen_user(s_tmp->filename, PATH_MAX);
54400 +
54401 + if (!len || len >= PATH_MAX)
54402 + return ERR_PTR(-EINVAL);
54403 +
54404 + if ((tmp = (char *) acl_alloc(len)) == NULL)
54405 + return ERR_PTR(-ENOMEM);
54406 +
54407 + if (copy_from_user(tmp, s_tmp->filename, len))
54408 + return ERR_PTR(-EFAULT);
54409 + tmp[len-1] = '\0';
54410 + s_tmp->filename = tmp;
54411 +
54412 + if (!strcmp(s_tmp->filename, "/"))
54413 + role->root_label = s_tmp;
54414 +
54415 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
54416 + return ERR_PTR(-EFAULT);
54417 +
54418 + /* copy user and group transition tables */
54419 +
54420 + if (s_tmp->user_trans_num) {
54421 + uid_t *uidlist;
54422 +
54423 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
54424 + if (uidlist == NULL)
54425 + return ERR_PTR(-ENOMEM);
54426 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
54427 + return ERR_PTR(-EFAULT);
54428 +
54429 + s_tmp->user_transitions = uidlist;
54430 + }
54431 +
54432 + if (s_tmp->group_trans_num) {
54433 + gid_t *gidlist;
54434 +
54435 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
54436 + if (gidlist == NULL)
54437 + return ERR_PTR(-ENOMEM);
54438 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
54439 + return ERR_PTR(-EFAULT);
54440 +
54441 + s_tmp->group_transitions = gidlist;
54442 + }
54443 +
54444 + /* set up object hash table */
54445 + num_objs = count_user_objs(ghash.first);
54446 +
54447 + s_tmp->obj_hash_size = num_objs;
54448 + s_tmp->obj_hash =
54449 + (struct acl_object_label **)
54450 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
54451 +
54452 + if (!s_tmp->obj_hash)
54453 + return ERR_PTR(-ENOMEM);
54454 +
54455 + memset(s_tmp->obj_hash, 0,
54456 + s_tmp->obj_hash_size *
54457 + sizeof (struct acl_object_label *));
54458 +
54459 + /* add in objects */
54460 + err = copy_user_objs(ghash.first, s_tmp, role);
54461 +
54462 + if (err)
54463 + return ERR_PTR(err);
54464 +
54465 + /* set pointer for parent subject */
54466 + if (s_tmp->parent_subject) {
54467 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
54468 +
54469 + if (IS_ERR(s_tmp2))
54470 + return s_tmp2;
54471 +
54472 + s_tmp->parent_subject = s_tmp2;
54473 + }
54474 +
54475 + /* add in ip acls */
54476 +
54477 + if (!s_tmp->ip_num) {
54478 + s_tmp->ips = NULL;
54479 + goto insert;
54480 + }
54481 +
54482 + i_tmp =
54483 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
54484 + sizeof (struct acl_ip_label *));
54485 +
54486 + if (!i_tmp)
54487 + return ERR_PTR(-ENOMEM);
54488 +
54489 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
54490 + *(i_tmp + i_num) =
54491 + (struct acl_ip_label *)
54492 + acl_alloc(sizeof (struct acl_ip_label));
54493 + if (!*(i_tmp + i_num))
54494 + return ERR_PTR(-ENOMEM);
54495 +
54496 + if (copy_from_user
54497 + (&i_utmp2, s_tmp->ips + i_num,
54498 + sizeof (struct acl_ip_label *)))
54499 + return ERR_PTR(-EFAULT);
54500 +
54501 + if (copy_from_user
54502 + (*(i_tmp + i_num), i_utmp2,
54503 + sizeof (struct acl_ip_label)))
54504 + return ERR_PTR(-EFAULT);
54505 +
54506 + if ((*(i_tmp + i_num))->iface == NULL)
54507 + continue;
54508 +
54509 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
54510 + if (!len || len >= IFNAMSIZ)
54511 + return ERR_PTR(-EINVAL);
54512 + tmp = acl_alloc(len);
54513 + if (tmp == NULL)
54514 + return ERR_PTR(-ENOMEM);
54515 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
54516 + return ERR_PTR(-EFAULT);
54517 + (*(i_tmp + i_num))->iface = tmp;
54518 + }
54519 +
54520 + s_tmp->ips = i_tmp;
54521 +
54522 +insert:
54523 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
54524 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
54525 + return ERR_PTR(-ENOMEM);
54526 +
54527 + return s_tmp;
54528 +}
54529 +
54530 +static int
54531 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
54532 +{
54533 + struct acl_subject_label s_pre;
54534 + struct acl_subject_label * ret;
54535 + int err;
54536 +
54537 + while (userp) {
54538 + if (copy_from_user(&s_pre, userp,
54539 + sizeof (struct acl_subject_label)))
54540 + return -EFAULT;
54541 +
54542 + /* do not add nested subjects here, add
54543 + while parsing objects
54544 + */
54545 +
54546 + if (s_pre.mode & GR_NESTED) {
54547 + userp = s_pre.prev;
54548 + continue;
54549 + }
54550 +
54551 + ret = do_copy_user_subj(userp, role, NULL);
54552 +
54553 + err = PTR_ERR(ret);
54554 + if (IS_ERR(ret))
54555 + return err;
54556 +
54557 + insert_acl_subj_label(ret, role);
54558 +
54559 + userp = s_pre.prev;
54560 + }
54561 +
54562 + return 0;
54563 +}
54564 +
54565 +static int
54566 +copy_user_acl(struct gr_arg *arg)
54567 +{
54568 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
54569 + struct acl_subject_label *subj_list;
54570 + struct sprole_pw *sptmp;
54571 + struct gr_hash_struct *ghash;
54572 + uid_t *domainlist;
54573 + unsigned int r_num;
54574 + unsigned int len;
54575 + char *tmp;
54576 + int err = 0;
54577 + __u16 i;
54578 + __u32 num_subjs;
54579 +
54580 + /* we need a default and kernel role */
54581 + if (arg->role_db.num_roles < 2)
54582 + return -EINVAL;
54583 +
54584 + /* copy special role authentication info from userspace */
54585 +
54586 + num_sprole_pws = arg->num_sprole_pws;
54587 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
54588 +
54589 + if (!acl_special_roles && num_sprole_pws)
54590 + return -ENOMEM;
54591 +
54592 + for (i = 0; i < num_sprole_pws; i++) {
54593 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
54594 + if (!sptmp)
54595 + return -ENOMEM;
54596 + if (copy_from_user(sptmp, arg->sprole_pws + i,
54597 + sizeof (struct sprole_pw)))
54598 + return -EFAULT;
54599 +
54600 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
54601 +
54602 + if (!len || len >= GR_SPROLE_LEN)
54603 + return -EINVAL;
54604 +
54605 + if ((tmp = (char *) acl_alloc(len)) == NULL)
54606 + return -ENOMEM;
54607 +
54608 + if (copy_from_user(tmp, sptmp->rolename, len))
54609 + return -EFAULT;
54610 +
54611 + tmp[len-1] = '\0';
54612 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54613 + printk(KERN_ALERT "Copying special role %s\n", tmp);
54614 +#endif
54615 + sptmp->rolename = tmp;
54616 + acl_special_roles[i] = sptmp;
54617 + }
54618 +
54619 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
54620 +
54621 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
54622 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
54623 +
54624 + if (!r_tmp)
54625 + return -ENOMEM;
54626 +
54627 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
54628 + sizeof (struct acl_role_label *)))
54629 + return -EFAULT;
54630 +
54631 + if (copy_from_user(r_tmp, r_utmp2,
54632 + sizeof (struct acl_role_label)))
54633 + return -EFAULT;
54634 +
54635 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
54636 +
54637 + if (!len || len >= PATH_MAX)
54638 + return -EINVAL;
54639 +
54640 + if ((tmp = (char *) acl_alloc(len)) == NULL)
54641 + return -ENOMEM;
54642 +
54643 + if (copy_from_user(tmp, r_tmp->rolename, len))
54644 + return -EFAULT;
54645 +
54646 + tmp[len-1] = '\0';
54647 + r_tmp->rolename = tmp;
54648 +
54649 + if (!strcmp(r_tmp->rolename, "default")
54650 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
54651 + default_role = r_tmp;
54652 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
54653 + kernel_role = r_tmp;
54654 + }
54655 +
54656 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
54657 + return -ENOMEM;
54658 +
54659 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
54660 + return -EFAULT;
54661 +
54662 + r_tmp->hash = ghash;
54663 +
54664 + num_subjs = count_user_subjs(r_tmp->hash->first);
54665 +
54666 + r_tmp->subj_hash_size = num_subjs;
54667 + r_tmp->subj_hash =
54668 + (struct acl_subject_label **)
54669 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
54670 +
54671 + if (!r_tmp->subj_hash)
54672 + return -ENOMEM;
54673 +
54674 + err = copy_user_allowedips(r_tmp);
54675 + if (err)
54676 + return err;
54677 +
54678 + /* copy domain info */
54679 + if (r_tmp->domain_children != NULL) {
54680 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
54681 + if (domainlist == NULL)
54682 + return -ENOMEM;
54683 +
54684 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
54685 + return -EFAULT;
54686 +
54687 + r_tmp->domain_children = domainlist;
54688 + }
54689 +
54690 + err = copy_user_transitions(r_tmp);
54691 + if (err)
54692 + return err;
54693 +
54694 + memset(r_tmp->subj_hash, 0,
54695 + r_tmp->subj_hash_size *
54696 + sizeof (struct acl_subject_label *));
54697 +
54698 + /* acquire the list of subjects, then NULL out
54699 + the list prior to parsing the subjects for this role,
54700 + as during this parsing the list is replaced with a list
54701 + of *nested* subjects for the role
54702 + */
54703 + subj_list = r_tmp->hash->first;
54704 +
54705 + /* set nested subject list to null */
54706 + r_tmp->hash->first = NULL;
54707 +
54708 + err = copy_user_subjs(subj_list, r_tmp);
54709 +
54710 + if (err)
54711 + return err;
54712 +
54713 + insert_acl_role_label(r_tmp);
54714 + }
54715 +
54716 + if (default_role == NULL || kernel_role == NULL)
54717 + return -EINVAL;
54718 +
54719 + return err;
54720 +}
54721 +
54722 +static int
54723 +gracl_init(struct gr_arg *args)
54724 +{
54725 + int error = 0;
54726 +
54727 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
54728 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
54729 +
54730 + if (init_variables(args)) {
54731 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
54732 + error = -ENOMEM;
54733 + free_variables();
54734 + goto out;
54735 + }
54736 +
54737 + error = copy_user_acl(args);
54738 + free_init_variables();
54739 + if (error) {
54740 + free_variables();
54741 + goto out;
54742 + }
54743 +
54744 + if ((error = gr_set_acls(0))) {
54745 + free_variables();
54746 + goto out;
54747 + }
54748 +
54749 + pax_open_kernel();
54750 + gr_status |= GR_READY;
54751 + pax_close_kernel();
54752 +
54753 + out:
54754 + return error;
54755 +}
54756 +
54757 +/* derived from glibc fnmatch() 0: match, 1: no match*/
54758 +
54759 +static int
54760 +glob_match(const char *p, const char *n)
54761 +{
54762 + char c;
54763 +
54764 + while ((c = *p++) != '\0') {
54765 + switch (c) {
54766 + case '?':
54767 + if (*n == '\0')
54768 + return 1;
54769 + else if (*n == '/')
54770 + return 1;
54771 + break;
54772 + case '\\':
54773 + if (*n != c)
54774 + return 1;
54775 + break;
54776 + case '*':
54777 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
54778 + if (*n == '/')
54779 + return 1;
54780 + else if (c == '?') {
54781 + if (*n == '\0')
54782 + return 1;
54783 + else
54784 + ++n;
54785 + }
54786 + }
54787 + if (c == '\0') {
54788 + return 0;
54789 + } else {
54790 + const char *endp;
54791 +
54792 + if ((endp = strchr(n, '/')) == NULL)
54793 + endp = n + strlen(n);
54794 +
54795 + if (c == '[') {
54796 + for (--p; n < endp; ++n)
54797 + if (!glob_match(p, n))
54798 + return 0;
54799 + } else if (c == '/') {
54800 + while (*n != '\0' && *n != '/')
54801 + ++n;
54802 + if (*n == '/' && !glob_match(p, n + 1))
54803 + return 0;
54804 + } else {
54805 + for (--p; n < endp; ++n)
54806 + if (*n == c && !glob_match(p, n))
54807 + return 0;
54808 + }
54809 +
54810 + return 1;
54811 + }
54812 + case '[':
54813 + {
54814 + int not;
54815 + char cold;
54816 +
54817 + if (*n == '\0' || *n == '/')
54818 + return 1;
54819 +
54820 + not = (*p == '!' || *p == '^');
54821 + if (not)
54822 + ++p;
54823 +
54824 + c = *p++;
54825 + for (;;) {
54826 + unsigned char fn = (unsigned char)*n;
54827 +
54828 + if (c == '\0')
54829 + return 1;
54830 + else {
54831 + if (c == fn)
54832 + goto matched;
54833 + cold = c;
54834 + c = *p++;
54835 +
54836 + if (c == '-' && *p != ']') {
54837 + unsigned char cend = *p++;
54838 +
54839 + if (cend == '\0')
54840 + return 1;
54841 +
54842 + if (cold <= fn && fn <= cend)
54843 + goto matched;
54844 +
54845 + c = *p++;
54846 + }
54847 + }
54848 +
54849 + if (c == ']')
54850 + break;
54851 + }
54852 + if (!not)
54853 + return 1;
54854 + break;
54855 + matched:
54856 + while (c != ']') {
54857 + if (c == '\0')
54858 + return 1;
54859 +
54860 + c = *p++;
54861 + }
54862 + if (not)
54863 + return 1;
54864 + }
54865 + break;
54866 + default:
54867 + if (c != *n)
54868 + return 1;
54869 + }
54870 +
54871 + ++n;
54872 + }
54873 +
54874 + if (*n == '\0')
54875 + return 0;
54876 +
54877 + if (*n == '/')
54878 + return 0;
54879 +
54880 + return 1;
54881 +}
54882 +
54883 +static struct acl_object_label *
54884 +chk_glob_label(struct acl_object_label *globbed,
54885 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
54886 +{
54887 + struct acl_object_label *tmp;
54888 +
54889 + if (*path == NULL)
54890 + *path = gr_to_filename_nolock(dentry, mnt);
54891 +
54892 + tmp = globbed;
54893 +
54894 + while (tmp) {
54895 + if (!glob_match(tmp->filename, *path))
54896 + return tmp;
54897 + tmp = tmp->next;
54898 + }
54899 +
54900 + return NULL;
54901 +}
54902 +
54903 +static struct acl_object_label *
54904 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
54905 + const ino_t curr_ino, const dev_t curr_dev,
54906 + const struct acl_subject_label *subj, char **path, const int checkglob)
54907 +{
54908 + struct acl_subject_label *tmpsubj;
54909 + struct acl_object_label *retval;
54910 + struct acl_object_label *retval2;
54911 +
54912 + tmpsubj = (struct acl_subject_label *) subj;
54913 + read_lock(&gr_inode_lock);
54914 + do {
54915 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
54916 + if (retval) {
54917 + if (checkglob && retval->globbed) {
54918 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
54919 + if (retval2)
54920 + retval = retval2;
54921 + }
54922 + break;
54923 + }
54924 + } while ((tmpsubj = tmpsubj->parent_subject));
54925 + read_unlock(&gr_inode_lock);
54926 +
54927 + return retval;
54928 +}
54929 +
54930 +static __inline__ struct acl_object_label *
54931 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
54932 + struct dentry *curr_dentry,
54933 + const struct acl_subject_label *subj, char **path, const int checkglob)
54934 +{
54935 + int newglob = checkglob;
54936 + ino_t inode;
54937 + dev_t device;
54938 +
54939 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
54940 + as we don't want a / * rule to match instead of the / object
54941 + don't do this for create lookups that call this function though, since they're looking up
54942 + on the parent and thus need globbing checks on all paths
54943 + */
54944 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
54945 + newglob = GR_NO_GLOB;
54946 +
54947 + spin_lock(&curr_dentry->d_lock);
54948 + inode = curr_dentry->d_inode->i_ino;
54949 + device = __get_dev(curr_dentry);
54950 + spin_unlock(&curr_dentry->d_lock);
54951 +
54952 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
54953 +}
54954 +
54955 +static struct acl_object_label *
54956 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
54957 + const struct acl_subject_label *subj, char *path, const int checkglob)
54958 +{
54959 + struct dentry *dentry = (struct dentry *) l_dentry;
54960 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
54961 + struct mount *real_mnt = real_mount(mnt);
54962 + struct acl_object_label *retval;
54963 + struct dentry *parent;
54964 +
54965 + write_seqlock(&rename_lock);
54966 + br_read_lock(&vfsmount_lock);
54967 +
54968 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
54969 +#ifdef CONFIG_NET
54970 + mnt == sock_mnt ||
54971 +#endif
54972 +#ifdef CONFIG_HUGETLBFS
54973 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
54974 +#endif
54975 + /* ignore Eric Biederman */
54976 + IS_PRIVATE(l_dentry->d_inode))) {
54977 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
54978 + goto out;
54979 + }
54980 +
54981 + for (;;) {
54982 + if (dentry == real_root.dentry && mnt == real_root.mnt)
54983 + break;
54984 +
54985 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
54986 + if (!mnt_has_parent(real_mnt))
54987 + break;
54988 +
54989 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
54990 + if (retval != NULL)
54991 + goto out;
54992 +
54993 + dentry = real_mnt->mnt_mountpoint;
54994 + real_mnt = real_mnt->mnt_parent;
54995 + mnt = &real_mnt->mnt;
54996 + continue;
54997 + }
54998 +
54999 + parent = dentry->d_parent;
55000 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55001 + if (retval != NULL)
55002 + goto out;
55003 +
55004 + dentry = parent;
55005 + }
55006 +
55007 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55008 +
55009 + /* real_root is pinned so we don't have to hold a reference */
55010 + if (retval == NULL)
55011 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
55012 +out:
55013 + br_read_unlock(&vfsmount_lock);
55014 + write_sequnlock(&rename_lock);
55015 +
55016 + BUG_ON(retval == NULL);
55017 +
55018 + return retval;
55019 +}
55020 +
55021 +static __inline__ struct acl_object_label *
55022 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55023 + const struct acl_subject_label *subj)
55024 +{
55025 + char *path = NULL;
55026 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
55027 +}
55028 +
55029 +static __inline__ struct acl_object_label *
55030 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55031 + const struct acl_subject_label *subj)
55032 +{
55033 + char *path = NULL;
55034 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
55035 +}
55036 +
55037 +static __inline__ struct acl_object_label *
55038 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55039 + const struct acl_subject_label *subj, char *path)
55040 +{
55041 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
55042 +}
55043 +
55044 +static struct acl_subject_label *
55045 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55046 + const struct acl_role_label *role)
55047 +{
55048 + struct dentry *dentry = (struct dentry *) l_dentry;
55049 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
55050 + struct mount *real_mnt = real_mount(mnt);
55051 + struct acl_subject_label *retval;
55052 + struct dentry *parent;
55053 +
55054 + write_seqlock(&rename_lock);
55055 + br_read_lock(&vfsmount_lock);
55056 +
55057 + for (;;) {
55058 + if (dentry == real_root.dentry && mnt == real_root.mnt)
55059 + break;
55060 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
55061 + if (!mnt_has_parent(real_mnt))
55062 + break;
55063 +
55064 + spin_lock(&dentry->d_lock);
55065 + read_lock(&gr_inode_lock);
55066 + retval =
55067 + lookup_acl_subj_label(dentry->d_inode->i_ino,
55068 + __get_dev(dentry), role);
55069 + read_unlock(&gr_inode_lock);
55070 + spin_unlock(&dentry->d_lock);
55071 + if (retval != NULL)
55072 + goto out;
55073 +
55074 + dentry = real_mnt->mnt_mountpoint;
55075 + real_mnt = real_mnt->mnt_parent;
55076 + mnt = &real_mnt->mnt;
55077 + continue;
55078 + }
55079 +
55080 + spin_lock(&dentry->d_lock);
55081 + read_lock(&gr_inode_lock);
55082 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
55083 + __get_dev(dentry), role);
55084 + read_unlock(&gr_inode_lock);
55085 + parent = dentry->d_parent;
55086 + spin_unlock(&dentry->d_lock);
55087 +
55088 + if (retval != NULL)
55089 + goto out;
55090 +
55091 + dentry = parent;
55092 + }
55093 +
55094 + spin_lock(&dentry->d_lock);
55095 + read_lock(&gr_inode_lock);
55096 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
55097 + __get_dev(dentry), role);
55098 + read_unlock(&gr_inode_lock);
55099 + spin_unlock(&dentry->d_lock);
55100 +
55101 + if (unlikely(retval == NULL)) {
55102 + /* real_root is pinned, we don't need to hold a reference */
55103 + read_lock(&gr_inode_lock);
55104 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
55105 + __get_dev(real_root.dentry), role);
55106 + read_unlock(&gr_inode_lock);
55107 + }
55108 +out:
55109 + br_read_unlock(&vfsmount_lock);
55110 + write_sequnlock(&rename_lock);
55111 +
55112 + BUG_ON(retval == NULL);
55113 +
55114 + return retval;
55115 +}
55116 +
55117 +static void
55118 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
55119 +{
55120 + struct task_struct *task = current;
55121 + const struct cred *cred = current_cred();
55122 +
55123 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
55124 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
55125 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
55126 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
55127 +
55128 + return;
55129 +}
55130 +
55131 +static void
55132 +gr_log_learn_id_change(const char type, const unsigned int real,
55133 + const unsigned int effective, const unsigned int fs)
55134 +{
55135 + struct task_struct *task = current;
55136 + const struct cred *cred = current_cred();
55137 +
55138 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
55139 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
55140 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
55141 + type, real, effective, fs, &task->signal->saved_ip);
55142 +
55143 + return;
55144 +}
55145 +
55146 +__u32
55147 +gr_search_file(const struct dentry * dentry, const __u32 mode,
55148 + const struct vfsmount * mnt)
55149 +{
55150 + __u32 retval = mode;
55151 + struct acl_subject_label *curracl;
55152 + struct acl_object_label *currobj;
55153 +
55154 + if (unlikely(!(gr_status & GR_READY)))
55155 + return (mode & ~GR_AUDITS);
55156 +
55157 + curracl = current->acl;
55158 +
55159 + currobj = chk_obj_label(dentry, mnt, curracl);
55160 + retval = currobj->mode & mode;
55161 +
55162 + /* if we're opening a specified transfer file for writing
55163 + (e.g. /dev/initctl), then transfer our role to init
55164 + */
55165 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
55166 + current->role->roletype & GR_ROLE_PERSIST)) {
55167 + struct task_struct *task = init_pid_ns.child_reaper;
55168 +
55169 + if (task->role != current->role) {
55170 + task->acl_sp_role = 0;
55171 + task->acl_role_id = current->acl_role_id;
55172 + task->role = current->role;
55173 + rcu_read_lock();
55174 + read_lock(&grsec_exec_file_lock);
55175 + gr_apply_subject_to_task(task);
55176 + read_unlock(&grsec_exec_file_lock);
55177 + rcu_read_unlock();
55178 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
55179 + }
55180 + }
55181 +
55182 + if (unlikely
55183 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
55184 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
55185 + __u32 new_mode = mode;
55186 +
55187 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55188 +
55189 + retval = new_mode;
55190 +
55191 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
55192 + new_mode |= GR_INHERIT;
55193 +
55194 + if (!(mode & GR_NOLEARN))
55195 + gr_log_learn(dentry, mnt, new_mode);
55196 + }
55197 +
55198 + return retval;
55199 +}
55200 +
55201 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
55202 + const struct dentry *parent,
55203 + const struct vfsmount *mnt)
55204 +{
55205 + struct name_entry *match;
55206 + struct acl_object_label *matchpo;
55207 + struct acl_subject_label *curracl;
55208 + char *path;
55209 +
55210 + if (unlikely(!(gr_status & GR_READY)))
55211 + return NULL;
55212 +
55213 + preempt_disable();
55214 + path = gr_to_filename_rbac(new_dentry, mnt);
55215 + match = lookup_name_entry_create(path);
55216 +
55217 + curracl = current->acl;
55218 +
55219 + if (match) {
55220 + read_lock(&gr_inode_lock);
55221 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
55222 + read_unlock(&gr_inode_lock);
55223 +
55224 + if (matchpo) {
55225 + preempt_enable();
55226 + return matchpo;
55227 + }
55228 + }
55229 +
55230 + // lookup parent
55231 +
55232 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
55233 +
55234 + preempt_enable();
55235 + return matchpo;
55236 +}
55237 +
55238 +__u32
55239 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
55240 + const struct vfsmount * mnt, const __u32 mode)
55241 +{
55242 + struct acl_object_label *matchpo;
55243 + __u32 retval;
55244 +
55245 + if (unlikely(!(gr_status & GR_READY)))
55246 + return (mode & ~GR_AUDITS);
55247 +
55248 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
55249 +
55250 + retval = matchpo->mode & mode;
55251 +
55252 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
55253 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
55254 + __u32 new_mode = mode;
55255 +
55256 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55257 +
55258 + gr_log_learn(new_dentry, mnt, new_mode);
55259 + return new_mode;
55260 + }
55261 +
55262 + return retval;
55263 +}
55264 +
55265 +__u32
55266 +gr_check_link(const struct dentry * new_dentry,
55267 + const struct dentry * parent_dentry,
55268 + const struct vfsmount * parent_mnt,
55269 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
55270 +{
55271 + struct acl_object_label *obj;
55272 + __u32 oldmode, newmode;
55273 + __u32 needmode;
55274 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
55275 + GR_DELETE | GR_INHERIT;
55276 +
55277 + if (unlikely(!(gr_status & GR_READY)))
55278 + return (GR_CREATE | GR_LINK);
55279 +
55280 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
55281 + oldmode = obj->mode;
55282 +
55283 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
55284 + newmode = obj->mode;
55285 +
55286 + needmode = newmode & checkmodes;
55287 +
55288 + // old name for hardlink must have at least the permissions of the new name
55289 + if ((oldmode & needmode) != needmode)
55290 + goto bad;
55291 +
55292 + // if old name had restrictions/auditing, make sure the new name does as well
55293 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
55294 +
55295 + // don't allow hardlinking of suid/sgid/fcapped files without permission
55296 + if (is_privileged_binary(old_dentry))
55297 + needmode |= GR_SETID;
55298 +
55299 + if ((newmode & needmode) != needmode)
55300 + goto bad;
55301 +
55302 + // enforce minimum permissions
55303 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
55304 + return newmode;
55305 +bad:
55306 + needmode = oldmode;
55307 + if (is_privileged_binary(old_dentry))
55308 + needmode |= GR_SETID;
55309 +
55310 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
55311 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
55312 + return (GR_CREATE | GR_LINK);
55313 + } else if (newmode & GR_SUPPRESS)
55314 + return GR_SUPPRESS;
55315 + else
55316 + return 0;
55317 +}
55318 +
55319 +int
55320 +gr_check_hidden_task(const struct task_struct *task)
55321 +{
55322 + if (unlikely(!(gr_status & GR_READY)))
55323 + return 0;
55324 +
55325 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
55326 + return 1;
55327 +
55328 + return 0;
55329 +}
55330 +
55331 +int
55332 +gr_check_protected_task(const struct task_struct *task)
55333 +{
55334 + if (unlikely(!(gr_status & GR_READY) || !task))
55335 + return 0;
55336 +
55337 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
55338 + task->acl != current->acl)
55339 + return 1;
55340 +
55341 + return 0;
55342 +}
55343 +
55344 +int
55345 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
55346 +{
55347 + struct task_struct *p;
55348 + int ret = 0;
55349 +
55350 + if (unlikely(!(gr_status & GR_READY) || !pid))
55351 + return ret;
55352 +
55353 + read_lock(&tasklist_lock);
55354 + do_each_pid_task(pid, type, p) {
55355 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
55356 + p->acl != current->acl) {
55357 + ret = 1;
55358 + goto out;
55359 + }
55360 + } while_each_pid_task(pid, type, p);
55361 +out:
55362 + read_unlock(&tasklist_lock);
55363 +
55364 + return ret;
55365 +}
55366 +
55367 +void
55368 +gr_copy_label(struct task_struct *tsk)
55369 +{
55370 + tsk->signal->used_accept = 0;
55371 + tsk->acl_sp_role = 0;
55372 + tsk->acl_role_id = current->acl_role_id;
55373 + tsk->acl = current->acl;
55374 + tsk->role = current->role;
55375 + tsk->signal->curr_ip = current->signal->curr_ip;
55376 + tsk->signal->saved_ip = current->signal->saved_ip;
55377 + if (current->exec_file)
55378 + get_file(current->exec_file);
55379 + tsk->exec_file = current->exec_file;
55380 + tsk->is_writable = current->is_writable;
55381 + if (unlikely(current->signal->used_accept)) {
55382 + current->signal->curr_ip = 0;
55383 + current->signal->saved_ip = 0;
55384 + }
55385 +
55386 + return;
55387 +}
55388 +
55389 +static void
55390 +gr_set_proc_res(struct task_struct *task)
55391 +{
55392 + struct acl_subject_label *proc;
55393 + unsigned short i;
55394 +
55395 + proc = task->acl;
55396 +
55397 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
55398 + return;
55399 +
55400 + for (i = 0; i < RLIM_NLIMITS; i++) {
55401 + if (!(proc->resmask & (1 << i)))
55402 + continue;
55403 +
55404 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
55405 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
55406 + }
55407 +
55408 + return;
55409 +}
55410 +
55411 +extern int __gr_process_user_ban(struct user_struct *user);
55412 +
55413 +int
55414 +gr_check_user_change(int real, int effective, int fs)
55415 +{
55416 + unsigned int i;
55417 + __u16 num;
55418 + uid_t *uidlist;
55419 + int curuid;
55420 + int realok = 0;
55421 + int effectiveok = 0;
55422 + int fsok = 0;
55423 +
55424 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55425 + struct user_struct *user;
55426 +
55427 + if (real == -1)
55428 + goto skipit;
55429 +
55430 + user = find_user(real);
55431 + if (user == NULL)
55432 + goto skipit;
55433 +
55434 + if (__gr_process_user_ban(user)) {
55435 + /* for find_user */
55436 + free_uid(user);
55437 + return 1;
55438 + }
55439 +
55440 + /* for find_user */
55441 + free_uid(user);
55442 +
55443 +skipit:
55444 +#endif
55445 +
55446 + if (unlikely(!(gr_status & GR_READY)))
55447 + return 0;
55448 +
55449 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55450 + gr_log_learn_id_change('u', real, effective, fs);
55451 +
55452 + num = current->acl->user_trans_num;
55453 + uidlist = current->acl->user_transitions;
55454 +
55455 + if (uidlist == NULL)
55456 + return 0;
55457 +
55458 + if (real == -1)
55459 + realok = 1;
55460 + if (effective == -1)
55461 + effectiveok = 1;
55462 + if (fs == -1)
55463 + fsok = 1;
55464 +
55465 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
55466 + for (i = 0; i < num; i++) {
55467 + curuid = (int)uidlist[i];
55468 + if (real == curuid)
55469 + realok = 1;
55470 + if (effective == curuid)
55471 + effectiveok = 1;
55472 + if (fs == curuid)
55473 + fsok = 1;
55474 + }
55475 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
55476 + for (i = 0; i < num; i++) {
55477 + curuid = (int)uidlist[i];
55478 + if (real == curuid)
55479 + break;
55480 + if (effective == curuid)
55481 + break;
55482 + if (fs == curuid)
55483 + break;
55484 + }
55485 + /* not in deny list */
55486 + if (i == num) {
55487 + realok = 1;
55488 + effectiveok = 1;
55489 + fsok = 1;
55490 + }
55491 + }
55492 +
55493 + if (realok && effectiveok && fsok)
55494 + return 0;
55495 + else {
55496 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
55497 + return 1;
55498 + }
55499 +}
55500 +
55501 +int
55502 +gr_check_group_change(int real, int effective, int fs)
55503 +{
55504 + unsigned int i;
55505 + __u16 num;
55506 + gid_t *gidlist;
55507 + int curgid;
55508 + int realok = 0;
55509 + int effectiveok = 0;
55510 + int fsok = 0;
55511 +
55512 + if (unlikely(!(gr_status & GR_READY)))
55513 + return 0;
55514 +
55515 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55516 + gr_log_learn_id_change('g', real, effective, fs);
55517 +
55518 + num = current->acl->group_trans_num;
55519 + gidlist = current->acl->group_transitions;
55520 +
55521 + if (gidlist == NULL)
55522 + return 0;
55523 +
55524 + if (real == -1)
55525 + realok = 1;
55526 + if (effective == -1)
55527 + effectiveok = 1;
55528 + if (fs == -1)
55529 + fsok = 1;
55530 +
55531 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
55532 + for (i = 0; i < num; i++) {
55533 + curgid = (int)gidlist[i];
55534 + if (real == curgid)
55535 + realok = 1;
55536 + if (effective == curgid)
55537 + effectiveok = 1;
55538 + if (fs == curgid)
55539 + fsok = 1;
55540 + }
55541 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
55542 + for (i = 0; i < num; i++) {
55543 + curgid = (int)gidlist[i];
55544 + if (real == curgid)
55545 + break;
55546 + if (effective == curgid)
55547 + break;
55548 + if (fs == curgid)
55549 + break;
55550 + }
55551 + /* not in deny list */
55552 + if (i == num) {
55553 + realok = 1;
55554 + effectiveok = 1;
55555 + fsok = 1;
55556 + }
55557 + }
55558 +
55559 + if (realok && effectiveok && fsok)
55560 + return 0;
55561 + else {
55562 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
55563 + return 1;
55564 + }
55565 +}
55566 +
55567 +extern int gr_acl_is_capable(const int cap);
55568 +
55569 +void
55570 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
55571 +{
55572 + struct acl_role_label *role = task->role;
55573 + struct acl_subject_label *subj = NULL;
55574 + struct acl_object_label *obj;
55575 + struct file *filp;
55576 +
55577 + if (unlikely(!(gr_status & GR_READY)))
55578 + return;
55579 +
55580 + filp = task->exec_file;
55581 +
55582 + /* kernel process, we'll give them the kernel role */
55583 + if (unlikely(!filp)) {
55584 + task->role = kernel_role;
55585 + task->acl = kernel_role->root_label;
55586 + return;
55587 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
55588 + role = lookup_acl_role_label(task, uid, gid);
55589 +
55590 + /* don't change the role if we're not a privileged process */
55591 + if (role && task->role != role &&
55592 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
55593 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
55594 + return;
55595 +
55596 + /* perform subject lookup in possibly new role
55597 + we can use this result below in the case where role == task->role
55598 + */
55599 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
55600 +
55601 + /* if we changed uid/gid, but result in the same role
55602 + and are using inheritance, don't lose the inherited subject
55603 + if current subject is other than what normal lookup
55604 + would result in, we arrived via inheritance, don't
55605 + lose subject
55606 + */
55607 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
55608 + (subj == task->acl)))
55609 + task->acl = subj;
55610 +
55611 + task->role = role;
55612 +
55613 + task->is_writable = 0;
55614 +
55615 + /* ignore additional mmap checks for processes that are writable
55616 + by the default ACL */
55617 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55618 + if (unlikely(obj->mode & GR_WRITE))
55619 + task->is_writable = 1;
55620 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
55621 + if (unlikely(obj->mode & GR_WRITE))
55622 + task->is_writable = 1;
55623 +
55624 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
55625 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
55626 +#endif
55627 +
55628 + gr_set_proc_res(task);
55629 +
55630 + return;
55631 +}
55632 +
55633 +int
55634 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
55635 + const int unsafe_flags)
55636 +{
55637 + struct task_struct *task = current;
55638 + struct acl_subject_label *newacl;
55639 + struct acl_object_label *obj;
55640 + __u32 retmode;
55641 +
55642 + if (unlikely(!(gr_status & GR_READY)))
55643 + return 0;
55644 +
55645 + newacl = chk_subj_label(dentry, mnt, task->role);
55646 +
55647 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
55648 + did an exec
55649 + */
55650 + rcu_read_lock();
55651 + read_lock(&tasklist_lock);
55652 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
55653 + (task->parent->acl->mode & GR_POVERRIDE))) {
55654 + read_unlock(&tasklist_lock);
55655 + rcu_read_unlock();
55656 + goto skip_check;
55657 + }
55658 + read_unlock(&tasklist_lock);
55659 + rcu_read_unlock();
55660 +
55661 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
55662 + !(task->role->roletype & GR_ROLE_GOD) &&
55663 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
55664 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
55665 + if (unsafe_flags & LSM_UNSAFE_SHARE)
55666 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
55667 + else
55668 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
55669 + return -EACCES;
55670 + }
55671 +
55672 +skip_check:
55673 +
55674 + obj = chk_obj_label(dentry, mnt, task->acl);
55675 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
55676 +
55677 + if (!(task->acl->mode & GR_INHERITLEARN) &&
55678 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
55679 + if (obj->nested)
55680 + task->acl = obj->nested;
55681 + else
55682 + task->acl = newacl;
55683 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
55684 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
55685 +
55686 + task->is_writable = 0;
55687 +
55688 + /* ignore additional mmap checks for processes that are writable
55689 + by the default ACL */
55690 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
55691 + if (unlikely(obj->mode & GR_WRITE))
55692 + task->is_writable = 1;
55693 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
55694 + if (unlikely(obj->mode & GR_WRITE))
55695 + task->is_writable = 1;
55696 +
55697 + gr_set_proc_res(task);
55698 +
55699 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
55700 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
55701 +#endif
55702 + return 0;
55703 +}
55704 +
55705 +/* always called with valid inodev ptr */
55706 +static void
55707 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
55708 +{
55709 + struct acl_object_label *matchpo;
55710 + struct acl_subject_label *matchps;
55711 + struct acl_subject_label *subj;
55712 + struct acl_role_label *role;
55713 + unsigned int x;
55714 +
55715 + FOR_EACH_ROLE_START(role)
55716 + FOR_EACH_SUBJECT_START(role, subj, x)
55717 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
55718 + matchpo->mode |= GR_DELETED;
55719 + FOR_EACH_SUBJECT_END(subj,x)
55720 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
55721 + /* nested subjects aren't in the role's subj_hash table */
55722 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
55723 + matchpo->mode |= GR_DELETED;
55724 + FOR_EACH_NESTED_SUBJECT_END(subj)
55725 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
55726 + matchps->mode |= GR_DELETED;
55727 + FOR_EACH_ROLE_END(role)
55728 +
55729 + inodev->nentry->deleted = 1;
55730 +
55731 + return;
55732 +}
55733 +
55734 +void
55735 +gr_handle_delete(const ino_t ino, const dev_t dev)
55736 +{
55737 + struct inodev_entry *inodev;
55738 +
55739 + if (unlikely(!(gr_status & GR_READY)))
55740 + return;
55741 +
55742 + write_lock(&gr_inode_lock);
55743 + inodev = lookup_inodev_entry(ino, dev);
55744 + if (inodev != NULL)
55745 + do_handle_delete(inodev, ino, dev);
55746 + write_unlock(&gr_inode_lock);
55747 +
55748 + return;
55749 +}
55750 +
55751 +static void
55752 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
55753 + const ino_t newinode, const dev_t newdevice,
55754 + struct acl_subject_label *subj)
55755 +{
55756 + unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
55757 + struct acl_object_label *match;
55758 +
55759 + match = subj->obj_hash[index];
55760 +
55761 + while (match && (match->inode != oldinode ||
55762 + match->device != olddevice ||
55763 + !(match->mode & GR_DELETED)))
55764 + match = match->next;
55765 +
55766 + if (match && (match->inode == oldinode)
55767 + && (match->device == olddevice)
55768 + && (match->mode & GR_DELETED)) {
55769 + if (match->prev == NULL) {
55770 + subj->obj_hash[index] = match->next;
55771 + if (match->next != NULL)
55772 + match->next->prev = NULL;
55773 + } else {
55774 + match->prev->next = match->next;
55775 + if (match->next != NULL)
55776 + match->next->prev = match->prev;
55777 + }
55778 + match->prev = NULL;
55779 + match->next = NULL;
55780 + match->inode = newinode;
55781 + match->device = newdevice;
55782 + match->mode &= ~GR_DELETED;
55783 +
55784 + insert_acl_obj_label(match, subj);
55785 + }
55786 +
55787 + return;
55788 +}
55789 +
55790 +static void
55791 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
55792 + const ino_t newinode, const dev_t newdevice,
55793 + struct acl_role_label *role)
55794 +{
55795 + unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
55796 + struct acl_subject_label *match;
55797 +
55798 + match = role->subj_hash[index];
55799 +
55800 + while (match && (match->inode != oldinode ||
55801 + match->device != olddevice ||
55802 + !(match->mode & GR_DELETED)))
55803 + match = match->next;
55804 +
55805 + if (match && (match->inode == oldinode)
55806 + && (match->device == olddevice)
55807 + && (match->mode & GR_DELETED)) {
55808 + if (match->prev == NULL) {
55809 + role->subj_hash[index] = match->next;
55810 + if (match->next != NULL)
55811 + match->next->prev = NULL;
55812 + } else {
55813 + match->prev->next = match->next;
55814 + if (match->next != NULL)
55815 + match->next->prev = match->prev;
55816 + }
55817 + match->prev = NULL;
55818 + match->next = NULL;
55819 + match->inode = newinode;
55820 + match->device = newdevice;
55821 + match->mode &= ~GR_DELETED;
55822 +
55823 + insert_acl_subj_label(match, role);
55824 + }
55825 +
55826 + return;
55827 +}
55828 +
55829 +static void
55830 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
55831 + const ino_t newinode, const dev_t newdevice)
55832 +{
55833 + unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
55834 + struct inodev_entry *match;
55835 +
55836 + match = inodev_set.i_hash[index];
55837 +
55838 + while (match && (match->nentry->inode != oldinode ||
55839 + match->nentry->device != olddevice || !match->nentry->deleted))
55840 + match = match->next;
55841 +
55842 + if (match && (match->nentry->inode == oldinode)
55843 + && (match->nentry->device == olddevice) &&
55844 + match->nentry->deleted) {
55845 + if (match->prev == NULL) {
55846 + inodev_set.i_hash[index] = match->next;
55847 + if (match->next != NULL)
55848 + match->next->prev = NULL;
55849 + } else {
55850 + match->prev->next = match->next;
55851 + if (match->next != NULL)
55852 + match->next->prev = match->prev;
55853 + }
55854 + match->prev = NULL;
55855 + match->next = NULL;
55856 + match->nentry->inode = newinode;
55857 + match->nentry->device = newdevice;
55858 + match->nentry->deleted = 0;
55859 +
55860 + insert_inodev_entry(match);
55861 + }
55862 +
55863 + return;
55864 +}
55865 +
55866 +static void
55867 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
55868 +{
55869 + struct acl_subject_label *subj;
55870 + struct acl_role_label *role;
55871 + unsigned int x;
55872 +
55873 + FOR_EACH_ROLE_START(role)
55874 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
55875 +
55876 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
55877 + if ((subj->inode == ino) && (subj->device == dev)) {
55878 + subj->inode = ino;
55879 + subj->device = dev;
55880 + }
55881 + /* nested subjects aren't in the role's subj_hash table */
55882 + update_acl_obj_label(matchn->inode, matchn->device,
55883 + ino, dev, subj);
55884 + FOR_EACH_NESTED_SUBJECT_END(subj)
55885 + FOR_EACH_SUBJECT_START(role, subj, x)
55886 + update_acl_obj_label(matchn->inode, matchn->device,
55887 + ino, dev, subj);
55888 + FOR_EACH_SUBJECT_END(subj,x)
55889 + FOR_EACH_ROLE_END(role)
55890 +
55891 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
55892 +
55893 + return;
55894 +}
55895 +
55896 +static void
55897 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
55898 + const struct vfsmount *mnt)
55899 +{
55900 + ino_t ino = dentry->d_inode->i_ino;
55901 + dev_t dev = __get_dev(dentry);
55902 +
55903 + __do_handle_create(matchn, ino, dev);
55904 +
55905 + return;
55906 +}
55907 +
55908 +void
55909 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
55910 +{
55911 + struct name_entry *matchn;
55912 +
55913 + if (unlikely(!(gr_status & GR_READY)))
55914 + return;
55915 +
55916 + preempt_disable();
55917 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
55918 +
55919 + if (unlikely((unsigned long)matchn)) {
55920 + write_lock(&gr_inode_lock);
55921 + do_handle_create(matchn, dentry, mnt);
55922 + write_unlock(&gr_inode_lock);
55923 + }
55924 + preempt_enable();
55925 +
55926 + return;
55927 +}
55928 +
55929 +void
55930 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
55931 +{
55932 + struct name_entry *matchn;
55933 +
55934 + if (unlikely(!(gr_status & GR_READY)))
55935 + return;
55936 +
55937 + preempt_disable();
55938 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
55939 +
55940 + if (unlikely((unsigned long)matchn)) {
55941 + write_lock(&gr_inode_lock);
55942 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
55943 + write_unlock(&gr_inode_lock);
55944 + }
55945 + preempt_enable();
55946 +
55947 + return;
55948 +}
55949 +
55950 +void
55951 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55952 + struct dentry *old_dentry,
55953 + struct dentry *new_dentry,
55954 + struct vfsmount *mnt, const __u8 replace)
55955 +{
55956 + struct name_entry *matchn;
55957 + struct inodev_entry *inodev;
55958 + struct inode *inode = new_dentry->d_inode;
55959 + ino_t old_ino = old_dentry->d_inode->i_ino;
55960 + dev_t old_dev = __get_dev(old_dentry);
55961 +
55962 + /* vfs_rename swaps the name and parent link for old_dentry and
55963 + new_dentry
55964 + at this point, old_dentry has the new name, parent link, and inode
55965 + for the renamed file
55966 + if a file is being replaced by a rename, new_dentry has the inode
55967 + and name for the replaced file
55968 + */
55969 +
55970 + if (unlikely(!(gr_status & GR_READY)))
55971 + return;
55972 +
55973 + preempt_disable();
55974 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
55975 +
55976 + /* we wouldn't have to check d_inode if it weren't for
55977 + NFS silly-renaming
55978 + */
55979 +
55980 + write_lock(&gr_inode_lock);
55981 + if (unlikely(replace && inode)) {
55982 + ino_t new_ino = inode->i_ino;
55983 + dev_t new_dev = __get_dev(new_dentry);
55984 +
55985 + inodev = lookup_inodev_entry(new_ino, new_dev);
55986 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
55987 + do_handle_delete(inodev, new_ino, new_dev);
55988 + }
55989 +
55990 + inodev = lookup_inodev_entry(old_ino, old_dev);
55991 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
55992 + do_handle_delete(inodev, old_ino, old_dev);
55993 +
55994 + if (unlikely((unsigned long)matchn))
55995 + do_handle_create(matchn, old_dentry, mnt);
55996 +
55997 + write_unlock(&gr_inode_lock);
55998 + preempt_enable();
55999 +
56000 + return;
56001 +}
56002 +
56003 +static int
56004 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
56005 + unsigned char **sum)
56006 +{
56007 + struct acl_role_label *r;
56008 + struct role_allowed_ip *ipp;
56009 + struct role_transition *trans;
56010 + unsigned int i;
56011 + int found = 0;
56012 + u32 curr_ip = current->signal->curr_ip;
56013 +
56014 + current->signal->saved_ip = curr_ip;
56015 +
56016 + /* check transition table */
56017 +
56018 + for (trans = current->role->transitions; trans; trans = trans->next) {
56019 + if (!strcmp(rolename, trans->rolename)) {
56020 + found = 1;
56021 + break;
56022 + }
56023 + }
56024 +
56025 + if (!found)
56026 + return 0;
56027 +
56028 + /* handle special roles that do not require authentication
56029 + and check ip */
56030 +
56031 + FOR_EACH_ROLE_START(r)
56032 + if (!strcmp(rolename, r->rolename) &&
56033 + (r->roletype & GR_ROLE_SPECIAL)) {
56034 + found = 0;
56035 + if (r->allowed_ips != NULL) {
56036 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
56037 + if ((ntohl(curr_ip) & ipp->netmask) ==
56038 + (ntohl(ipp->addr) & ipp->netmask))
56039 + found = 1;
56040 + }
56041 + } else
56042 + found = 2;
56043 + if (!found)
56044 + return 0;
56045 +
56046 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
56047 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
56048 + *salt = NULL;
56049 + *sum = NULL;
56050 + return 1;
56051 + }
56052 + }
56053 + FOR_EACH_ROLE_END(r)
56054 +
56055 + for (i = 0; i < num_sprole_pws; i++) {
56056 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
56057 + *salt = acl_special_roles[i]->salt;
56058 + *sum = acl_special_roles[i]->sum;
56059 + return 1;
56060 + }
56061 + }
56062 +
56063 + return 0;
56064 +}
56065 +
56066 +static void
56067 +assign_special_role(char *rolename)
56068 +{
56069 + struct acl_object_label *obj;
56070 + struct acl_role_label *r;
56071 + struct acl_role_label *assigned = NULL;
56072 + struct task_struct *tsk;
56073 + struct file *filp;
56074 +
56075 + FOR_EACH_ROLE_START(r)
56076 + if (!strcmp(rolename, r->rolename) &&
56077 + (r->roletype & GR_ROLE_SPECIAL)) {
56078 + assigned = r;
56079 + break;
56080 + }
56081 + FOR_EACH_ROLE_END(r)
56082 +
56083 + if (!assigned)
56084 + return;
56085 +
56086 + read_lock(&tasklist_lock);
56087 + read_lock(&grsec_exec_file_lock);
56088 +
56089 + tsk = current->real_parent;
56090 + if (tsk == NULL)
56091 + goto out_unlock;
56092 +
56093 + filp = tsk->exec_file;
56094 + if (filp == NULL)
56095 + goto out_unlock;
56096 +
56097 + tsk->is_writable = 0;
56098 +
56099 + tsk->acl_sp_role = 1;
56100 + tsk->acl_role_id = ++acl_sp_role_value;
56101 + tsk->role = assigned;
56102 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
56103 +
56104 + /* ignore additional mmap checks for processes that are writable
56105 + by the default ACL */
56106 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56107 + if (unlikely(obj->mode & GR_WRITE))
56108 + tsk->is_writable = 1;
56109 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
56110 + if (unlikely(obj->mode & GR_WRITE))
56111 + tsk->is_writable = 1;
56112 +
56113 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56114 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
56115 +#endif
56116 +
56117 +out_unlock:
56118 + read_unlock(&grsec_exec_file_lock);
56119 + read_unlock(&tasklist_lock);
56120 + return;
56121 +}
56122 +
56123 +int gr_check_secure_terminal(struct task_struct *task)
56124 +{
56125 + struct task_struct *p, *p2, *p3;
56126 + struct files_struct *files;
56127 + struct fdtable *fdt;
56128 + struct file *our_file = NULL, *file;
56129 + int i;
56130 +
56131 + if (task->signal->tty == NULL)
56132 + return 1;
56133 +
56134 + files = get_files_struct(task);
56135 + if (files != NULL) {
56136 + rcu_read_lock();
56137 + fdt = files_fdtable(files);
56138 + for (i=0; i < fdt->max_fds; i++) {
56139 + file = fcheck_files(files, i);
56140 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
56141 + get_file(file);
56142 + our_file = file;
56143 + }
56144 + }
56145 + rcu_read_unlock();
56146 + put_files_struct(files);
56147 + }
56148 +
56149 + if (our_file == NULL)
56150 + return 1;
56151 +
56152 + read_lock(&tasklist_lock);
56153 + do_each_thread(p2, p) {
56154 + files = get_files_struct(p);
56155 + if (files == NULL ||
56156 + (p->signal && p->signal->tty == task->signal->tty)) {
56157 + if (files != NULL)
56158 + put_files_struct(files);
56159 + continue;
56160 + }
56161 + rcu_read_lock();
56162 + fdt = files_fdtable(files);
56163 + for (i=0; i < fdt->max_fds; i++) {
56164 + file = fcheck_files(files, i);
56165 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
56166 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
56167 + p3 = task;
56168 + while (p3->pid > 0) {
56169 + if (p3 == p)
56170 + break;
56171 + p3 = p3->real_parent;
56172 + }
56173 + if (p3 == p)
56174 + break;
56175 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
56176 + gr_handle_alertkill(p);
56177 + rcu_read_unlock();
56178 + put_files_struct(files);
56179 + read_unlock(&tasklist_lock);
56180 + fput(our_file);
56181 + return 0;
56182 + }
56183 + }
56184 + rcu_read_unlock();
56185 + put_files_struct(files);
56186 + } while_each_thread(p2, p);
56187 + read_unlock(&tasklist_lock);
56188 +
56189 + fput(our_file);
56190 + return 1;
56191 +}
56192 +
56193 +static int gr_rbac_disable(void *unused)
56194 +{
56195 + pax_open_kernel();
56196 + gr_status &= ~GR_READY;
56197 + pax_close_kernel();
56198 +
56199 + return 0;
56200 +}
56201 +
56202 +ssize_t
56203 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
56204 +{
56205 + struct gr_arg_wrapper uwrap;
56206 + unsigned char *sprole_salt = NULL;
56207 + unsigned char *sprole_sum = NULL;
56208 + int error = sizeof (struct gr_arg_wrapper);
56209 + int error2 = 0;
56210 +
56211 + mutex_lock(&gr_dev_mutex);
56212 +
56213 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
56214 + error = -EPERM;
56215 + goto out;
56216 + }
56217 +
56218 + if (count != sizeof (struct gr_arg_wrapper)) {
56219 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
56220 + error = -EINVAL;
56221 + goto out;
56222 + }
56223 +
56224 +
56225 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
56226 + gr_auth_expires = 0;
56227 + gr_auth_attempts = 0;
56228 + }
56229 +
56230 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
56231 + error = -EFAULT;
56232 + goto out;
56233 + }
56234 +
56235 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
56236 + error = -EINVAL;
56237 + goto out;
56238 + }
56239 +
56240 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
56241 + error = -EFAULT;
56242 + goto out;
56243 + }
56244 +
56245 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
56246 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
56247 + time_after(gr_auth_expires, get_seconds())) {
56248 + error = -EBUSY;
56249 + goto out;
56250 + }
56251 +
56252 + /* if non-root trying to do anything other than use a special role,
56253 + do not attempt authentication, do not count towards authentication
56254 + locking
56255 + */
56256 +
56257 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
56258 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
56259 + !uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
56260 + error = -EPERM;
56261 + goto out;
56262 + }
56263 +
56264 + /* ensure pw and special role name are null terminated */
56265 +
56266 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
56267 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
56268 +
56269 + /* Okay.
56270 + * We have our enough of the argument structure..(we have yet
56271 + * to copy_from_user the tables themselves) . Copy the tables
56272 + * only if we need them, i.e. for loading operations. */
56273 +
56274 + switch (gr_usermode->mode) {
56275 + case GR_STATUS:
56276 + if (gr_status & GR_READY) {
56277 + error = 1;
56278 + if (!gr_check_secure_terminal(current))
56279 + error = 3;
56280 + } else
56281 + error = 2;
56282 + goto out;
56283 + case GR_SHUTDOWN:
56284 + if ((gr_status & GR_READY)
56285 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56286 + stop_machine(gr_rbac_disable, NULL, NULL);
56287 + free_variables();
56288 + memset(gr_usermode, 0, sizeof (struct gr_arg));
56289 + memset(gr_system_salt, 0, GR_SALT_LEN);
56290 + memset(gr_system_sum, 0, GR_SHA_LEN);
56291 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
56292 + } else if (gr_status & GR_READY) {
56293 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
56294 + error = -EPERM;
56295 + } else {
56296 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
56297 + error = -EAGAIN;
56298 + }
56299 + break;
56300 + case GR_ENABLE:
56301 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
56302 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
56303 + else {
56304 + if (gr_status & GR_READY)
56305 + error = -EAGAIN;
56306 + else
56307 + error = error2;
56308 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
56309 + }
56310 + break;
56311 + case GR_RELOAD:
56312 + if (!(gr_status & GR_READY)) {
56313 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
56314 + error = -EAGAIN;
56315 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56316 + stop_machine(gr_rbac_disable, NULL, NULL);
56317 + free_variables();
56318 + error2 = gracl_init(gr_usermode);
56319 + if (!error2)
56320 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
56321 + else {
56322 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
56323 + error = error2;
56324 + }
56325 + } else {
56326 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
56327 + error = -EPERM;
56328 + }
56329 + break;
56330 + case GR_SEGVMOD:
56331 + if (unlikely(!(gr_status & GR_READY))) {
56332 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
56333 + error = -EAGAIN;
56334 + break;
56335 + }
56336 +
56337 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56338 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
56339 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
56340 + struct acl_subject_label *segvacl;
56341 + segvacl =
56342 + lookup_acl_subj_label(gr_usermode->segv_inode,
56343 + gr_usermode->segv_device,
56344 + current->role);
56345 + if (segvacl) {
56346 + segvacl->crashes = 0;
56347 + segvacl->expires = 0;
56348 + }
56349 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
56350 + gr_remove_uid(gr_usermode->segv_uid);
56351 + }
56352 + } else {
56353 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
56354 + error = -EPERM;
56355 + }
56356 + break;
56357 + case GR_SPROLE:
56358 + case GR_SPROLEPAM:
56359 + if (unlikely(!(gr_status & GR_READY))) {
56360 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
56361 + error = -EAGAIN;
56362 + break;
56363 + }
56364 +
56365 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
56366 + current->role->expires = 0;
56367 + current->role->auth_attempts = 0;
56368 + }
56369 +
56370 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
56371 + time_after(current->role->expires, get_seconds())) {
56372 + error = -EBUSY;
56373 + goto out;
56374 + }
56375 +
56376 + if (lookup_special_role_auth
56377 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
56378 + && ((!sprole_salt && !sprole_sum)
56379 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
56380 + char *p = "";
56381 + assign_special_role(gr_usermode->sp_role);
56382 + read_lock(&tasklist_lock);
56383 + if (current->real_parent)
56384 + p = current->real_parent->role->rolename;
56385 + read_unlock(&tasklist_lock);
56386 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
56387 + p, acl_sp_role_value);
56388 + } else {
56389 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
56390 + error = -EPERM;
56391 + if(!(current->role->auth_attempts++))
56392 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
56393 +
56394 + goto out;
56395 + }
56396 + break;
56397 + case GR_UNSPROLE:
56398 + if (unlikely(!(gr_status & GR_READY))) {
56399 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
56400 + error = -EAGAIN;
56401 + break;
56402 + }
56403 +
56404 + if (current->role->roletype & GR_ROLE_SPECIAL) {
56405 + char *p = "";
56406 + int i = 0;
56407 +
56408 + read_lock(&tasklist_lock);
56409 + if (current->real_parent) {
56410 + p = current->real_parent->role->rolename;
56411 + i = current->real_parent->acl_role_id;
56412 + }
56413 + read_unlock(&tasklist_lock);
56414 +
56415 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
56416 + gr_set_acls(1);
56417 + } else {
56418 + error = -EPERM;
56419 + goto out;
56420 + }
56421 + break;
56422 + default:
56423 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
56424 + error = -EINVAL;
56425 + break;
56426 + }
56427 +
56428 + if (error != -EPERM)
56429 + goto out;
56430 +
56431 + if(!(gr_auth_attempts++))
56432 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
56433 +
56434 + out:
56435 + mutex_unlock(&gr_dev_mutex);
56436 + return error;
56437 +}
56438 +
56439 +/* must be called with
56440 + rcu_read_lock();
56441 + read_lock(&tasklist_lock);
56442 + read_lock(&grsec_exec_file_lock);
56443 +*/
56444 +int gr_apply_subject_to_task(struct task_struct *task)
56445 +{
56446 + struct acl_object_label *obj;
56447 + char *tmpname;
56448 + struct acl_subject_label *tmpsubj;
56449 + struct file *filp;
56450 + struct name_entry *nmatch;
56451 +
56452 + filp = task->exec_file;
56453 + if (filp == NULL)
56454 + return 0;
56455 +
56456 + /* the following is to apply the correct subject
56457 + on binaries running when the RBAC system
56458 + is enabled, when the binaries have been
56459 + replaced or deleted since their execution
56460 + -----
56461 + when the RBAC system starts, the inode/dev
56462 + from exec_file will be one the RBAC system
56463 + is unaware of. It only knows the inode/dev
56464 + of the present file on disk, or the absence
56465 + of it.
56466 + */
56467 + preempt_disable();
56468 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
56469 +
56470 + nmatch = lookup_name_entry(tmpname);
56471 + preempt_enable();
56472 + tmpsubj = NULL;
56473 + if (nmatch) {
56474 + if (nmatch->deleted)
56475 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
56476 + else
56477 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
56478 + if (tmpsubj != NULL)
56479 + task->acl = tmpsubj;
56480 + }
56481 + if (tmpsubj == NULL)
56482 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
56483 + task->role);
56484 + if (task->acl) {
56485 + task->is_writable = 0;
56486 + /* ignore additional mmap checks for processes that are writable
56487 + by the default ACL */
56488 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56489 + if (unlikely(obj->mode & GR_WRITE))
56490 + task->is_writable = 1;
56491 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
56492 + if (unlikely(obj->mode & GR_WRITE))
56493 + task->is_writable = 1;
56494 +
56495 + gr_set_proc_res(task);
56496 +
56497 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56498 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
56499 +#endif
56500 + } else {
56501 + return 1;
56502 + }
56503 +
56504 + return 0;
56505 +}
56506 +
56507 +int
56508 +gr_set_acls(const int type)
56509 +{
56510 + struct task_struct *task, *task2;
56511 + struct acl_role_label *role = current->role;
56512 + __u16 acl_role_id = current->acl_role_id;
56513 + const struct cred *cred;
56514 + int ret;
56515 +
56516 + rcu_read_lock();
56517 + read_lock(&tasklist_lock);
56518 + read_lock(&grsec_exec_file_lock);
56519 + do_each_thread(task2, task) {
56520 + /* check to see if we're called from the exit handler,
56521 + if so, only replace ACLs that have inherited the admin
56522 + ACL */
56523 +
56524 + if (type && (task->role != role ||
56525 + task->acl_role_id != acl_role_id))
56526 + continue;
56527 +
56528 + task->acl_role_id = 0;
56529 + task->acl_sp_role = 0;
56530 +
56531 + if (task->exec_file) {
56532 + cred = __task_cred(task);
56533 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
56534 + ret = gr_apply_subject_to_task(task);
56535 + if (ret) {
56536 + read_unlock(&grsec_exec_file_lock);
56537 + read_unlock(&tasklist_lock);
56538 + rcu_read_unlock();
56539 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
56540 + return ret;
56541 + }
56542 + } else {
56543 + // it's a kernel process
56544 + task->role = kernel_role;
56545 + task->acl = kernel_role->root_label;
56546 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
56547 + task->acl->mode &= ~GR_PROCFIND;
56548 +#endif
56549 + }
56550 + } while_each_thread(task2, task);
56551 + read_unlock(&grsec_exec_file_lock);
56552 + read_unlock(&tasklist_lock);
56553 + rcu_read_unlock();
56554 +
56555 + return 0;
56556 +}
56557 +
56558 +void
56559 +gr_learn_resource(const struct task_struct *task,
56560 + const int res, const unsigned long wanted, const int gt)
56561 +{
56562 + struct acl_subject_label *acl;
56563 + const struct cred *cred;
56564 +
56565 + if (unlikely((gr_status & GR_READY) &&
56566 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
56567 + goto skip_reslog;
56568 +
56569 +#ifdef CONFIG_GRKERNSEC_RESLOG
56570 + gr_log_resource(task, res, wanted, gt);
56571 +#endif
56572 + skip_reslog:
56573 +
56574 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
56575 + return;
56576 +
56577 + acl = task->acl;
56578 +
56579 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
56580 + !(acl->resmask & (1 << (unsigned short) res))))
56581 + return;
56582 +
56583 + if (wanted >= acl->res[res].rlim_cur) {
56584 + unsigned long res_add;
56585 +
56586 + res_add = wanted;
56587 + switch (res) {
56588 + case RLIMIT_CPU:
56589 + res_add += GR_RLIM_CPU_BUMP;
56590 + break;
56591 + case RLIMIT_FSIZE:
56592 + res_add += GR_RLIM_FSIZE_BUMP;
56593 + break;
56594 + case RLIMIT_DATA:
56595 + res_add += GR_RLIM_DATA_BUMP;
56596 + break;
56597 + case RLIMIT_STACK:
56598 + res_add += GR_RLIM_STACK_BUMP;
56599 + break;
56600 + case RLIMIT_CORE:
56601 + res_add += GR_RLIM_CORE_BUMP;
56602 + break;
56603 + case RLIMIT_RSS:
56604 + res_add += GR_RLIM_RSS_BUMP;
56605 + break;
56606 + case RLIMIT_NPROC:
56607 + res_add += GR_RLIM_NPROC_BUMP;
56608 + break;
56609 + case RLIMIT_NOFILE:
56610 + res_add += GR_RLIM_NOFILE_BUMP;
56611 + break;
56612 + case RLIMIT_MEMLOCK:
56613 + res_add += GR_RLIM_MEMLOCK_BUMP;
56614 + break;
56615 + case RLIMIT_AS:
56616 + res_add += GR_RLIM_AS_BUMP;
56617 + break;
56618 + case RLIMIT_LOCKS:
56619 + res_add += GR_RLIM_LOCKS_BUMP;
56620 + break;
56621 + case RLIMIT_SIGPENDING:
56622 + res_add += GR_RLIM_SIGPENDING_BUMP;
56623 + break;
56624 + case RLIMIT_MSGQUEUE:
56625 + res_add += GR_RLIM_MSGQUEUE_BUMP;
56626 + break;
56627 + case RLIMIT_NICE:
56628 + res_add += GR_RLIM_NICE_BUMP;
56629 + break;
56630 + case RLIMIT_RTPRIO:
56631 + res_add += GR_RLIM_RTPRIO_BUMP;
56632 + break;
56633 + case RLIMIT_RTTIME:
56634 + res_add += GR_RLIM_RTTIME_BUMP;
56635 + break;
56636 + }
56637 +
56638 + acl->res[res].rlim_cur = res_add;
56639 +
56640 + if (wanted > acl->res[res].rlim_max)
56641 + acl->res[res].rlim_max = res_add;
56642 +
56643 + /* only log the subject filename, since resource logging is supported for
56644 + single-subject learning only */
56645 + rcu_read_lock();
56646 + cred = __task_cred(task);
56647 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
56648 + task->role->roletype, cred->uid, cred->gid, acl->filename,
56649 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
56650 + "", (unsigned long) res, &task->signal->saved_ip);
56651 + rcu_read_unlock();
56652 + }
56653 +
56654 + return;
56655 +}
56656 +
56657 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
56658 +void
56659 +pax_set_initial_flags(struct linux_binprm *bprm)
56660 +{
56661 + struct task_struct *task = current;
56662 + struct acl_subject_label *proc;
56663 + unsigned long flags;
56664 +
56665 + if (unlikely(!(gr_status & GR_READY)))
56666 + return;
56667 +
56668 + flags = pax_get_flags(task);
56669 +
56670 + proc = task->acl;
56671 +
56672 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
56673 + flags &= ~MF_PAX_PAGEEXEC;
56674 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
56675 + flags &= ~MF_PAX_SEGMEXEC;
56676 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
56677 + flags &= ~MF_PAX_RANDMMAP;
56678 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
56679 + flags &= ~MF_PAX_EMUTRAMP;
56680 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
56681 + flags &= ~MF_PAX_MPROTECT;
56682 +
56683 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
56684 + flags |= MF_PAX_PAGEEXEC;
56685 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
56686 + flags |= MF_PAX_SEGMEXEC;
56687 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
56688 + flags |= MF_PAX_RANDMMAP;
56689 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
56690 + flags |= MF_PAX_EMUTRAMP;
56691 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
56692 + flags |= MF_PAX_MPROTECT;
56693 +
56694 + pax_set_flags(task, flags);
56695 +
56696 + return;
56697 +}
56698 +#endif
56699 +
56700 +int
56701 +gr_handle_proc_ptrace(struct task_struct *task)
56702 +{
56703 + struct file *filp;
56704 + struct task_struct *tmp = task;
56705 + struct task_struct *curtemp = current;
56706 + __u32 retmode;
56707 +
56708 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
56709 + if (unlikely(!(gr_status & GR_READY)))
56710 + return 0;
56711 +#endif
56712 +
56713 + read_lock(&tasklist_lock);
56714 + read_lock(&grsec_exec_file_lock);
56715 + filp = task->exec_file;
56716 +
56717 + while (tmp->pid > 0) {
56718 + if (tmp == curtemp)
56719 + break;
56720 + tmp = tmp->real_parent;
56721 + }
56722 +
56723 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
56724 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
56725 + read_unlock(&grsec_exec_file_lock);
56726 + read_unlock(&tasklist_lock);
56727 + return 1;
56728 + }
56729 +
56730 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56731 + if (!(gr_status & GR_READY)) {
56732 + read_unlock(&grsec_exec_file_lock);
56733 + read_unlock(&tasklist_lock);
56734 + return 0;
56735 + }
56736 +#endif
56737 +
56738 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
56739 + read_unlock(&grsec_exec_file_lock);
56740 + read_unlock(&tasklist_lock);
56741 +
56742 + if (retmode & GR_NOPTRACE)
56743 + return 1;
56744 +
56745 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
56746 + && (current->acl != task->acl || (current->acl != current->role->root_label
56747 + && current->pid != task->pid)))
56748 + return 1;
56749 +
56750 + return 0;
56751 +}
56752 +
56753 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
56754 +{
56755 + if (unlikely(!(gr_status & GR_READY)))
56756 + return;
56757 +
56758 + if (!(current->role->roletype & GR_ROLE_GOD))
56759 + return;
56760 +
56761 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
56762 + p->role->rolename, gr_task_roletype_to_char(p),
56763 + p->acl->filename);
56764 +}
56765 +
56766 +int
56767 +gr_handle_ptrace(struct task_struct *task, const long request)
56768 +{
56769 + struct task_struct *tmp = task;
56770 + struct task_struct *curtemp = current;
56771 + __u32 retmode;
56772 +
56773 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
56774 + if (unlikely(!(gr_status & GR_READY)))
56775 + return 0;
56776 +#endif
56777 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
56778 + read_lock(&tasklist_lock);
56779 + while (tmp->pid > 0) {
56780 + if (tmp == curtemp)
56781 + break;
56782 + tmp = tmp->real_parent;
56783 + }
56784 +
56785 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
56786 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
56787 + read_unlock(&tasklist_lock);
56788 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
56789 + return 1;
56790 + }
56791 + read_unlock(&tasklist_lock);
56792 + }
56793 +
56794 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56795 + if (!(gr_status & GR_READY))
56796 + return 0;
56797 +#endif
56798 +
56799 + read_lock(&grsec_exec_file_lock);
56800 + if (unlikely(!task->exec_file)) {
56801 + read_unlock(&grsec_exec_file_lock);
56802 + return 0;
56803 + }
56804 +
56805 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
56806 + read_unlock(&grsec_exec_file_lock);
56807 +
56808 + if (retmode & GR_NOPTRACE) {
56809 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
56810 + return 1;
56811 + }
56812 +
56813 + if (retmode & GR_PTRACERD) {
56814 + switch (request) {
56815 + case PTRACE_SEIZE:
56816 + case PTRACE_POKETEXT:
56817 + case PTRACE_POKEDATA:
56818 + case PTRACE_POKEUSR:
56819 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
56820 + case PTRACE_SETREGS:
56821 + case PTRACE_SETFPREGS:
56822 +#endif
56823 +#ifdef CONFIG_X86
56824 + case PTRACE_SETFPXREGS:
56825 +#endif
56826 +#ifdef CONFIG_ALTIVEC
56827 + case PTRACE_SETVRREGS:
56828 +#endif
56829 + return 1;
56830 + default:
56831 + return 0;
56832 + }
56833 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
56834 + !(current->role->roletype & GR_ROLE_GOD) &&
56835 + (current->acl != task->acl)) {
56836 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
56837 + return 1;
56838 + }
56839 +
56840 + return 0;
56841 +}
56842 +
56843 +static int is_writable_mmap(const struct file *filp)
56844 +{
56845 + struct task_struct *task = current;
56846 + struct acl_object_label *obj, *obj2;
56847 +
56848 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
56849 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
56850 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56851 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
56852 + task->role->root_label);
56853 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
56854 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
56855 + return 1;
56856 + }
56857 + }
56858 + return 0;
56859 +}
56860 +
56861 +int
56862 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
56863 +{
56864 + __u32 mode;
56865 +
56866 + if (unlikely(!file || !(prot & PROT_EXEC)))
56867 + return 1;
56868 +
56869 + if (is_writable_mmap(file))
56870 + return 0;
56871 +
56872 + mode =
56873 + gr_search_file(file->f_path.dentry,
56874 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
56875 + file->f_path.mnt);
56876 +
56877 + if (!gr_tpe_allow(file))
56878 + return 0;
56879 +
56880 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
56881 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
56882 + return 0;
56883 + } else if (unlikely(!(mode & GR_EXEC))) {
56884 + return 0;
56885 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
56886 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
56887 + return 1;
56888 + }
56889 +
56890 + return 1;
56891 +}
56892 +
56893 +int
56894 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56895 +{
56896 + __u32 mode;
56897 +
56898 + if (unlikely(!file || !(prot & PROT_EXEC)))
56899 + return 1;
56900 +
56901 + if (is_writable_mmap(file))
56902 + return 0;
56903 +
56904 + mode =
56905 + gr_search_file(file->f_path.dentry,
56906 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
56907 + file->f_path.mnt);
56908 +
56909 + if (!gr_tpe_allow(file))
56910 + return 0;
56911 +
56912 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
56913 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
56914 + return 0;
56915 + } else if (unlikely(!(mode & GR_EXEC))) {
56916 + return 0;
56917 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
56918 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
56919 + return 1;
56920 + }
56921 +
56922 + return 1;
56923 +}
56924 +
56925 +void
56926 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56927 +{
56928 + unsigned long runtime;
56929 + unsigned long cputime;
56930 + unsigned int wday, cday;
56931 + __u8 whr, chr;
56932 + __u8 wmin, cmin;
56933 + __u8 wsec, csec;
56934 + struct timespec timeval;
56935 +
56936 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
56937 + !(task->acl->mode & GR_PROCACCT)))
56938 + return;
56939 +
56940 + do_posix_clock_monotonic_gettime(&timeval);
56941 + runtime = timeval.tv_sec - task->start_time.tv_sec;
56942 + wday = runtime / (3600 * 24);
56943 + runtime -= wday * (3600 * 24);
56944 + whr = runtime / 3600;
56945 + runtime -= whr * 3600;
56946 + wmin = runtime / 60;
56947 + runtime -= wmin * 60;
56948 + wsec = runtime;
56949 +
56950 + cputime = (task->utime + task->stime) / HZ;
56951 + cday = cputime / (3600 * 24);
56952 + cputime -= cday * (3600 * 24);
56953 + chr = cputime / 3600;
56954 + cputime -= chr * 3600;
56955 + cmin = cputime / 60;
56956 + cputime -= cmin * 60;
56957 + csec = cputime;
56958 +
56959 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
56960 +
56961 + return;
56962 +}
56963 +
56964 +void gr_set_kernel_label(struct task_struct *task)
56965 +{
56966 + if (gr_status & GR_READY) {
56967 + task->role = kernel_role;
56968 + task->acl = kernel_role->root_label;
56969 + }
56970 + return;
56971 +}
56972 +
56973 +#ifdef CONFIG_TASKSTATS
56974 +int gr_is_taskstats_denied(int pid)
56975 +{
56976 + struct task_struct *task;
56977 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56978 + const struct cred *cred;
56979 +#endif
56980 + int ret = 0;
56981 +
56982 + /* restrict taskstats viewing to un-chrooted root users
56983 + who have the 'view' subject flag if the RBAC system is enabled
56984 + */
56985 +
56986 + rcu_read_lock();
56987 + read_lock(&tasklist_lock);
56988 + task = find_task_by_vpid(pid);
56989 + if (task) {
56990 +#ifdef CONFIG_GRKERNSEC_CHROOT
56991 + if (proc_is_chrooted(task))
56992 + ret = -EACCES;
56993 +#endif
56994 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56995 + cred = __task_cred(task);
56996 +#ifdef CONFIG_GRKERNSEC_PROC_USER
56997 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID))
56998 + ret = -EACCES;
56999 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57000 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && !groups_search(cred->group_info, grsec_proc_gid))
57001 + ret = -EACCES;
57002 +#endif
57003 +#endif
57004 + if (gr_status & GR_READY) {
57005 + if (!(task->acl->mode & GR_VIEW))
57006 + ret = -EACCES;
57007 + }
57008 + } else
57009 + ret = -ENOENT;
57010 +
57011 + read_unlock(&tasklist_lock);
57012 + rcu_read_unlock();
57013 +
57014 + return ret;
57015 +}
57016 +#endif
57017 +
57018 +/* AUXV entries are filled via a descendant of search_binary_handler
57019 + after we've already applied the subject for the target
57020 +*/
57021 +int gr_acl_enable_at_secure(void)
57022 +{
57023 + if (unlikely(!(gr_status & GR_READY)))
57024 + return 0;
57025 +
57026 + if (current->acl->mode & GR_ATSECURE)
57027 + return 1;
57028 +
57029 + return 0;
57030 +}
57031 +
57032 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
57033 +{
57034 + struct task_struct *task = current;
57035 + struct dentry *dentry = file->f_path.dentry;
57036 + struct vfsmount *mnt = file->f_path.mnt;
57037 + struct acl_object_label *obj, *tmp;
57038 + struct acl_subject_label *subj;
57039 + unsigned int bufsize;
57040 + int is_not_root;
57041 + char *path;
57042 + dev_t dev = __get_dev(dentry);
57043 +
57044 + if (unlikely(!(gr_status & GR_READY)))
57045 + return 1;
57046 +
57047 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57048 + return 1;
57049 +
57050 + /* ignore Eric Biederman */
57051 + if (IS_PRIVATE(dentry->d_inode))
57052 + return 1;
57053 +
57054 + subj = task->acl;
57055 + read_lock(&gr_inode_lock);
57056 + do {
57057 + obj = lookup_acl_obj_label(ino, dev, subj);
57058 + if (obj != NULL) {
57059 + read_unlock(&gr_inode_lock);
57060 + return (obj->mode & GR_FIND) ? 1 : 0;
57061 + }
57062 + } while ((subj = subj->parent_subject));
57063 + read_unlock(&gr_inode_lock);
57064 +
57065 + /* this is purely an optimization since we're looking for an object
57066 + for the directory we're doing a readdir on
57067 + if it's possible for any globbed object to match the entry we're
57068 + filling into the directory, then the object we find here will be
57069 + an anchor point with attached globbed objects
57070 + */
57071 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
57072 + if (obj->globbed == NULL)
57073 + return (obj->mode & GR_FIND) ? 1 : 0;
57074 +
57075 + is_not_root = ((obj->filename[0] == '/') &&
57076 + (obj->filename[1] == '\0')) ? 0 : 1;
57077 + bufsize = PAGE_SIZE - namelen - is_not_root;
57078 +
57079 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
57080 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
57081 + return 1;
57082 +
57083 + preempt_disable();
57084 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
57085 + bufsize);
57086 +
57087 + bufsize = strlen(path);
57088 +
57089 + /* if base is "/", don't append an additional slash */
57090 + if (is_not_root)
57091 + *(path + bufsize) = '/';
57092 + memcpy(path + bufsize + is_not_root, name, namelen);
57093 + *(path + bufsize + namelen + is_not_root) = '\0';
57094 +
57095 + tmp = obj->globbed;
57096 + while (tmp) {
57097 + if (!glob_match(tmp->filename, path)) {
57098 + preempt_enable();
57099 + return (tmp->mode & GR_FIND) ? 1 : 0;
57100 + }
57101 + tmp = tmp->next;
57102 + }
57103 + preempt_enable();
57104 + return (obj->mode & GR_FIND) ? 1 : 0;
57105 +}
57106 +
57107 +void gr_put_exec_file(struct task_struct *task)
57108 +{
57109 + struct file *filp;
57110 +
57111 + write_lock(&grsec_exec_file_lock);
57112 + filp = task->exec_file;
57113 + task->exec_file = NULL;
57114 + write_unlock(&grsec_exec_file_lock);
57115 +
57116 + if (filp)
57117 + fput(filp);
57118 +
57119 + return;
57120 +}
57121 +
57122 +
57123 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
57124 +EXPORT_SYMBOL(gr_acl_is_enabled);
57125 +#endif
57126 +EXPORT_SYMBOL(gr_learn_resource);
57127 +EXPORT_SYMBOL(gr_set_kernel_label);
57128 +#ifdef CONFIG_SECURITY
57129 +EXPORT_SYMBOL(gr_check_user_change);
57130 +EXPORT_SYMBOL(gr_check_group_change);
57131 +#endif
57132 +
57133 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
57134 new file mode 100644
57135 index 0000000..34fefda
57136 --- /dev/null
57137 +++ b/grsecurity/gracl_alloc.c
57138 @@ -0,0 +1,105 @@
57139 +#include <linux/kernel.h>
57140 +#include <linux/mm.h>
57141 +#include <linux/slab.h>
57142 +#include <linux/vmalloc.h>
57143 +#include <linux/gracl.h>
57144 +#include <linux/grsecurity.h>
57145 +
57146 +static unsigned long alloc_stack_next = 1;
57147 +static unsigned long alloc_stack_size = 1;
57148 +static void **alloc_stack;
57149 +
57150 +static __inline__ int
57151 +alloc_pop(void)
57152 +{
57153 + if (alloc_stack_next == 1)
57154 + return 0;
57155 +
57156 + kfree(alloc_stack[alloc_stack_next - 2]);
57157 +
57158 + alloc_stack_next--;
57159 +
57160 + return 1;
57161 +}
57162 +
57163 +static __inline__ int
57164 +alloc_push(void *buf)
57165 +{
57166 + if (alloc_stack_next >= alloc_stack_size)
57167 + return 1;
57168 +
57169 + alloc_stack[alloc_stack_next - 1] = buf;
57170 +
57171 + alloc_stack_next++;
57172 +
57173 + return 0;
57174 +}
57175 +
57176 +void *
57177 +acl_alloc(unsigned long len)
57178 +{
57179 + void *ret = NULL;
57180 +
57181 + if (!len || len > PAGE_SIZE)
57182 + goto out;
57183 +
57184 + ret = kmalloc(len, GFP_KERNEL);
57185 +
57186 + if (ret) {
57187 + if (alloc_push(ret)) {
57188 + kfree(ret);
57189 + ret = NULL;
57190 + }
57191 + }
57192 +
57193 +out:
57194 + return ret;
57195 +}
57196 +
57197 +void *
57198 +acl_alloc_num(unsigned long num, unsigned long len)
57199 +{
57200 + if (!len || (num > (PAGE_SIZE / len)))
57201 + return NULL;
57202 +
57203 + return acl_alloc(num * len);
57204 +}
57205 +
57206 +void
57207 +acl_free_all(void)
57208 +{
57209 + if (gr_acl_is_enabled() || !alloc_stack)
57210 + return;
57211 +
57212 + while (alloc_pop()) ;
57213 +
57214 + if (alloc_stack) {
57215 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
57216 + kfree(alloc_stack);
57217 + else
57218 + vfree(alloc_stack);
57219 + }
57220 +
57221 + alloc_stack = NULL;
57222 + alloc_stack_size = 1;
57223 + alloc_stack_next = 1;
57224 +
57225 + return;
57226 +}
57227 +
57228 +int
57229 +acl_alloc_stack_init(unsigned long size)
57230 +{
57231 + if ((size * sizeof (void *)) <= PAGE_SIZE)
57232 + alloc_stack =
57233 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
57234 + else
57235 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
57236 +
57237 + alloc_stack_size = size;
57238 +
57239 + if (!alloc_stack)
57240 + return 0;
57241 + else
57242 + return 1;
57243 +}
57244 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
57245 new file mode 100644
57246 index 0000000..6d21049
57247 --- /dev/null
57248 +++ b/grsecurity/gracl_cap.c
57249 @@ -0,0 +1,110 @@
57250 +#include <linux/kernel.h>
57251 +#include <linux/module.h>
57252 +#include <linux/sched.h>
57253 +#include <linux/gracl.h>
57254 +#include <linux/grsecurity.h>
57255 +#include <linux/grinternal.h>
57256 +
57257 +extern const char *captab_log[];
57258 +extern int captab_log_entries;
57259 +
57260 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57261 +{
57262 + struct acl_subject_label *curracl;
57263 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
57264 + kernel_cap_t cap_audit = __cap_empty_set;
57265 +
57266 + if (!gr_acl_is_enabled())
57267 + return 1;
57268 +
57269 + curracl = task->acl;
57270 +
57271 + cap_drop = curracl->cap_lower;
57272 + cap_mask = curracl->cap_mask;
57273 + cap_audit = curracl->cap_invert_audit;
57274 +
57275 + while ((curracl = curracl->parent_subject)) {
57276 + /* if the cap isn't specified in the current computed mask but is specified in the
57277 + current level subject, and is lowered in the current level subject, then add
57278 + it to the set of dropped capabilities
57279 + otherwise, add the current level subject's mask to the current computed mask
57280 + */
57281 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
57282 + cap_raise(cap_mask, cap);
57283 + if (cap_raised(curracl->cap_lower, cap))
57284 + cap_raise(cap_drop, cap);
57285 + if (cap_raised(curracl->cap_invert_audit, cap))
57286 + cap_raise(cap_audit, cap);
57287 + }
57288 + }
57289 +
57290 + if (!cap_raised(cap_drop, cap)) {
57291 + if (cap_raised(cap_audit, cap))
57292 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
57293 + return 1;
57294 + }
57295 +
57296 + curracl = task->acl;
57297 +
57298 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
57299 + && cap_raised(cred->cap_effective, cap)) {
57300 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
57301 + task->role->roletype, cred->uid,
57302 + cred->gid, task->exec_file ?
57303 + gr_to_filename(task->exec_file->f_path.dentry,
57304 + task->exec_file->f_path.mnt) : curracl->filename,
57305 + curracl->filename, 0UL,
57306 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
57307 + return 1;
57308 + }
57309 +
57310 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
57311 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
57312 +
57313 + return 0;
57314 +}
57315 +
57316 +int
57317 +gr_acl_is_capable(const int cap)
57318 +{
57319 + return gr_task_acl_is_capable(current, current_cred(), cap);
57320 +}
57321 +
57322 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
57323 +{
57324 + struct acl_subject_label *curracl;
57325 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
57326 +
57327 + if (!gr_acl_is_enabled())
57328 + return 1;
57329 +
57330 + curracl = task->acl;
57331 +
57332 + cap_drop = curracl->cap_lower;
57333 + cap_mask = curracl->cap_mask;
57334 +
57335 + while ((curracl = curracl->parent_subject)) {
57336 + /* if the cap isn't specified in the current computed mask but is specified in the
57337 + current level subject, and is lowered in the current level subject, then add
57338 + it to the set of dropped capabilities
57339 + otherwise, add the current level subject's mask to the current computed mask
57340 + */
57341 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
57342 + cap_raise(cap_mask, cap);
57343 + if (cap_raised(curracl->cap_lower, cap))
57344 + cap_raise(cap_drop, cap);
57345 + }
57346 + }
57347 +
57348 + if (!cap_raised(cap_drop, cap))
57349 + return 1;
57350 +
57351 + return 0;
57352 +}
57353 +
57354 +int
57355 +gr_acl_is_capable_nolog(const int cap)
57356 +{
57357 + return gr_task_acl_is_capable_nolog(current, cap);
57358 +}
57359 +
57360 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
57361 new file mode 100644
57362 index 0000000..decb035
57363 --- /dev/null
57364 +++ b/grsecurity/gracl_fs.c
57365 @@ -0,0 +1,437 @@
57366 +#include <linux/kernel.h>
57367 +#include <linux/sched.h>
57368 +#include <linux/types.h>
57369 +#include <linux/fs.h>
57370 +#include <linux/file.h>
57371 +#include <linux/stat.h>
57372 +#include <linux/grsecurity.h>
57373 +#include <linux/grinternal.h>
57374 +#include <linux/gracl.h>
57375 +
57376 +umode_t
57377 +gr_acl_umask(void)
57378 +{
57379 + if (unlikely(!gr_acl_is_enabled()))
57380 + return 0;
57381 +
57382 + return current->role->umask;
57383 +}
57384 +
57385 +__u32
57386 +gr_acl_handle_hidden_file(const struct dentry * dentry,
57387 + const struct vfsmount * mnt)
57388 +{
57389 + __u32 mode;
57390 +
57391 + if (unlikely(!dentry->d_inode))
57392 + return GR_FIND;
57393 +
57394 + mode =
57395 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
57396 +
57397 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
57398 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
57399 + return mode;
57400 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
57401 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
57402 + return 0;
57403 + } else if (unlikely(!(mode & GR_FIND)))
57404 + return 0;
57405 +
57406 + return GR_FIND;
57407 +}
57408 +
57409 +__u32
57410 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57411 + int acc_mode)
57412 +{
57413 + __u32 reqmode = GR_FIND;
57414 + __u32 mode;
57415 +
57416 + if (unlikely(!dentry->d_inode))
57417 + return reqmode;
57418 +
57419 + if (acc_mode & MAY_APPEND)
57420 + reqmode |= GR_APPEND;
57421 + else if (acc_mode & MAY_WRITE)
57422 + reqmode |= GR_WRITE;
57423 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
57424 + reqmode |= GR_READ;
57425 +
57426 + mode =
57427 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
57428 + mnt);
57429 +
57430 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57431 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
57432 + reqmode & GR_READ ? " reading" : "",
57433 + reqmode & GR_WRITE ? " writing" : reqmode &
57434 + GR_APPEND ? " appending" : "");
57435 + return reqmode;
57436 + } else
57437 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57438 + {
57439 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
57440 + reqmode & GR_READ ? " reading" : "",
57441 + reqmode & GR_WRITE ? " writing" : reqmode &
57442 + GR_APPEND ? " appending" : "");
57443 + return 0;
57444 + } else if (unlikely((mode & reqmode) != reqmode))
57445 + return 0;
57446 +
57447 + return reqmode;
57448 +}
57449 +
57450 +__u32
57451 +gr_acl_handle_creat(const struct dentry * dentry,
57452 + const struct dentry * p_dentry,
57453 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57454 + const int imode)
57455 +{
57456 + __u32 reqmode = GR_WRITE | GR_CREATE;
57457 + __u32 mode;
57458 +
57459 + if (acc_mode & MAY_APPEND)
57460 + reqmode |= GR_APPEND;
57461 + // if a directory was required or the directory already exists, then
57462 + // don't count this open as a read
57463 + if ((acc_mode & MAY_READ) &&
57464 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
57465 + reqmode |= GR_READ;
57466 + if ((open_flags & O_CREAT) &&
57467 + ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
57468 + reqmode |= GR_SETID;
57469 +
57470 + mode =
57471 + gr_check_create(dentry, p_dentry, p_mnt,
57472 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
57473 +
57474 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57475 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
57476 + reqmode & GR_READ ? " reading" : "",
57477 + reqmode & GR_WRITE ? " writing" : reqmode &
57478 + GR_APPEND ? " appending" : "");
57479 + return reqmode;
57480 + } else
57481 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57482 + {
57483 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
57484 + reqmode & GR_READ ? " reading" : "",
57485 + reqmode & GR_WRITE ? " writing" : reqmode &
57486 + GR_APPEND ? " appending" : "");
57487 + return 0;
57488 + } else if (unlikely((mode & reqmode) != reqmode))
57489 + return 0;
57490 +
57491 + return reqmode;
57492 +}
57493 +
57494 +__u32
57495 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
57496 + const int fmode)
57497 +{
57498 + __u32 mode, reqmode = GR_FIND;
57499 +
57500 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
57501 + reqmode |= GR_EXEC;
57502 + if (fmode & S_IWOTH)
57503 + reqmode |= GR_WRITE;
57504 + if (fmode & S_IROTH)
57505 + reqmode |= GR_READ;
57506 +
57507 + mode =
57508 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
57509 + mnt);
57510 +
57511 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57512 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
57513 + reqmode & GR_READ ? " reading" : "",
57514 + reqmode & GR_WRITE ? " writing" : "",
57515 + reqmode & GR_EXEC ? " executing" : "");
57516 + return reqmode;
57517 + } else
57518 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57519 + {
57520 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
57521 + reqmode & GR_READ ? " reading" : "",
57522 + reqmode & GR_WRITE ? " writing" : "",
57523 + reqmode & GR_EXEC ? " executing" : "");
57524 + return 0;
57525 + } else if (unlikely((mode & reqmode) != reqmode))
57526 + return 0;
57527 +
57528 + return reqmode;
57529 +}
57530 +
57531 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
57532 +{
57533 + __u32 mode;
57534 +
57535 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
57536 +
57537 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
57538 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
57539 + return mode;
57540 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
57541 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
57542 + return 0;
57543 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
57544 + return 0;
57545 +
57546 + return (reqmode);
57547 +}
57548 +
57549 +__u32
57550 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57551 +{
57552 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
57553 +}
57554 +
57555 +__u32
57556 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
57557 +{
57558 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
57559 +}
57560 +
57561 +__u32
57562 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
57563 +{
57564 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
57565 +}
57566 +
57567 +__u32
57568 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
57569 +{
57570 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
57571 +}
57572 +
57573 +__u32
57574 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
57575 + umode_t *modeptr)
57576 +{
57577 + umode_t mode;
57578 +
57579 + *modeptr &= ~gr_acl_umask();
57580 + mode = *modeptr;
57581 +
57582 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
57583 + return 1;
57584 +
57585 + if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
57586 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
57587 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
57588 + GR_CHMOD_ACL_MSG);
57589 + } else {
57590 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
57591 + }
57592 +}
57593 +
57594 +__u32
57595 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
57596 +{
57597 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
57598 +}
57599 +
57600 +__u32
57601 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
57602 +{
57603 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
57604 +}
57605 +
57606 +__u32
57607 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
57608 +{
57609 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
57610 +}
57611 +
57612 +__u32
57613 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
57614 +{
57615 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
57616 + GR_UNIXCONNECT_ACL_MSG);
57617 +}
57618 +
57619 +/* hardlinks require at minimum create and link permission,
57620 + any additional privilege required is based on the
57621 + privilege of the file being linked to
57622 +*/
57623 +__u32
57624 +gr_acl_handle_link(const struct dentry * new_dentry,
57625 + const struct dentry * parent_dentry,
57626 + const struct vfsmount * parent_mnt,
57627 + const struct dentry * old_dentry,
57628 + const struct vfsmount * old_mnt, const struct filename *to)
57629 +{
57630 + __u32 mode;
57631 + __u32 needmode = GR_CREATE | GR_LINK;
57632 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
57633 +
57634 + mode =
57635 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
57636 + old_mnt);
57637 +
57638 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
57639 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
57640 + return mode;
57641 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
57642 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
57643 + return 0;
57644 + } else if (unlikely((mode & needmode) != needmode))
57645 + return 0;
57646 +
57647 + return 1;
57648 +}
57649 +
57650 +__u32
57651 +gr_acl_handle_symlink(const struct dentry * new_dentry,
57652 + const struct dentry * parent_dentry,
57653 + const struct vfsmount * parent_mnt, const struct filename *from)
57654 +{
57655 + __u32 needmode = GR_WRITE | GR_CREATE;
57656 + __u32 mode;
57657 +
57658 + mode =
57659 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
57660 + GR_CREATE | GR_AUDIT_CREATE |
57661 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
57662 +
57663 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
57664 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
57665 + return mode;
57666 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
57667 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
57668 + return 0;
57669 + } else if (unlikely((mode & needmode) != needmode))
57670 + return 0;
57671 +
57672 + return (GR_WRITE | GR_CREATE);
57673 +}
57674 +
57675 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
57676 +{
57677 + __u32 mode;
57678 +
57679 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
57680 +
57681 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
57682 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
57683 + return mode;
57684 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
57685 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
57686 + return 0;
57687 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
57688 + return 0;
57689 +
57690 + return (reqmode);
57691 +}
57692 +
57693 +__u32
57694 +gr_acl_handle_mknod(const struct dentry * new_dentry,
57695 + const struct dentry * parent_dentry,
57696 + const struct vfsmount * parent_mnt,
57697 + const int mode)
57698 +{
57699 + __u32 reqmode = GR_WRITE | GR_CREATE;
57700 + if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
57701 + reqmode |= GR_SETID;
57702 +
57703 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
57704 + reqmode, GR_MKNOD_ACL_MSG);
57705 +}
57706 +
57707 +__u32
57708 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
57709 + const struct dentry *parent_dentry,
57710 + const struct vfsmount *parent_mnt)
57711 +{
57712 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
57713 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
57714 +}
57715 +
57716 +#define RENAME_CHECK_SUCCESS(old, new) \
57717 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
57718 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
57719 +
57720 +int
57721 +gr_acl_handle_rename(struct dentry *new_dentry,
57722 + struct dentry *parent_dentry,
57723 + const struct vfsmount *parent_mnt,
57724 + struct dentry *old_dentry,
57725 + struct inode *old_parent_inode,
57726 + struct vfsmount *old_mnt, const struct filename *newname)
57727 +{
57728 + __u32 comp1, comp2;
57729 + int error = 0;
57730 +
57731 + if (unlikely(!gr_acl_is_enabled()))
57732 + return 0;
57733 +
57734 + if (!new_dentry->d_inode) {
57735 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
57736 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
57737 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
57738 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
57739 + GR_DELETE | GR_AUDIT_DELETE |
57740 + GR_AUDIT_READ | GR_AUDIT_WRITE |
57741 + GR_SUPPRESS, old_mnt);
57742 + } else {
57743 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
57744 + GR_CREATE | GR_DELETE |
57745 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
57746 + GR_AUDIT_READ | GR_AUDIT_WRITE |
57747 + GR_SUPPRESS, parent_mnt);
57748 + comp2 =
57749 + gr_search_file(old_dentry,
57750 + GR_READ | GR_WRITE | GR_AUDIT_READ |
57751 + GR_DELETE | GR_AUDIT_DELETE |
57752 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
57753 + }
57754 +
57755 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
57756 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
57757 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
57758 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
57759 + && !(comp2 & GR_SUPPRESS)) {
57760 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
57761 + error = -EACCES;
57762 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
57763 + error = -EACCES;
57764 +
57765 + return error;
57766 +}
57767 +
57768 +void
57769 +gr_acl_handle_exit(void)
57770 +{
57771 + u16 id;
57772 + char *rolename;
57773 + struct file *exec_file;
57774 +
57775 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
57776 + !(current->role->roletype & GR_ROLE_PERSIST))) {
57777 + id = current->acl_role_id;
57778 + rolename = current->role->rolename;
57779 + gr_set_acls(1);
57780 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
57781 + }
57782 +
57783 + write_lock(&grsec_exec_file_lock);
57784 + exec_file = current->exec_file;
57785 + current->exec_file = NULL;
57786 + write_unlock(&grsec_exec_file_lock);
57787 +
57788 + if (exec_file)
57789 + fput(exec_file);
57790 +}
57791 +
57792 +int
57793 +gr_acl_handle_procpidmem(const struct task_struct *task)
57794 +{
57795 + if (unlikely(!gr_acl_is_enabled()))
57796 + return 0;
57797 +
57798 + if (task != current && task->acl->mode & GR_PROTPROCFD)
57799 + return -EACCES;
57800 +
57801 + return 0;
57802 +}
57803 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
57804 new file mode 100644
57805 index 0000000..58800a7
57806 --- /dev/null
57807 +++ b/grsecurity/gracl_ip.c
57808 @@ -0,0 +1,384 @@
57809 +#include <linux/kernel.h>
57810 +#include <asm/uaccess.h>
57811 +#include <asm/errno.h>
57812 +#include <net/sock.h>
57813 +#include <linux/file.h>
57814 +#include <linux/fs.h>
57815 +#include <linux/net.h>
57816 +#include <linux/in.h>
57817 +#include <linux/skbuff.h>
57818 +#include <linux/ip.h>
57819 +#include <linux/udp.h>
57820 +#include <linux/types.h>
57821 +#include <linux/sched.h>
57822 +#include <linux/netdevice.h>
57823 +#include <linux/inetdevice.h>
57824 +#include <linux/gracl.h>
57825 +#include <linux/grsecurity.h>
57826 +#include <linux/grinternal.h>
57827 +
57828 +#define GR_BIND 0x01
57829 +#define GR_CONNECT 0x02
57830 +#define GR_INVERT 0x04
57831 +#define GR_BINDOVERRIDE 0x08
57832 +#define GR_CONNECTOVERRIDE 0x10
57833 +#define GR_SOCK_FAMILY 0x20
57834 +
57835 +static const char * gr_protocols[IPPROTO_MAX] = {
57836 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
57837 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
57838 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
57839 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
57840 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
57841 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
57842 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
57843 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
57844 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
57845 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
57846 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
57847 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
57848 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
57849 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
57850 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
57851 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
57852 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
57853 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
57854 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
57855 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
57856 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
57857 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
57858 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
57859 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
57860 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
57861 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
57862 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
57863 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
57864 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
57865 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
57866 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
57867 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
57868 + };
57869 +
57870 +static const char * gr_socktypes[SOCK_MAX] = {
57871 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
57872 + "unknown:7", "unknown:8", "unknown:9", "packet"
57873 + };
57874 +
57875 +static const char * gr_sockfamilies[AF_MAX+1] = {
57876 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
57877 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
57878 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
57879 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
57880 + };
57881 +
57882 +const char *
57883 +gr_proto_to_name(unsigned char proto)
57884 +{
57885 + return gr_protocols[proto];
57886 +}
57887 +
57888 +const char *
57889 +gr_socktype_to_name(unsigned char type)
57890 +{
57891 + return gr_socktypes[type];
57892 +}
57893 +
57894 +const char *
57895 +gr_sockfamily_to_name(unsigned char family)
57896 +{
57897 + return gr_sockfamilies[family];
57898 +}
57899 +
57900 +int
57901 +gr_search_socket(const int domain, const int type, const int protocol)
57902 +{
57903 + struct acl_subject_label *curr;
57904 + const struct cred *cred = current_cred();
57905 +
57906 + if (unlikely(!gr_acl_is_enabled()))
57907 + goto exit;
57908 +
57909 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
57910 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
57911 + goto exit; // let the kernel handle it
57912 +
57913 + curr = current->acl;
57914 +
57915 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
57916 + /* the family is allowed, if this is PF_INET allow it only if
57917 + the extra sock type/protocol checks pass */
57918 + if (domain == PF_INET)
57919 + goto inet_check;
57920 + goto exit;
57921 + } else {
57922 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
57923 + __u32 fakeip = 0;
57924 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
57925 + current->role->roletype, cred->uid,
57926 + cred->gid, current->exec_file ?
57927 + gr_to_filename(current->exec_file->f_path.dentry,
57928 + current->exec_file->f_path.mnt) :
57929 + curr->filename, curr->filename,
57930 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
57931 + &current->signal->saved_ip);
57932 + goto exit;
57933 + }
57934 + goto exit_fail;
57935 + }
57936 +
57937 +inet_check:
57938 + /* the rest of this checking is for IPv4 only */
57939 + if (!curr->ips)
57940 + goto exit;
57941 +
57942 + if ((curr->ip_type & (1 << type)) &&
57943 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
57944 + goto exit;
57945 +
57946 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
57947 + /* we don't place acls on raw sockets , and sometimes
57948 + dgram/ip sockets are opened for ioctl and not
57949 + bind/connect, so we'll fake a bind learn log */
57950 + if (type == SOCK_RAW || type == SOCK_PACKET) {
57951 + __u32 fakeip = 0;
57952 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
57953 + current->role->roletype, cred->uid,
57954 + cred->gid, current->exec_file ?
57955 + gr_to_filename(current->exec_file->f_path.dentry,
57956 + current->exec_file->f_path.mnt) :
57957 + curr->filename, curr->filename,
57958 + &fakeip, 0, type,
57959 + protocol, GR_CONNECT, &current->signal->saved_ip);
57960 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
57961 + __u32 fakeip = 0;
57962 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
57963 + current->role->roletype, cred->uid,
57964 + cred->gid, current->exec_file ?
57965 + gr_to_filename(current->exec_file->f_path.dentry,
57966 + current->exec_file->f_path.mnt) :
57967 + curr->filename, curr->filename,
57968 + &fakeip, 0, type,
57969 + protocol, GR_BIND, &current->signal->saved_ip);
57970 + }
57971 + /* we'll log when they use connect or bind */
57972 + goto exit;
57973 + }
57974 +
57975 +exit_fail:
57976 + if (domain == PF_INET)
57977 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
57978 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
57979 + else
57980 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
57981 + gr_socktype_to_name(type), protocol);
57982 +
57983 + return 0;
57984 +exit:
57985 + return 1;
57986 +}
57987 +
57988 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
57989 +{
57990 + if ((ip->mode & mode) &&
57991 + (ip_port >= ip->low) &&
57992 + (ip_port <= ip->high) &&
57993 + ((ntohl(ip_addr) & our_netmask) ==
57994 + (ntohl(our_addr) & our_netmask))
57995 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
57996 + && (ip->type & (1 << type))) {
57997 + if (ip->mode & GR_INVERT)
57998 + return 2; // specifically denied
57999 + else
58000 + return 1; // allowed
58001 + }
58002 +
58003 + return 0; // not specifically allowed, may continue parsing
58004 +}
58005 +
58006 +static int
58007 +gr_search_connectbind(const int full_mode, struct sock *sk,
58008 + struct sockaddr_in *addr, const int type)
58009 +{
58010 + char iface[IFNAMSIZ] = {0};
58011 + struct acl_subject_label *curr;
58012 + struct acl_ip_label *ip;
58013 + struct inet_sock *isk;
58014 + struct net_device *dev;
58015 + struct in_device *idev;
58016 + unsigned long i;
58017 + int ret;
58018 + int mode = full_mode & (GR_BIND | GR_CONNECT);
58019 + __u32 ip_addr = 0;
58020 + __u32 our_addr;
58021 + __u32 our_netmask;
58022 + char *p;
58023 + __u16 ip_port = 0;
58024 + const struct cred *cred = current_cred();
58025 +
58026 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
58027 + return 0;
58028 +
58029 + curr = current->acl;
58030 + isk = inet_sk(sk);
58031 +
58032 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
58033 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
58034 + addr->sin_addr.s_addr = curr->inaddr_any_override;
58035 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
58036 + struct sockaddr_in saddr;
58037 + int err;
58038 +
58039 + saddr.sin_family = AF_INET;
58040 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
58041 + saddr.sin_port = isk->inet_sport;
58042 +
58043 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
58044 + if (err)
58045 + return err;
58046 +
58047 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
58048 + if (err)
58049 + return err;
58050 + }
58051 +
58052 + if (!curr->ips)
58053 + return 0;
58054 +
58055 + ip_addr = addr->sin_addr.s_addr;
58056 + ip_port = ntohs(addr->sin_port);
58057 +
58058 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
58059 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58060 + current->role->roletype, cred->uid,
58061 + cred->gid, current->exec_file ?
58062 + gr_to_filename(current->exec_file->f_path.dentry,
58063 + current->exec_file->f_path.mnt) :
58064 + curr->filename, curr->filename,
58065 + &ip_addr, ip_port, type,
58066 + sk->sk_protocol, mode, &current->signal->saved_ip);
58067 + return 0;
58068 + }
58069 +
58070 + for (i = 0; i < curr->ip_num; i++) {
58071 + ip = *(curr->ips + i);
58072 + if (ip->iface != NULL) {
58073 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
58074 + p = strchr(iface, ':');
58075 + if (p != NULL)
58076 + *p = '\0';
58077 + dev = dev_get_by_name(sock_net(sk), iface);
58078 + if (dev == NULL)
58079 + continue;
58080 + idev = in_dev_get(dev);
58081 + if (idev == NULL) {
58082 + dev_put(dev);
58083 + continue;
58084 + }
58085 + rcu_read_lock();
58086 + for_ifa(idev) {
58087 + if (!strcmp(ip->iface, ifa->ifa_label)) {
58088 + our_addr = ifa->ifa_address;
58089 + our_netmask = 0xffffffff;
58090 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
58091 + if (ret == 1) {
58092 + rcu_read_unlock();
58093 + in_dev_put(idev);
58094 + dev_put(dev);
58095 + return 0;
58096 + } else if (ret == 2) {
58097 + rcu_read_unlock();
58098 + in_dev_put(idev);
58099 + dev_put(dev);
58100 + goto denied;
58101 + }
58102 + }
58103 + } endfor_ifa(idev);
58104 + rcu_read_unlock();
58105 + in_dev_put(idev);
58106 + dev_put(dev);
58107 + } else {
58108 + our_addr = ip->addr;
58109 + our_netmask = ip->netmask;
58110 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
58111 + if (ret == 1)
58112 + return 0;
58113 + else if (ret == 2)
58114 + goto denied;
58115 + }
58116 + }
58117 +
58118 +denied:
58119 + if (mode == GR_BIND)
58120 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58121 + else if (mode == GR_CONNECT)
58122 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58123 +
58124 + return -EACCES;
58125 +}
58126 +
58127 +int
58128 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
58129 +{
58130 + /* always allow disconnection of dgram sockets with connect */
58131 + if (addr->sin_family == AF_UNSPEC)
58132 + return 0;
58133 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
58134 +}
58135 +
58136 +int
58137 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
58138 +{
58139 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
58140 +}
58141 +
58142 +int gr_search_listen(struct socket *sock)
58143 +{
58144 + struct sock *sk = sock->sk;
58145 + struct sockaddr_in addr;
58146 +
58147 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
58148 + addr.sin_port = inet_sk(sk)->inet_sport;
58149 +
58150 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
58151 +}
58152 +
58153 +int gr_search_accept(struct socket *sock)
58154 +{
58155 + struct sock *sk = sock->sk;
58156 + struct sockaddr_in addr;
58157 +
58158 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
58159 + addr.sin_port = inet_sk(sk)->inet_sport;
58160 +
58161 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
58162 +}
58163 +
58164 +int
58165 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
58166 +{
58167 + if (addr)
58168 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
58169 + else {
58170 + struct sockaddr_in sin;
58171 + const struct inet_sock *inet = inet_sk(sk);
58172 +
58173 + sin.sin_addr.s_addr = inet->inet_daddr;
58174 + sin.sin_port = inet->inet_dport;
58175 +
58176 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
58177 + }
58178 +}
58179 +
58180 +int
58181 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
58182 +{
58183 + struct sockaddr_in sin;
58184 +
58185 + if (unlikely(skb->len < sizeof (struct udphdr)))
58186 + return 0; // skip this packet
58187 +
58188 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
58189 + sin.sin_port = udp_hdr(skb)->source;
58190 +
58191 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
58192 +}
58193 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
58194 new file mode 100644
58195 index 0000000..25f54ef
58196 --- /dev/null
58197 +++ b/grsecurity/gracl_learn.c
58198 @@ -0,0 +1,207 @@
58199 +#include <linux/kernel.h>
58200 +#include <linux/mm.h>
58201 +#include <linux/sched.h>
58202 +#include <linux/poll.h>
58203 +#include <linux/string.h>
58204 +#include <linux/file.h>
58205 +#include <linux/types.h>
58206 +#include <linux/vmalloc.h>
58207 +#include <linux/grinternal.h>
58208 +
58209 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
58210 + size_t count, loff_t *ppos);
58211 +extern int gr_acl_is_enabled(void);
58212 +
58213 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
58214 +static int gr_learn_attached;
58215 +
58216 +/* use a 512k buffer */
58217 +#define LEARN_BUFFER_SIZE (512 * 1024)
58218 +
58219 +static DEFINE_SPINLOCK(gr_learn_lock);
58220 +static DEFINE_MUTEX(gr_learn_user_mutex);
58221 +
58222 +/* we need to maintain two buffers, so that the kernel context of grlearn
58223 + uses a semaphore around the userspace copying, and the other kernel contexts
58224 + use a spinlock when copying into the buffer, since they cannot sleep
58225 +*/
58226 +static char *learn_buffer;
58227 +static char *learn_buffer_user;
58228 +static int learn_buffer_len;
58229 +static int learn_buffer_user_len;
58230 +
58231 +static ssize_t
58232 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
58233 +{
58234 + DECLARE_WAITQUEUE(wait, current);
58235 + ssize_t retval = 0;
58236 +
58237 + add_wait_queue(&learn_wait, &wait);
58238 + set_current_state(TASK_INTERRUPTIBLE);
58239 + do {
58240 + mutex_lock(&gr_learn_user_mutex);
58241 + spin_lock(&gr_learn_lock);
58242 + if (learn_buffer_len)
58243 + break;
58244 + spin_unlock(&gr_learn_lock);
58245 + mutex_unlock(&gr_learn_user_mutex);
58246 + if (file->f_flags & O_NONBLOCK) {
58247 + retval = -EAGAIN;
58248 + goto out;
58249 + }
58250 + if (signal_pending(current)) {
58251 + retval = -ERESTARTSYS;
58252 + goto out;
58253 + }
58254 +
58255 + schedule();
58256 + } while (1);
58257 +
58258 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
58259 + learn_buffer_user_len = learn_buffer_len;
58260 + retval = learn_buffer_len;
58261 + learn_buffer_len = 0;
58262 +
58263 + spin_unlock(&gr_learn_lock);
58264 +
58265 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
58266 + retval = -EFAULT;
58267 +
58268 + mutex_unlock(&gr_learn_user_mutex);
58269 +out:
58270 + set_current_state(TASK_RUNNING);
58271 + remove_wait_queue(&learn_wait, &wait);
58272 + return retval;
58273 +}
58274 +
58275 +static unsigned int
58276 +poll_learn(struct file * file, poll_table * wait)
58277 +{
58278 + poll_wait(file, &learn_wait, wait);
58279 +
58280 + if (learn_buffer_len)
58281 + return (POLLIN | POLLRDNORM);
58282 +
58283 + return 0;
58284 +}
58285 +
58286 +void
58287 +gr_clear_learn_entries(void)
58288 +{
58289 + char *tmp;
58290 +
58291 + mutex_lock(&gr_learn_user_mutex);
58292 + spin_lock(&gr_learn_lock);
58293 + tmp = learn_buffer;
58294 + learn_buffer = NULL;
58295 + spin_unlock(&gr_learn_lock);
58296 + if (tmp)
58297 + vfree(tmp);
58298 + if (learn_buffer_user != NULL) {
58299 + vfree(learn_buffer_user);
58300 + learn_buffer_user = NULL;
58301 + }
58302 + learn_buffer_len = 0;
58303 + mutex_unlock(&gr_learn_user_mutex);
58304 +
58305 + return;
58306 +}
58307 +
58308 +void
58309 +gr_add_learn_entry(const char *fmt, ...)
58310 +{
58311 + va_list args;
58312 + unsigned int len;
58313 +
58314 + if (!gr_learn_attached)
58315 + return;
58316 +
58317 + spin_lock(&gr_learn_lock);
58318 +
58319 + /* leave a gap at the end so we know when it's "full" but don't have to
58320 + compute the exact length of the string we're trying to append
58321 + */
58322 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
58323 + spin_unlock(&gr_learn_lock);
58324 + wake_up_interruptible(&learn_wait);
58325 + return;
58326 + }
58327 + if (learn_buffer == NULL) {
58328 + spin_unlock(&gr_learn_lock);
58329 + return;
58330 + }
58331 +
58332 + va_start(args, fmt);
58333 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
58334 + va_end(args);
58335 +
58336 + learn_buffer_len += len + 1;
58337 +
58338 + spin_unlock(&gr_learn_lock);
58339 + wake_up_interruptible(&learn_wait);
58340 +
58341 + return;
58342 +}
58343 +
58344 +static int
58345 +open_learn(struct inode *inode, struct file *file)
58346 +{
58347 + if (file->f_mode & FMODE_READ && gr_learn_attached)
58348 + return -EBUSY;
58349 + if (file->f_mode & FMODE_READ) {
58350 + int retval = 0;
58351 + mutex_lock(&gr_learn_user_mutex);
58352 + if (learn_buffer == NULL)
58353 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
58354 + if (learn_buffer_user == NULL)
58355 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
58356 + if (learn_buffer == NULL) {
58357 + retval = -ENOMEM;
58358 + goto out_error;
58359 + }
58360 + if (learn_buffer_user == NULL) {
58361 + retval = -ENOMEM;
58362 + goto out_error;
58363 + }
58364 + learn_buffer_len = 0;
58365 + learn_buffer_user_len = 0;
58366 + gr_learn_attached = 1;
58367 +out_error:
58368 + mutex_unlock(&gr_learn_user_mutex);
58369 + return retval;
58370 + }
58371 + return 0;
58372 +}
58373 +
58374 +static int
58375 +close_learn(struct inode *inode, struct file *file)
58376 +{
58377 + if (file->f_mode & FMODE_READ) {
58378 + char *tmp = NULL;
58379 + mutex_lock(&gr_learn_user_mutex);
58380 + spin_lock(&gr_learn_lock);
58381 + tmp = learn_buffer;
58382 + learn_buffer = NULL;
58383 + spin_unlock(&gr_learn_lock);
58384 + if (tmp)
58385 + vfree(tmp);
58386 + if (learn_buffer_user != NULL) {
58387 + vfree(learn_buffer_user);
58388 + learn_buffer_user = NULL;
58389 + }
58390 + learn_buffer_len = 0;
58391 + learn_buffer_user_len = 0;
58392 + gr_learn_attached = 0;
58393 + mutex_unlock(&gr_learn_user_mutex);
58394 + }
58395 +
58396 + return 0;
58397 +}
58398 +
58399 +const struct file_operations grsec_fops = {
58400 + .read = read_learn,
58401 + .write = write_grsec_handler,
58402 + .open = open_learn,
58403 + .release = close_learn,
58404 + .poll = poll_learn,
58405 +};
58406 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
58407 new file mode 100644
58408 index 0000000..39645c9
58409 --- /dev/null
58410 +++ b/grsecurity/gracl_res.c
58411 @@ -0,0 +1,68 @@
58412 +#include <linux/kernel.h>
58413 +#include <linux/sched.h>
58414 +#include <linux/gracl.h>
58415 +#include <linux/grinternal.h>
58416 +
58417 +static const char *restab_log[] = {
58418 + [RLIMIT_CPU] = "RLIMIT_CPU",
58419 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
58420 + [RLIMIT_DATA] = "RLIMIT_DATA",
58421 + [RLIMIT_STACK] = "RLIMIT_STACK",
58422 + [RLIMIT_CORE] = "RLIMIT_CORE",
58423 + [RLIMIT_RSS] = "RLIMIT_RSS",
58424 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
58425 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
58426 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
58427 + [RLIMIT_AS] = "RLIMIT_AS",
58428 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
58429 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
58430 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
58431 + [RLIMIT_NICE] = "RLIMIT_NICE",
58432 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
58433 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
58434 + [GR_CRASH_RES] = "RLIMIT_CRASH"
58435 +};
58436 +
58437 +void
58438 +gr_log_resource(const struct task_struct *task,
58439 + const int res, const unsigned long wanted, const int gt)
58440 +{
58441 + const struct cred *cred;
58442 + unsigned long rlim;
58443 +
58444 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
58445 + return;
58446 +
58447 + // not yet supported resource
58448 + if (unlikely(!restab_log[res]))
58449 + return;
58450 +
58451 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
58452 + rlim = task_rlimit_max(task, res);
58453 + else
58454 + rlim = task_rlimit(task, res);
58455 +
58456 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
58457 + return;
58458 +
58459 + rcu_read_lock();
58460 + cred = __task_cred(task);
58461 +
58462 + if (res == RLIMIT_NPROC &&
58463 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
58464 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
58465 + goto out_rcu_unlock;
58466 + else if (res == RLIMIT_MEMLOCK &&
58467 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
58468 + goto out_rcu_unlock;
58469 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
58470 + goto out_rcu_unlock;
58471 + rcu_read_unlock();
58472 +
58473 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
58474 +
58475 + return;
58476 +out_rcu_unlock:
58477 + rcu_read_unlock();
58478 + return;
58479 +}
58480 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
58481 new file mode 100644
58482 index 0000000..25197e9
58483 --- /dev/null
58484 +++ b/grsecurity/gracl_segv.c
58485 @@ -0,0 +1,299 @@
58486 +#include <linux/kernel.h>
58487 +#include <linux/mm.h>
58488 +#include <asm/uaccess.h>
58489 +#include <asm/errno.h>
58490 +#include <asm/mman.h>
58491 +#include <net/sock.h>
58492 +#include <linux/file.h>
58493 +#include <linux/fs.h>
58494 +#include <linux/net.h>
58495 +#include <linux/in.h>
58496 +#include <linux/slab.h>
58497 +#include <linux/types.h>
58498 +#include <linux/sched.h>
58499 +#include <linux/timer.h>
58500 +#include <linux/gracl.h>
58501 +#include <linux/grsecurity.h>
58502 +#include <linux/grinternal.h>
58503 +
58504 +static struct crash_uid *uid_set;
58505 +static unsigned short uid_used;
58506 +static DEFINE_SPINLOCK(gr_uid_lock);
58507 +extern rwlock_t gr_inode_lock;
58508 +extern struct acl_subject_label *
58509 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
58510 + struct acl_role_label *role);
58511 +
58512 +#ifdef CONFIG_BTRFS_FS
58513 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
58514 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
58515 +#endif
58516 +
58517 +static inline dev_t __get_dev(const struct dentry *dentry)
58518 +{
58519 +#ifdef CONFIG_BTRFS_FS
58520 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
58521 + return get_btrfs_dev_from_inode(dentry->d_inode);
58522 + else
58523 +#endif
58524 + return dentry->d_inode->i_sb->s_dev;
58525 +}
58526 +
58527 +int
58528 +gr_init_uidset(void)
58529 +{
58530 + uid_set =
58531 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
58532 + uid_used = 0;
58533 +
58534 + return uid_set ? 1 : 0;
58535 +}
58536 +
58537 +void
58538 +gr_free_uidset(void)
58539 +{
58540 + if (uid_set)
58541 + kfree(uid_set);
58542 +
58543 + return;
58544 +}
58545 +
58546 +int
58547 +gr_find_uid(const uid_t uid)
58548 +{
58549 + struct crash_uid *tmp = uid_set;
58550 + uid_t buid;
58551 + int low = 0, high = uid_used - 1, mid;
58552 +
58553 + while (high >= low) {
58554 + mid = (low + high) >> 1;
58555 + buid = tmp[mid].uid;
58556 + if (buid == uid)
58557 + return mid;
58558 + if (buid > uid)
58559 + high = mid - 1;
58560 + if (buid < uid)
58561 + low = mid + 1;
58562 + }
58563 +
58564 + return -1;
58565 +}
58566 +
58567 +static __inline__ void
58568 +gr_insertsort(void)
58569 +{
58570 + unsigned short i, j;
58571 + struct crash_uid index;
58572 +
58573 + for (i = 1; i < uid_used; i++) {
58574 + index = uid_set[i];
58575 + j = i;
58576 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
58577 + uid_set[j] = uid_set[j - 1];
58578 + j--;
58579 + }
58580 + uid_set[j] = index;
58581 + }
58582 +
58583 + return;
58584 +}
58585 +
58586 +static __inline__ void
58587 +gr_insert_uid(const uid_t uid, const unsigned long expires)
58588 +{
58589 + int loc;
58590 +
58591 + if (uid_used == GR_UIDTABLE_MAX)
58592 + return;
58593 +
58594 + loc = gr_find_uid(uid);
58595 +
58596 + if (loc >= 0) {
58597 + uid_set[loc].expires = expires;
58598 + return;
58599 + }
58600 +
58601 + uid_set[uid_used].uid = uid;
58602 + uid_set[uid_used].expires = expires;
58603 + uid_used++;
58604 +
58605 + gr_insertsort();
58606 +
58607 + return;
58608 +}
58609 +
58610 +void
58611 +gr_remove_uid(const unsigned short loc)
58612 +{
58613 + unsigned short i;
58614 +
58615 + for (i = loc + 1; i < uid_used; i++)
58616 + uid_set[i - 1] = uid_set[i];
58617 +
58618 + uid_used--;
58619 +
58620 + return;
58621 +}
58622 +
58623 +int
58624 +gr_check_crash_uid(const uid_t uid)
58625 +{
58626 + int loc;
58627 + int ret = 0;
58628 +
58629 + if (unlikely(!gr_acl_is_enabled()))
58630 + return 0;
58631 +
58632 + spin_lock(&gr_uid_lock);
58633 + loc = gr_find_uid(uid);
58634 +
58635 + if (loc < 0)
58636 + goto out_unlock;
58637 +
58638 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
58639 + gr_remove_uid(loc);
58640 + else
58641 + ret = 1;
58642 +
58643 +out_unlock:
58644 + spin_unlock(&gr_uid_lock);
58645 + return ret;
58646 +}
58647 +
58648 +static __inline__ int
58649 +proc_is_setxid(const struct cred *cred)
58650 +{
58651 + if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
58652 + !uid_eq(cred->uid, cred->fsuid))
58653 + return 1;
58654 + if (!uid_eq(cred->gid, cred->egid) || !uid_eq(cred->gid, cred->sgid) ||
58655 + !uid_eq(cred->gid, cred->fsgid))
58656 + return 1;
58657 +
58658 + return 0;
58659 +}
58660 +
58661 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
58662 +
58663 +void
58664 +gr_handle_crash(struct task_struct *task, const int sig)
58665 +{
58666 + struct acl_subject_label *curr;
58667 + struct task_struct *tsk, *tsk2;
58668 + const struct cred *cred;
58669 + const struct cred *cred2;
58670 +
58671 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
58672 + return;
58673 +
58674 + if (unlikely(!gr_acl_is_enabled()))
58675 + return;
58676 +
58677 + curr = task->acl;
58678 +
58679 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
58680 + return;
58681 +
58682 + if (time_before_eq(curr->expires, get_seconds())) {
58683 + curr->expires = 0;
58684 + curr->crashes = 0;
58685 + }
58686 +
58687 + curr->crashes++;
58688 +
58689 + if (!curr->expires)
58690 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
58691 +
58692 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
58693 + time_after(curr->expires, get_seconds())) {
58694 + rcu_read_lock();
58695 + cred = __task_cred(task);
58696 + if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && proc_is_setxid(cred)) {
58697 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
58698 + spin_lock(&gr_uid_lock);
58699 + gr_insert_uid(cred->uid, curr->expires);
58700 + spin_unlock(&gr_uid_lock);
58701 + curr->expires = 0;
58702 + curr->crashes = 0;
58703 + read_lock(&tasklist_lock);
58704 + do_each_thread(tsk2, tsk) {
58705 + cred2 = __task_cred(tsk);
58706 + if (tsk != task && uid_eq(cred2->uid, cred->uid))
58707 + gr_fake_force_sig(SIGKILL, tsk);
58708 + } while_each_thread(tsk2, tsk);
58709 + read_unlock(&tasklist_lock);
58710 + } else {
58711 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
58712 + read_lock(&tasklist_lock);
58713 + read_lock(&grsec_exec_file_lock);
58714 + do_each_thread(tsk2, tsk) {
58715 + if (likely(tsk != task)) {
58716 + // if this thread has the same subject as the one that triggered
58717 + // RES_CRASH and it's the same binary, kill it
58718 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
58719 + gr_fake_force_sig(SIGKILL, tsk);
58720 + }
58721 + } while_each_thread(tsk2, tsk);
58722 + read_unlock(&grsec_exec_file_lock);
58723 + read_unlock(&tasklist_lock);
58724 + }
58725 + rcu_read_unlock();
58726 + }
58727 +
58728 + return;
58729 +}
58730 +
58731 +int
58732 +gr_check_crash_exec(const struct file *filp)
58733 +{
58734 + struct acl_subject_label *curr;
58735 +
58736 + if (unlikely(!gr_acl_is_enabled()))
58737 + return 0;
58738 +
58739 + read_lock(&gr_inode_lock);
58740 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
58741 + __get_dev(filp->f_path.dentry),
58742 + current->role);
58743 + read_unlock(&gr_inode_lock);
58744 +
58745 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
58746 + (!curr->crashes && !curr->expires))
58747 + return 0;
58748 +
58749 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
58750 + time_after(curr->expires, get_seconds()))
58751 + return 1;
58752 + else if (time_before_eq(curr->expires, get_seconds())) {
58753 + curr->crashes = 0;
58754 + curr->expires = 0;
58755 + }
58756 +
58757 + return 0;
58758 +}
58759 +
58760 +void
58761 +gr_handle_alertkill(struct task_struct *task)
58762 +{
58763 + struct acl_subject_label *curracl;
58764 + __u32 curr_ip;
58765 + struct task_struct *p, *p2;
58766 +
58767 + if (unlikely(!gr_acl_is_enabled()))
58768 + return;
58769 +
58770 + curracl = task->acl;
58771 + curr_ip = task->signal->curr_ip;
58772 +
58773 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
58774 + read_lock(&tasklist_lock);
58775 + do_each_thread(p2, p) {
58776 + if (p->signal->curr_ip == curr_ip)
58777 + gr_fake_force_sig(SIGKILL, p);
58778 + } while_each_thread(p2, p);
58779 + read_unlock(&tasklist_lock);
58780 + } else if (curracl->mode & GR_KILLPROC)
58781 + gr_fake_force_sig(SIGKILL, task);
58782 +
58783 + return;
58784 +}
58785 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
58786 new file mode 100644
58787 index 0000000..9d83a69
58788 --- /dev/null
58789 +++ b/grsecurity/gracl_shm.c
58790 @@ -0,0 +1,40 @@
58791 +#include <linux/kernel.h>
58792 +#include <linux/mm.h>
58793 +#include <linux/sched.h>
58794 +#include <linux/file.h>
58795 +#include <linux/ipc.h>
58796 +#include <linux/gracl.h>
58797 +#include <linux/grsecurity.h>
58798 +#include <linux/grinternal.h>
58799 +
58800 +int
58801 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58802 + const time_t shm_createtime, const uid_t cuid, const int shmid)
58803 +{
58804 + struct task_struct *task;
58805 +
58806 + if (!gr_acl_is_enabled())
58807 + return 1;
58808 +
58809 + rcu_read_lock();
58810 + read_lock(&tasklist_lock);
58811 +
58812 + task = find_task_by_vpid(shm_cprid);
58813 +
58814 + if (unlikely(!task))
58815 + task = find_task_by_vpid(shm_lapid);
58816 +
58817 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
58818 + (task->pid == shm_lapid)) &&
58819 + (task->acl->mode & GR_PROTSHM) &&
58820 + (task->acl != current->acl))) {
58821 + read_unlock(&tasklist_lock);
58822 + rcu_read_unlock();
58823 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
58824 + return 0;
58825 + }
58826 + read_unlock(&tasklist_lock);
58827 + rcu_read_unlock();
58828 +
58829 + return 1;
58830 +}
58831 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
58832 new file mode 100644
58833 index 0000000..bc0be01
58834 --- /dev/null
58835 +++ b/grsecurity/grsec_chdir.c
58836 @@ -0,0 +1,19 @@
58837 +#include <linux/kernel.h>
58838 +#include <linux/sched.h>
58839 +#include <linux/fs.h>
58840 +#include <linux/file.h>
58841 +#include <linux/grsecurity.h>
58842 +#include <linux/grinternal.h>
58843 +
58844 +void
58845 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
58846 +{
58847 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58848 + if ((grsec_enable_chdir && grsec_enable_group &&
58849 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
58850 + !grsec_enable_group)) {
58851 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
58852 + }
58853 +#endif
58854 + return;
58855 +}
58856 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
58857 new file mode 100644
58858 index 0000000..9807ee2
58859 --- /dev/null
58860 +++ b/grsecurity/grsec_chroot.c
58861 @@ -0,0 +1,368 @@
58862 +#include <linux/kernel.h>
58863 +#include <linux/module.h>
58864 +#include <linux/sched.h>
58865 +#include <linux/file.h>
58866 +#include <linux/fs.h>
58867 +#include <linux/mount.h>
58868 +#include <linux/types.h>
58869 +#include "../fs/mount.h"
58870 +#include <linux/grsecurity.h>
58871 +#include <linux/grinternal.h>
58872 +
58873 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
58874 +{
58875 +#ifdef CONFIG_GRKERNSEC
58876 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
58877 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
58878 + task->gr_is_chrooted = 1;
58879 + else
58880 + task->gr_is_chrooted = 0;
58881 +
58882 + task->gr_chroot_dentry = path->dentry;
58883 +#endif
58884 + return;
58885 +}
58886 +
58887 +void gr_clear_chroot_entries(struct task_struct *task)
58888 +{
58889 +#ifdef CONFIG_GRKERNSEC
58890 + task->gr_is_chrooted = 0;
58891 + task->gr_chroot_dentry = NULL;
58892 +#endif
58893 + return;
58894 +}
58895 +
58896 +int
58897 +gr_handle_chroot_unix(const pid_t pid)
58898 +{
58899 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58900 + struct task_struct *p;
58901 +
58902 + if (unlikely(!grsec_enable_chroot_unix))
58903 + return 1;
58904 +
58905 + if (likely(!proc_is_chrooted(current)))
58906 + return 1;
58907 +
58908 + rcu_read_lock();
58909 + read_lock(&tasklist_lock);
58910 + p = find_task_by_vpid_unrestricted(pid);
58911 + if (unlikely(p && !have_same_root(current, p))) {
58912 + read_unlock(&tasklist_lock);
58913 + rcu_read_unlock();
58914 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
58915 + return 0;
58916 + }
58917 + read_unlock(&tasklist_lock);
58918 + rcu_read_unlock();
58919 +#endif
58920 + return 1;
58921 +}
58922 +
58923 +int
58924 +gr_handle_chroot_nice(void)
58925 +{
58926 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58927 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
58928 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
58929 + return -EPERM;
58930 + }
58931 +#endif
58932 + return 0;
58933 +}
58934 +
58935 +int
58936 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
58937 +{
58938 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58939 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
58940 + && proc_is_chrooted(current)) {
58941 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
58942 + return -EACCES;
58943 + }
58944 +#endif
58945 + return 0;
58946 +}
58947 +
58948 +int
58949 +gr_handle_chroot_rawio(const struct inode *inode)
58950 +{
58951 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58952 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
58953 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
58954 + return 1;
58955 +#endif
58956 + return 0;
58957 +}
58958 +
58959 +int
58960 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
58961 +{
58962 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58963 + struct task_struct *p;
58964 + int ret = 0;
58965 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
58966 + return ret;
58967 +
58968 + read_lock(&tasklist_lock);
58969 + do_each_pid_task(pid, type, p) {
58970 + if (!have_same_root(current, p)) {
58971 + ret = 1;
58972 + goto out;
58973 + }
58974 + } while_each_pid_task(pid, type, p);
58975 +out:
58976 + read_unlock(&tasklist_lock);
58977 + return ret;
58978 +#endif
58979 + return 0;
58980 +}
58981 +
58982 +int
58983 +gr_pid_is_chrooted(struct task_struct *p)
58984 +{
58985 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58986 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
58987 + return 0;
58988 +
58989 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
58990 + !have_same_root(current, p)) {
58991 + return 1;
58992 + }
58993 +#endif
58994 + return 0;
58995 +}
58996 +
58997 +EXPORT_SYMBOL(gr_pid_is_chrooted);
58998 +
58999 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
59000 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
59001 +{
59002 + struct path path, currentroot;
59003 + int ret = 0;
59004 +
59005 + path.dentry = (struct dentry *)u_dentry;
59006 + path.mnt = (struct vfsmount *)u_mnt;
59007 + get_fs_root(current->fs, &currentroot);
59008 + if (path_is_under(&path, &currentroot))
59009 + ret = 1;
59010 + path_put(&currentroot);
59011 +
59012 + return ret;
59013 +}
59014 +#endif
59015 +
59016 +int
59017 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
59018 +{
59019 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59020 + if (!grsec_enable_chroot_fchdir)
59021 + return 1;
59022 +
59023 + if (!proc_is_chrooted(current))
59024 + return 1;
59025 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
59026 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
59027 + return 0;
59028 + }
59029 +#endif
59030 + return 1;
59031 +}
59032 +
59033 +int
59034 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59035 + const time_t shm_createtime)
59036 +{
59037 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59038 + struct task_struct *p;
59039 + time_t starttime;
59040 +
59041 + if (unlikely(!grsec_enable_chroot_shmat))
59042 + return 1;
59043 +
59044 + if (likely(!proc_is_chrooted(current)))
59045 + return 1;
59046 +
59047 + rcu_read_lock();
59048 + read_lock(&tasklist_lock);
59049 +
59050 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
59051 + starttime = p->start_time.tv_sec;
59052 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
59053 + if (have_same_root(current, p)) {
59054 + goto allow;
59055 + } else {
59056 + read_unlock(&tasklist_lock);
59057 + rcu_read_unlock();
59058 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
59059 + return 0;
59060 + }
59061 + }
59062 + /* creator exited, pid reuse, fall through to next check */
59063 + }
59064 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
59065 + if (unlikely(!have_same_root(current, p))) {
59066 + read_unlock(&tasklist_lock);
59067 + rcu_read_unlock();
59068 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
59069 + return 0;
59070 + }
59071 + }
59072 +
59073 +allow:
59074 + read_unlock(&tasklist_lock);
59075 + rcu_read_unlock();
59076 +#endif
59077 + return 1;
59078 +}
59079 +
59080 +void
59081 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
59082 +{
59083 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59084 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
59085 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
59086 +#endif
59087 + return;
59088 +}
59089 +
59090 +int
59091 +gr_handle_chroot_mknod(const struct dentry *dentry,
59092 + const struct vfsmount *mnt, const int mode)
59093 +{
59094 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59095 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
59096 + proc_is_chrooted(current)) {
59097 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
59098 + return -EPERM;
59099 + }
59100 +#endif
59101 + return 0;
59102 +}
59103 +
59104 +int
59105 +gr_handle_chroot_mount(const struct dentry *dentry,
59106 + const struct vfsmount *mnt, const char *dev_name)
59107 +{
59108 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59109 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
59110 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
59111 + return -EPERM;
59112 + }
59113 +#endif
59114 + return 0;
59115 +}
59116 +
59117 +int
59118 +gr_handle_chroot_pivot(void)
59119 +{
59120 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59121 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
59122 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
59123 + return -EPERM;
59124 + }
59125 +#endif
59126 + return 0;
59127 +}
59128 +
59129 +int
59130 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
59131 +{
59132 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59133 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
59134 + !gr_is_outside_chroot(dentry, mnt)) {
59135 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
59136 + return -EPERM;
59137 + }
59138 +#endif
59139 + return 0;
59140 +}
59141 +
59142 +extern const char *captab_log[];
59143 +extern int captab_log_entries;
59144 +
59145 +int
59146 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
59147 +{
59148 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59149 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
59150 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
59151 + if (cap_raised(chroot_caps, cap)) {
59152 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
59153 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
59154 + }
59155 + return 0;
59156 + }
59157 + }
59158 +#endif
59159 + return 1;
59160 +}
59161 +
59162 +int
59163 +gr_chroot_is_capable(const int cap)
59164 +{
59165 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59166 + return gr_task_chroot_is_capable(current, current_cred(), cap);
59167 +#endif
59168 + return 1;
59169 +}
59170 +
59171 +int
59172 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
59173 +{
59174 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59175 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
59176 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
59177 + if (cap_raised(chroot_caps, cap)) {
59178 + return 0;
59179 + }
59180 + }
59181 +#endif
59182 + return 1;
59183 +}
59184 +
59185 +int
59186 +gr_chroot_is_capable_nolog(const int cap)
59187 +{
59188 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59189 + return gr_task_chroot_is_capable_nolog(current, cap);
59190 +#endif
59191 + return 1;
59192 +}
59193 +
59194 +int
59195 +gr_handle_chroot_sysctl(const int op)
59196 +{
59197 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59198 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
59199 + proc_is_chrooted(current))
59200 + return -EACCES;
59201 +#endif
59202 + return 0;
59203 +}
59204 +
59205 +void
59206 +gr_handle_chroot_chdir(struct path *path)
59207 +{
59208 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59209 + if (grsec_enable_chroot_chdir)
59210 + set_fs_pwd(current->fs, path);
59211 +#endif
59212 + return;
59213 +}
59214 +
59215 +int
59216 +gr_handle_chroot_chmod(const struct dentry *dentry,
59217 + const struct vfsmount *mnt, const int mode)
59218 +{
59219 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59220 + /* allow chmod +s on directories, but not files */
59221 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
59222 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
59223 + proc_is_chrooted(current)) {
59224 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
59225 + return -EPERM;
59226 + }
59227 +#endif
59228 + return 0;
59229 +}
59230 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
59231 new file mode 100644
59232 index 0000000..7de2055
59233 --- /dev/null
59234 +++ b/grsecurity/grsec_disabled.c
59235 @@ -0,0 +1,442 @@
59236 +#include <linux/kernel.h>
59237 +#include <linux/module.h>
59238 +#include <linux/sched.h>
59239 +#include <linux/file.h>
59240 +#include <linux/fs.h>
59241 +#include <linux/kdev_t.h>
59242 +#include <linux/net.h>
59243 +#include <linux/in.h>
59244 +#include <linux/ip.h>
59245 +#include <linux/skbuff.h>
59246 +#include <linux/sysctl.h>
59247 +
59248 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59249 +void
59250 +pax_set_initial_flags(struct linux_binprm *bprm)
59251 +{
59252 + return;
59253 +}
59254 +#endif
59255 +
59256 +#ifdef CONFIG_SYSCTL
59257 +__u32
59258 +gr_handle_sysctl(const struct ctl_table * table, const int op)
59259 +{
59260 + return 0;
59261 +}
59262 +#endif
59263 +
59264 +#ifdef CONFIG_TASKSTATS
59265 +int gr_is_taskstats_denied(int pid)
59266 +{
59267 + return 0;
59268 +}
59269 +#endif
59270 +
59271 +int
59272 +gr_acl_is_enabled(void)
59273 +{
59274 + return 0;
59275 +}
59276 +
59277 +void
59278 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59279 +{
59280 + return;
59281 +}
59282 +
59283 +int
59284 +gr_handle_rawio(const struct inode *inode)
59285 +{
59286 + return 0;
59287 +}
59288 +
59289 +void
59290 +gr_acl_handle_psacct(struct task_struct *task, const long code)
59291 +{
59292 + return;
59293 +}
59294 +
59295 +int
59296 +gr_handle_ptrace(struct task_struct *task, const long request)
59297 +{
59298 + return 0;
59299 +}
59300 +
59301 +int
59302 +gr_handle_proc_ptrace(struct task_struct *task)
59303 +{
59304 + return 0;
59305 +}
59306 +
59307 +void
59308 +gr_learn_resource(const struct task_struct *task,
59309 + const int res, const unsigned long wanted, const int gt)
59310 +{
59311 + return;
59312 +}
59313 +
59314 +int
59315 +gr_set_acls(const int type)
59316 +{
59317 + return 0;
59318 +}
59319 +
59320 +int
59321 +gr_check_hidden_task(const struct task_struct *tsk)
59322 +{
59323 + return 0;
59324 +}
59325 +
59326 +int
59327 +gr_check_protected_task(const struct task_struct *task)
59328 +{
59329 + return 0;
59330 +}
59331 +
59332 +int
59333 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
59334 +{
59335 + return 0;
59336 +}
59337 +
59338 +void
59339 +gr_copy_label(struct task_struct *tsk)
59340 +{
59341 + return;
59342 +}
59343 +
59344 +void
59345 +gr_set_pax_flags(struct task_struct *task)
59346 +{
59347 + return;
59348 +}
59349 +
59350 +int
59351 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59352 + const int unsafe_share)
59353 +{
59354 + return 0;
59355 +}
59356 +
59357 +void
59358 +gr_handle_delete(const ino_t ino, const dev_t dev)
59359 +{
59360 + return;
59361 +}
59362 +
59363 +void
59364 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59365 +{
59366 + return;
59367 +}
59368 +
59369 +void
59370 +gr_handle_crash(struct task_struct *task, const int sig)
59371 +{
59372 + return;
59373 +}
59374 +
59375 +int
59376 +gr_check_crash_exec(const struct file *filp)
59377 +{
59378 + return 0;
59379 +}
59380 +
59381 +int
59382 +gr_check_crash_uid(const uid_t uid)
59383 +{
59384 + return 0;
59385 +}
59386 +
59387 +void
59388 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59389 + struct dentry *old_dentry,
59390 + struct dentry *new_dentry,
59391 + struct vfsmount *mnt, const __u8 replace)
59392 +{
59393 + return;
59394 +}
59395 +
59396 +int
59397 +gr_search_socket(const int family, const int type, const int protocol)
59398 +{
59399 + return 1;
59400 +}
59401 +
59402 +int
59403 +gr_search_connectbind(const int mode, const struct socket *sock,
59404 + const struct sockaddr_in *addr)
59405 +{
59406 + return 0;
59407 +}
59408 +
59409 +void
59410 +gr_handle_alertkill(struct task_struct *task)
59411 +{
59412 + return;
59413 +}
59414 +
59415 +__u32
59416 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
59417 +{
59418 + return 1;
59419 +}
59420 +
59421 +__u32
59422 +gr_acl_handle_hidden_file(const struct dentry * dentry,
59423 + const struct vfsmount * mnt)
59424 +{
59425 + return 1;
59426 +}
59427 +
59428 +__u32
59429 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
59430 + int acc_mode)
59431 +{
59432 + return 1;
59433 +}
59434 +
59435 +__u32
59436 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
59437 +{
59438 + return 1;
59439 +}
59440 +
59441 +__u32
59442 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
59443 +{
59444 + return 1;
59445 +}
59446 +
59447 +int
59448 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
59449 + unsigned int *vm_flags)
59450 +{
59451 + return 1;
59452 +}
59453 +
59454 +__u32
59455 +gr_acl_handle_truncate(const struct dentry * dentry,
59456 + const struct vfsmount * mnt)
59457 +{
59458 + return 1;
59459 +}
59460 +
59461 +__u32
59462 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
59463 +{
59464 + return 1;
59465 +}
59466 +
59467 +__u32
59468 +gr_acl_handle_access(const struct dentry * dentry,
59469 + const struct vfsmount * mnt, const int fmode)
59470 +{
59471 + return 1;
59472 +}
59473 +
59474 +__u32
59475 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
59476 + umode_t *mode)
59477 +{
59478 + return 1;
59479 +}
59480 +
59481 +__u32
59482 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
59483 +{
59484 + return 1;
59485 +}
59486 +
59487 +__u32
59488 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
59489 +{
59490 + return 1;
59491 +}
59492 +
59493 +void
59494 +grsecurity_init(void)
59495 +{
59496 + return;
59497 +}
59498 +
59499 +umode_t gr_acl_umask(void)
59500 +{
59501 + return 0;
59502 +}
59503 +
59504 +__u32
59505 +gr_acl_handle_mknod(const struct dentry * new_dentry,
59506 + const struct dentry * parent_dentry,
59507 + const struct vfsmount * parent_mnt,
59508 + const int mode)
59509 +{
59510 + return 1;
59511 +}
59512 +
59513 +__u32
59514 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
59515 + const struct dentry * parent_dentry,
59516 + const struct vfsmount * parent_mnt)
59517 +{
59518 + return 1;
59519 +}
59520 +
59521 +__u32
59522 +gr_acl_handle_symlink(const struct dentry * new_dentry,
59523 + const struct dentry * parent_dentry,
59524 + const struct vfsmount * parent_mnt, const struct filename *from)
59525 +{
59526 + return 1;
59527 +}
59528 +
59529 +__u32
59530 +gr_acl_handle_link(const struct dentry * new_dentry,
59531 + const struct dentry * parent_dentry,
59532 + const struct vfsmount * parent_mnt,
59533 + const struct dentry * old_dentry,
59534 + const struct vfsmount * old_mnt, const struct filename *to)
59535 +{
59536 + return 1;
59537 +}
59538 +
59539 +int
59540 +gr_acl_handle_rename(const struct dentry *new_dentry,
59541 + const struct dentry *parent_dentry,
59542 + const struct vfsmount *parent_mnt,
59543 + const struct dentry *old_dentry,
59544 + const struct inode *old_parent_inode,
59545 + const struct vfsmount *old_mnt, const struct filename *newname)
59546 +{
59547 + return 0;
59548 +}
59549 +
59550 +int
59551 +gr_acl_handle_filldir(const struct file *file, const char *name,
59552 + const int namelen, const ino_t ino)
59553 +{
59554 + return 1;
59555 +}
59556 +
59557 +int
59558 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59559 + const time_t shm_createtime, const uid_t cuid, const int shmid)
59560 +{
59561 + return 1;
59562 +}
59563 +
59564 +int
59565 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
59566 +{
59567 + return 0;
59568 +}
59569 +
59570 +int
59571 +gr_search_accept(const struct socket *sock)
59572 +{
59573 + return 0;
59574 +}
59575 +
59576 +int
59577 +gr_search_listen(const struct socket *sock)
59578 +{
59579 + return 0;
59580 +}
59581 +
59582 +int
59583 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
59584 +{
59585 + return 0;
59586 +}
59587 +
59588 +__u32
59589 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
59590 +{
59591 + return 1;
59592 +}
59593 +
59594 +__u32
59595 +gr_acl_handle_creat(const struct dentry * dentry,
59596 + const struct dentry * p_dentry,
59597 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
59598 + const int imode)
59599 +{
59600 + return 1;
59601 +}
59602 +
59603 +void
59604 +gr_acl_handle_exit(void)
59605 +{
59606 + return;
59607 +}
59608 +
59609 +int
59610 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59611 +{
59612 + return 1;
59613 +}
59614 +
59615 +void
59616 +gr_set_role_label(const uid_t uid, const gid_t gid)
59617 +{
59618 + return;
59619 +}
59620 +
59621 +int
59622 +gr_acl_handle_procpidmem(const struct task_struct *task)
59623 +{
59624 + return 0;
59625 +}
59626 +
59627 +int
59628 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
59629 +{
59630 + return 0;
59631 +}
59632 +
59633 +int
59634 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
59635 +{
59636 + return 0;
59637 +}
59638 +
59639 +void
59640 +gr_set_kernel_label(struct task_struct *task)
59641 +{
59642 + return;
59643 +}
59644 +
59645 +int
59646 +gr_check_user_change(int real, int effective, int fs)
59647 +{
59648 + return 0;
59649 +}
59650 +
59651 +int
59652 +gr_check_group_change(int real, int effective, int fs)
59653 +{
59654 + return 0;
59655 +}
59656 +
59657 +int gr_acl_enable_at_secure(void)
59658 +{
59659 + return 0;
59660 +}
59661 +
59662 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
59663 +{
59664 + return dentry->d_inode->i_sb->s_dev;
59665 +}
59666 +
59667 +void gr_put_exec_file(struct task_struct *task)
59668 +{
59669 + return;
59670 +}
59671 +
59672 +EXPORT_SYMBOL(gr_learn_resource);
59673 +EXPORT_SYMBOL(gr_set_kernel_label);
59674 +#ifdef CONFIG_SECURITY
59675 +EXPORT_SYMBOL(gr_check_user_change);
59676 +EXPORT_SYMBOL(gr_check_group_change);
59677 +#endif
59678 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
59679 new file mode 100644
59680 index 0000000..abfa971
59681 --- /dev/null
59682 +++ b/grsecurity/grsec_exec.c
59683 @@ -0,0 +1,174 @@
59684 +#include <linux/kernel.h>
59685 +#include <linux/sched.h>
59686 +#include <linux/file.h>
59687 +#include <linux/binfmts.h>
59688 +#include <linux/fs.h>
59689 +#include <linux/types.h>
59690 +#include <linux/grdefs.h>
59691 +#include <linux/grsecurity.h>
59692 +#include <linux/grinternal.h>
59693 +#include <linux/capability.h>
59694 +#include <linux/module.h>
59695 +
59696 +#include <asm/uaccess.h>
59697 +
59698 +#ifdef CONFIG_GRKERNSEC_EXECLOG
59699 +static char gr_exec_arg_buf[132];
59700 +static DEFINE_MUTEX(gr_exec_arg_mutex);
59701 +#endif
59702 +
59703 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
59704 +
59705 +void
59706 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
59707 +{
59708 +#ifdef CONFIG_GRKERNSEC_EXECLOG
59709 + char *grarg = gr_exec_arg_buf;
59710 + unsigned int i, x, execlen = 0;
59711 + char c;
59712 +
59713 + if (!((grsec_enable_execlog && grsec_enable_group &&
59714 + in_group_p(grsec_audit_gid))
59715 + || (grsec_enable_execlog && !grsec_enable_group)))
59716 + return;
59717 +
59718 + mutex_lock(&gr_exec_arg_mutex);
59719 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
59720 +
59721 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
59722 + const char __user *p;
59723 + unsigned int len;
59724 +
59725 + p = get_user_arg_ptr(argv, i);
59726 + if (IS_ERR(p))
59727 + goto log;
59728 +
59729 + len = strnlen_user(p, 128 - execlen);
59730 + if (len > 128 - execlen)
59731 + len = 128 - execlen;
59732 + else if (len > 0)
59733 + len--;
59734 + if (copy_from_user(grarg + execlen, p, len))
59735 + goto log;
59736 +
59737 + /* rewrite unprintable characters */
59738 + for (x = 0; x < len; x++) {
59739 + c = *(grarg + execlen + x);
59740 + if (c < 32 || c > 126)
59741 + *(grarg + execlen + x) = ' ';
59742 + }
59743 +
59744 + execlen += len;
59745 + *(grarg + execlen) = ' ';
59746 + *(grarg + execlen + 1) = '\0';
59747 + execlen++;
59748 + }
59749 +
59750 + log:
59751 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
59752 + bprm->file->f_path.mnt, grarg);
59753 + mutex_unlock(&gr_exec_arg_mutex);
59754 +#endif
59755 + return;
59756 +}
59757 +
59758 +#ifdef CONFIG_GRKERNSEC
59759 +extern int gr_acl_is_capable(const int cap);
59760 +extern int gr_acl_is_capable_nolog(const int cap);
59761 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
59762 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
59763 +extern int gr_chroot_is_capable(const int cap);
59764 +extern int gr_chroot_is_capable_nolog(const int cap);
59765 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
59766 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
59767 +#endif
59768 +
59769 +const char *captab_log[] = {
59770 + "CAP_CHOWN",
59771 + "CAP_DAC_OVERRIDE",
59772 + "CAP_DAC_READ_SEARCH",
59773 + "CAP_FOWNER",
59774 + "CAP_FSETID",
59775 + "CAP_KILL",
59776 + "CAP_SETGID",
59777 + "CAP_SETUID",
59778 + "CAP_SETPCAP",
59779 + "CAP_LINUX_IMMUTABLE",
59780 + "CAP_NET_BIND_SERVICE",
59781 + "CAP_NET_BROADCAST",
59782 + "CAP_NET_ADMIN",
59783 + "CAP_NET_RAW",
59784 + "CAP_IPC_LOCK",
59785 + "CAP_IPC_OWNER",
59786 + "CAP_SYS_MODULE",
59787 + "CAP_SYS_RAWIO",
59788 + "CAP_SYS_CHROOT",
59789 + "CAP_SYS_PTRACE",
59790 + "CAP_SYS_PACCT",
59791 + "CAP_SYS_ADMIN",
59792 + "CAP_SYS_BOOT",
59793 + "CAP_SYS_NICE",
59794 + "CAP_SYS_RESOURCE",
59795 + "CAP_SYS_TIME",
59796 + "CAP_SYS_TTY_CONFIG",
59797 + "CAP_MKNOD",
59798 + "CAP_LEASE",
59799 + "CAP_AUDIT_WRITE",
59800 + "CAP_AUDIT_CONTROL",
59801 + "CAP_SETFCAP",
59802 + "CAP_MAC_OVERRIDE",
59803 + "CAP_MAC_ADMIN",
59804 + "CAP_SYSLOG",
59805 + "CAP_WAKE_ALARM"
59806 +};
59807 +
59808 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
59809 +
59810 +int gr_is_capable(const int cap)
59811 +{
59812 +#ifdef CONFIG_GRKERNSEC
59813 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
59814 + return 1;
59815 + return 0;
59816 +#else
59817 + return 1;
59818 +#endif
59819 +}
59820 +
59821 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
59822 +{
59823 +#ifdef CONFIG_GRKERNSEC
59824 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
59825 + return 1;
59826 + return 0;
59827 +#else
59828 + return 1;
59829 +#endif
59830 +}
59831 +
59832 +int gr_is_capable_nolog(const int cap)
59833 +{
59834 +#ifdef CONFIG_GRKERNSEC
59835 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
59836 + return 1;
59837 + return 0;
59838 +#else
59839 + return 1;
59840 +#endif
59841 +}
59842 +
59843 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
59844 +{
59845 +#ifdef CONFIG_GRKERNSEC
59846 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
59847 + return 1;
59848 + return 0;
59849 +#else
59850 + return 1;
59851 +#endif
59852 +}
59853 +
59854 +EXPORT_SYMBOL(gr_is_capable);
59855 +EXPORT_SYMBOL(gr_is_capable_nolog);
59856 +EXPORT_SYMBOL(gr_task_is_capable);
59857 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
59858 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
59859 new file mode 100644
59860 index 0000000..d3ee748
59861 --- /dev/null
59862 +++ b/grsecurity/grsec_fifo.c
59863 @@ -0,0 +1,24 @@
59864 +#include <linux/kernel.h>
59865 +#include <linux/sched.h>
59866 +#include <linux/fs.h>
59867 +#include <linux/file.h>
59868 +#include <linux/grinternal.h>
59869 +
59870 +int
59871 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
59872 + const struct dentry *dir, const int flag, const int acc_mode)
59873 +{
59874 +#ifdef CONFIG_GRKERNSEC_FIFO
59875 + const struct cred *cred = current_cred();
59876 +
59877 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
59878 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
59879 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
59880 + (cred->fsuid != dentry->d_inode->i_uid)) {
59881 + if (!inode_permission(dentry->d_inode, acc_mode))
59882 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
59883 + return -EACCES;
59884 + }
59885 +#endif
59886 + return 0;
59887 +}
59888 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
59889 new file mode 100644
59890 index 0000000..8ca18bf
59891 --- /dev/null
59892 +++ b/grsecurity/grsec_fork.c
59893 @@ -0,0 +1,23 @@
59894 +#include <linux/kernel.h>
59895 +#include <linux/sched.h>
59896 +#include <linux/grsecurity.h>
59897 +#include <linux/grinternal.h>
59898 +#include <linux/errno.h>
59899 +
59900 +void
59901 +gr_log_forkfail(const int retval)
59902 +{
59903 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
59904 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
59905 + switch (retval) {
59906 + case -EAGAIN:
59907 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
59908 + break;
59909 + case -ENOMEM:
59910 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
59911 + break;
59912 + }
59913 + }
59914 +#endif
59915 + return;
59916 +}
59917 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
59918 new file mode 100644
59919 index 0000000..05a6015
59920 --- /dev/null
59921 +++ b/grsecurity/grsec_init.c
59922 @@ -0,0 +1,283 @@
59923 +#include <linux/kernel.h>
59924 +#include <linux/sched.h>
59925 +#include <linux/mm.h>
59926 +#include <linux/gracl.h>
59927 +#include <linux/slab.h>
59928 +#include <linux/vmalloc.h>
59929 +#include <linux/percpu.h>
59930 +#include <linux/module.h>
59931 +
59932 +int grsec_enable_ptrace_readexec;
59933 +int grsec_enable_setxid;
59934 +int grsec_enable_symlinkown;
59935 +int grsec_symlinkown_gid;
59936 +int grsec_enable_brute;
59937 +int grsec_enable_link;
59938 +int grsec_enable_dmesg;
59939 +int grsec_enable_harden_ptrace;
59940 +int grsec_enable_fifo;
59941 +int grsec_enable_execlog;
59942 +int grsec_enable_signal;
59943 +int grsec_enable_forkfail;
59944 +int grsec_enable_audit_ptrace;
59945 +int grsec_enable_time;
59946 +int grsec_enable_audit_textrel;
59947 +int grsec_enable_group;
59948 +int grsec_audit_gid;
59949 +int grsec_enable_chdir;
59950 +int grsec_enable_mount;
59951 +int grsec_enable_rofs;
59952 +int grsec_enable_chroot_findtask;
59953 +int grsec_enable_chroot_mount;
59954 +int grsec_enable_chroot_shmat;
59955 +int grsec_enable_chroot_fchdir;
59956 +int grsec_enable_chroot_double;
59957 +int grsec_enable_chroot_pivot;
59958 +int grsec_enable_chroot_chdir;
59959 +int grsec_enable_chroot_chmod;
59960 +int grsec_enable_chroot_mknod;
59961 +int grsec_enable_chroot_nice;
59962 +int grsec_enable_chroot_execlog;
59963 +int grsec_enable_chroot_caps;
59964 +int grsec_enable_chroot_sysctl;
59965 +int grsec_enable_chroot_unix;
59966 +int grsec_enable_tpe;
59967 +int grsec_tpe_gid;
59968 +int grsec_enable_blackhole;
59969 +#ifdef CONFIG_IPV6_MODULE
59970 +EXPORT_SYMBOL(grsec_enable_blackhole);
59971 +#endif
59972 +int grsec_lastack_retries;
59973 +int grsec_enable_tpe_all;
59974 +int grsec_enable_tpe_invert;
59975 +int grsec_enable_socket_all;
59976 +int grsec_socket_all_gid;
59977 +int grsec_enable_socket_client;
59978 +int grsec_socket_client_gid;
59979 +int grsec_enable_socket_server;
59980 +int grsec_socket_server_gid;
59981 +int grsec_resource_logging;
59982 +int grsec_disable_privio;
59983 +int grsec_enable_log_rwxmaps;
59984 +int grsec_lock;
59985 +
59986 +DEFINE_SPINLOCK(grsec_alert_lock);
59987 +unsigned long grsec_alert_wtime = 0;
59988 +unsigned long grsec_alert_fyet = 0;
59989 +
59990 +DEFINE_SPINLOCK(grsec_audit_lock);
59991 +
59992 +DEFINE_RWLOCK(grsec_exec_file_lock);
59993 +
59994 +char *gr_shared_page[4];
59995 +
59996 +char *gr_alert_log_fmt;
59997 +char *gr_audit_log_fmt;
59998 +char *gr_alert_log_buf;
59999 +char *gr_audit_log_buf;
60000 +
60001 +extern struct gr_arg *gr_usermode;
60002 +extern unsigned char *gr_system_salt;
60003 +extern unsigned char *gr_system_sum;
60004 +
60005 +void __init
60006 +grsecurity_init(void)
60007 +{
60008 + int j;
60009 + /* create the per-cpu shared pages */
60010 +
60011 +#ifdef CONFIG_X86
60012 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
60013 +#endif
60014 +
60015 + for (j = 0; j < 4; j++) {
60016 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
60017 + if (gr_shared_page[j] == NULL) {
60018 + panic("Unable to allocate grsecurity shared page");
60019 + return;
60020 + }
60021 + }
60022 +
60023 + /* allocate log buffers */
60024 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
60025 + if (!gr_alert_log_fmt) {
60026 + panic("Unable to allocate grsecurity alert log format buffer");
60027 + return;
60028 + }
60029 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
60030 + if (!gr_audit_log_fmt) {
60031 + panic("Unable to allocate grsecurity audit log format buffer");
60032 + return;
60033 + }
60034 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
60035 + if (!gr_alert_log_buf) {
60036 + panic("Unable to allocate grsecurity alert log buffer");
60037 + return;
60038 + }
60039 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
60040 + if (!gr_audit_log_buf) {
60041 + panic("Unable to allocate grsecurity audit log buffer");
60042 + return;
60043 + }
60044 +
60045 + /* allocate memory for authentication structure */
60046 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
60047 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
60048 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
60049 +
60050 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
60051 + panic("Unable to allocate grsecurity authentication structure");
60052 + return;
60053 + }
60054 +
60055 +
60056 +#ifdef CONFIG_GRKERNSEC_IO
60057 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
60058 + grsec_disable_privio = 1;
60059 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
60060 + grsec_disable_privio = 1;
60061 +#else
60062 + grsec_disable_privio = 0;
60063 +#endif
60064 +#endif
60065 +
60066 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60067 + /* for backward compatibility, tpe_invert always defaults to on if
60068 + enabled in the kernel
60069 + */
60070 + grsec_enable_tpe_invert = 1;
60071 +#endif
60072 +
60073 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
60074 +#ifndef CONFIG_GRKERNSEC_SYSCTL
60075 + grsec_lock = 1;
60076 +#endif
60077 +
60078 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60079 + grsec_enable_audit_textrel = 1;
60080 +#endif
60081 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60082 + grsec_enable_log_rwxmaps = 1;
60083 +#endif
60084 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
60085 + grsec_enable_group = 1;
60086 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
60087 +#endif
60088 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
60089 + grsec_enable_ptrace_readexec = 1;
60090 +#endif
60091 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
60092 + grsec_enable_chdir = 1;
60093 +#endif
60094 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60095 + grsec_enable_harden_ptrace = 1;
60096 +#endif
60097 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60098 + grsec_enable_mount = 1;
60099 +#endif
60100 +#ifdef CONFIG_GRKERNSEC_LINK
60101 + grsec_enable_link = 1;
60102 +#endif
60103 +#ifdef CONFIG_GRKERNSEC_BRUTE
60104 + grsec_enable_brute = 1;
60105 +#endif
60106 +#ifdef CONFIG_GRKERNSEC_DMESG
60107 + grsec_enable_dmesg = 1;
60108 +#endif
60109 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
60110 + grsec_enable_blackhole = 1;
60111 + grsec_lastack_retries = 4;
60112 +#endif
60113 +#ifdef CONFIG_GRKERNSEC_FIFO
60114 + grsec_enable_fifo = 1;
60115 +#endif
60116 +#ifdef CONFIG_GRKERNSEC_EXECLOG
60117 + grsec_enable_execlog = 1;
60118 +#endif
60119 +#ifdef CONFIG_GRKERNSEC_SETXID
60120 + grsec_enable_setxid = 1;
60121 +#endif
60122 +#ifdef CONFIG_GRKERNSEC_SIGNAL
60123 + grsec_enable_signal = 1;
60124 +#endif
60125 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
60126 + grsec_enable_forkfail = 1;
60127 +#endif
60128 +#ifdef CONFIG_GRKERNSEC_TIME
60129 + grsec_enable_time = 1;
60130 +#endif
60131 +#ifdef CONFIG_GRKERNSEC_RESLOG
60132 + grsec_resource_logging = 1;
60133 +#endif
60134 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60135 + grsec_enable_chroot_findtask = 1;
60136 +#endif
60137 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
60138 + grsec_enable_chroot_unix = 1;
60139 +#endif
60140 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
60141 + grsec_enable_chroot_mount = 1;
60142 +#endif
60143 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
60144 + grsec_enable_chroot_fchdir = 1;
60145 +#endif
60146 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
60147 + grsec_enable_chroot_shmat = 1;
60148 +#endif
60149 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60150 + grsec_enable_audit_ptrace = 1;
60151 +#endif
60152 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
60153 + grsec_enable_chroot_double = 1;
60154 +#endif
60155 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
60156 + grsec_enable_chroot_pivot = 1;
60157 +#endif
60158 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
60159 + grsec_enable_chroot_chdir = 1;
60160 +#endif
60161 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
60162 + grsec_enable_chroot_chmod = 1;
60163 +#endif
60164 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
60165 + grsec_enable_chroot_mknod = 1;
60166 +#endif
60167 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
60168 + grsec_enable_chroot_nice = 1;
60169 +#endif
60170 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
60171 + grsec_enable_chroot_execlog = 1;
60172 +#endif
60173 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
60174 + grsec_enable_chroot_caps = 1;
60175 +#endif
60176 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
60177 + grsec_enable_chroot_sysctl = 1;
60178 +#endif
60179 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
60180 + grsec_enable_symlinkown = 1;
60181 + grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
60182 +#endif
60183 +#ifdef CONFIG_GRKERNSEC_TPE
60184 + grsec_enable_tpe = 1;
60185 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
60186 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
60187 + grsec_enable_tpe_all = 1;
60188 +#endif
60189 +#endif
60190 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
60191 + grsec_enable_socket_all = 1;
60192 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
60193 +#endif
60194 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
60195 + grsec_enable_socket_client = 1;
60196 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
60197 +#endif
60198 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
60199 + grsec_enable_socket_server = 1;
60200 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
60201 +#endif
60202 +#endif
60203 +
60204 + return;
60205 +}
60206 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
60207 new file mode 100644
60208 index 0000000..6095407
60209 --- /dev/null
60210 +++ b/grsecurity/grsec_link.c
60211 @@ -0,0 +1,58 @@
60212 +#include <linux/kernel.h>
60213 +#include <linux/sched.h>
60214 +#include <linux/fs.h>
60215 +#include <linux/file.h>
60216 +#include <linux/grinternal.h>
60217 +
60218 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
60219 +{
60220 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
60221 + const struct inode *link_inode = link->dentry->d_inode;
60222 +
60223 + if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
60224 + /* ignore root-owned links, e.g. /proc/self */
60225 + !uid_eq(link_inode->i_uid, GLOBAL_ROOT_UID) && target &&
60226 + !uid_eq(link_inode->i_uid, target->i_uid)) {
60227 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
60228 + return 1;
60229 + }
60230 +#endif
60231 + return 0;
60232 +}
60233 +
60234 +int
60235 +gr_handle_follow_link(const struct inode *parent,
60236 + const struct inode *inode,
60237 + const struct dentry *dentry, const struct vfsmount *mnt)
60238 +{
60239 +#ifdef CONFIG_GRKERNSEC_LINK
60240 + const struct cred *cred = current_cred();
60241 +
60242 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
60243 + (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
60244 + (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
60245 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
60246 + return -EACCES;
60247 + }
60248 +#endif
60249 + return 0;
60250 +}
60251 +
60252 +int
60253 +gr_handle_hardlink(const struct dentry *dentry,
60254 + const struct vfsmount *mnt,
60255 + struct inode *inode, const int mode, const struct filename *to)
60256 +{
60257 +#ifdef CONFIG_GRKERNSEC_LINK
60258 + const struct cred *cred = current_cred();
60259 +
60260 + if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
60261 + (!S_ISREG(mode) || is_privileged_binary(dentry) ||
60262 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
60263 + !capable(CAP_FOWNER) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
60264 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
60265 + return -EPERM;
60266 + }
60267 +#endif
60268 + return 0;
60269 +}
60270 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
60271 new file mode 100644
60272 index 0000000..a45d2e9
60273 --- /dev/null
60274 +++ b/grsecurity/grsec_log.c
60275 @@ -0,0 +1,322 @@
60276 +#include <linux/kernel.h>
60277 +#include <linux/sched.h>
60278 +#include <linux/file.h>
60279 +#include <linux/tty.h>
60280 +#include <linux/fs.h>
60281 +#include <linux/grinternal.h>
60282 +
60283 +#ifdef CONFIG_TREE_PREEMPT_RCU
60284 +#define DISABLE_PREEMPT() preempt_disable()
60285 +#define ENABLE_PREEMPT() preempt_enable()
60286 +#else
60287 +#define DISABLE_PREEMPT()
60288 +#define ENABLE_PREEMPT()
60289 +#endif
60290 +
60291 +#define BEGIN_LOCKS(x) \
60292 + DISABLE_PREEMPT(); \
60293 + rcu_read_lock(); \
60294 + read_lock(&tasklist_lock); \
60295 + read_lock(&grsec_exec_file_lock); \
60296 + if (x != GR_DO_AUDIT) \
60297 + spin_lock(&grsec_alert_lock); \
60298 + else \
60299 + spin_lock(&grsec_audit_lock)
60300 +
60301 +#define END_LOCKS(x) \
60302 + if (x != GR_DO_AUDIT) \
60303 + spin_unlock(&grsec_alert_lock); \
60304 + else \
60305 + spin_unlock(&grsec_audit_lock); \
60306 + read_unlock(&grsec_exec_file_lock); \
60307 + read_unlock(&tasklist_lock); \
60308 + rcu_read_unlock(); \
60309 + ENABLE_PREEMPT(); \
60310 + if (x == GR_DONT_AUDIT) \
60311 + gr_handle_alertkill(current)
60312 +
60313 +enum {
60314 + FLOODING,
60315 + NO_FLOODING
60316 +};
60317 +
60318 +extern char *gr_alert_log_fmt;
60319 +extern char *gr_audit_log_fmt;
60320 +extern char *gr_alert_log_buf;
60321 +extern char *gr_audit_log_buf;
60322 +
60323 +static int gr_log_start(int audit)
60324 +{
60325 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
60326 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
60327 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60328 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
60329 + unsigned long curr_secs = get_seconds();
60330 +
60331 + if (audit == GR_DO_AUDIT)
60332 + goto set_fmt;
60333 +
60334 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
60335 + grsec_alert_wtime = curr_secs;
60336 + grsec_alert_fyet = 0;
60337 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
60338 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
60339 + grsec_alert_fyet++;
60340 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
60341 + grsec_alert_wtime = curr_secs;
60342 + grsec_alert_fyet++;
60343 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
60344 + return FLOODING;
60345 + }
60346 + else return FLOODING;
60347 +
60348 +set_fmt:
60349 +#endif
60350 + memset(buf, 0, PAGE_SIZE);
60351 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
60352 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
60353 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
60354 + } else if (current->signal->curr_ip) {
60355 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
60356 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
60357 + } else if (gr_acl_is_enabled()) {
60358 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
60359 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
60360 + } else {
60361 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
60362 + strcpy(buf, fmt);
60363 + }
60364 +
60365 + return NO_FLOODING;
60366 +}
60367 +
60368 +static void gr_log_middle(int audit, const char *msg, va_list ap)
60369 + __attribute__ ((format (printf, 2, 0)));
60370 +
60371 +static void gr_log_middle(int audit, const char *msg, va_list ap)
60372 +{
60373 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60374 + unsigned int len = strlen(buf);
60375 +
60376 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
60377 +
60378 + return;
60379 +}
60380 +
60381 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
60382 + __attribute__ ((format (printf, 2, 3)));
60383 +
60384 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
60385 +{
60386 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60387 + unsigned int len = strlen(buf);
60388 + va_list ap;
60389 +
60390 + va_start(ap, msg);
60391 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
60392 + va_end(ap);
60393 +
60394 + return;
60395 +}
60396 +
60397 +static void gr_log_end(int audit, int append_default)
60398 +{
60399 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60400 +
60401 + if (append_default) {
60402 + unsigned int len = strlen(buf);
60403 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
60404 + }
60405 +
60406 + printk("%s\n", buf);
60407 +
60408 + return;
60409 +}
60410 +
60411 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
60412 +{
60413 + int logtype;
60414 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
60415 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
60416 + void *voidptr = NULL;
60417 + int num1 = 0, num2 = 0;
60418 + unsigned long ulong1 = 0, ulong2 = 0;
60419 + struct dentry *dentry = NULL;
60420 + struct vfsmount *mnt = NULL;
60421 + struct file *file = NULL;
60422 + struct task_struct *task = NULL;
60423 + const struct cred *cred, *pcred;
60424 + va_list ap;
60425 +
60426 + BEGIN_LOCKS(audit);
60427 + logtype = gr_log_start(audit);
60428 + if (logtype == FLOODING) {
60429 + END_LOCKS(audit);
60430 + return;
60431 + }
60432 + va_start(ap, argtypes);
60433 + switch (argtypes) {
60434 + case GR_TTYSNIFF:
60435 + task = va_arg(ap, struct task_struct *);
60436 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
60437 + break;
60438 + case GR_SYSCTL_HIDDEN:
60439 + str1 = va_arg(ap, char *);
60440 + gr_log_middle_varargs(audit, msg, result, str1);
60441 + break;
60442 + case GR_RBAC:
60443 + dentry = va_arg(ap, struct dentry *);
60444 + mnt = va_arg(ap, struct vfsmount *);
60445 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
60446 + break;
60447 + case GR_RBAC_STR:
60448 + dentry = va_arg(ap, struct dentry *);
60449 + mnt = va_arg(ap, struct vfsmount *);
60450 + str1 = va_arg(ap, char *);
60451 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
60452 + break;
60453 + case GR_STR_RBAC:
60454 + str1 = va_arg(ap, char *);
60455 + dentry = va_arg(ap, struct dentry *);
60456 + mnt = va_arg(ap, struct vfsmount *);
60457 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
60458 + break;
60459 + case GR_RBAC_MODE2:
60460 + dentry = va_arg(ap, struct dentry *);
60461 + mnt = va_arg(ap, struct vfsmount *);
60462 + str1 = va_arg(ap, char *);
60463 + str2 = va_arg(ap, char *);
60464 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
60465 + break;
60466 + case GR_RBAC_MODE3:
60467 + dentry = va_arg(ap, struct dentry *);
60468 + mnt = va_arg(ap, struct vfsmount *);
60469 + str1 = va_arg(ap, char *);
60470 + str2 = va_arg(ap, char *);
60471 + str3 = va_arg(ap, char *);
60472 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
60473 + break;
60474 + case GR_FILENAME:
60475 + dentry = va_arg(ap, struct dentry *);
60476 + mnt = va_arg(ap, struct vfsmount *);
60477 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
60478 + break;
60479 + case GR_STR_FILENAME:
60480 + str1 = va_arg(ap, char *);
60481 + dentry = va_arg(ap, struct dentry *);
60482 + mnt = va_arg(ap, struct vfsmount *);
60483 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
60484 + break;
60485 + case GR_FILENAME_STR:
60486 + dentry = va_arg(ap, struct dentry *);
60487 + mnt = va_arg(ap, struct vfsmount *);
60488 + str1 = va_arg(ap, char *);
60489 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
60490 + break;
60491 + case GR_FILENAME_TWO_INT:
60492 + dentry = va_arg(ap, struct dentry *);
60493 + mnt = va_arg(ap, struct vfsmount *);
60494 + num1 = va_arg(ap, int);
60495 + num2 = va_arg(ap, int);
60496 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
60497 + break;
60498 + case GR_FILENAME_TWO_INT_STR:
60499 + dentry = va_arg(ap, struct dentry *);
60500 + mnt = va_arg(ap, struct vfsmount *);
60501 + num1 = va_arg(ap, int);
60502 + num2 = va_arg(ap, int);
60503 + str1 = va_arg(ap, char *);
60504 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
60505 + break;
60506 + case GR_TEXTREL:
60507 + file = va_arg(ap, struct file *);
60508 + ulong1 = va_arg(ap, unsigned long);
60509 + ulong2 = va_arg(ap, unsigned long);
60510 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
60511 + break;
60512 + case GR_PTRACE:
60513 + task = va_arg(ap, struct task_struct *);
60514 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
60515 + break;
60516 + case GR_RESOURCE:
60517 + task = va_arg(ap, struct task_struct *);
60518 + cred = __task_cred(task);
60519 + pcred = __task_cred(task->real_parent);
60520 + ulong1 = va_arg(ap, unsigned long);
60521 + str1 = va_arg(ap, char *);
60522 + ulong2 = va_arg(ap, unsigned long);
60523 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60524 + break;
60525 + case GR_CAP:
60526 + task = va_arg(ap, struct task_struct *);
60527 + cred = __task_cred(task);
60528 + pcred = __task_cred(task->real_parent);
60529 + str1 = va_arg(ap, char *);
60530 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60531 + break;
60532 + case GR_SIG:
60533 + str1 = va_arg(ap, char *);
60534 + voidptr = va_arg(ap, void *);
60535 + gr_log_middle_varargs(audit, msg, str1, voidptr);
60536 + break;
60537 + case GR_SIG2:
60538 + task = va_arg(ap, struct task_struct *);
60539 + cred = __task_cred(task);
60540 + pcred = __task_cred(task->real_parent);
60541 + num1 = va_arg(ap, int);
60542 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60543 + break;
60544 + case GR_CRASH1:
60545 + task = va_arg(ap, struct task_struct *);
60546 + cred = __task_cred(task);
60547 + pcred = __task_cred(task->real_parent);
60548 + ulong1 = va_arg(ap, unsigned long);
60549 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
60550 + break;
60551 + case GR_CRASH2:
60552 + task = va_arg(ap, struct task_struct *);
60553 + cred = __task_cred(task);
60554 + pcred = __task_cred(task->real_parent);
60555 + ulong1 = va_arg(ap, unsigned long);
60556 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
60557 + break;
60558 + case GR_RWXMAP:
60559 + file = va_arg(ap, struct file *);
60560 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
60561 + break;
60562 + case GR_PSACCT:
60563 + {
60564 + unsigned int wday, cday;
60565 + __u8 whr, chr;
60566 + __u8 wmin, cmin;
60567 + __u8 wsec, csec;
60568 + char cur_tty[64] = { 0 };
60569 + char parent_tty[64] = { 0 };
60570 +
60571 + task = va_arg(ap, struct task_struct *);
60572 + wday = va_arg(ap, unsigned int);
60573 + cday = va_arg(ap, unsigned int);
60574 + whr = va_arg(ap, int);
60575 + chr = va_arg(ap, int);
60576 + wmin = va_arg(ap, int);
60577 + cmin = va_arg(ap, int);
60578 + wsec = va_arg(ap, int);
60579 + csec = va_arg(ap, int);
60580 + ulong1 = va_arg(ap, unsigned long);
60581 + cred = __task_cred(task);
60582 + pcred = __task_cred(task->real_parent);
60583 +
60584 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
60585 + }
60586 + break;
60587 + default:
60588 + gr_log_middle(audit, msg, ap);
60589 + }
60590 + va_end(ap);
60591 + // these don't need DEFAULTSECARGS printed on the end
60592 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
60593 + gr_log_end(audit, 0);
60594 + else
60595 + gr_log_end(audit, 1);
60596 + END_LOCKS(audit);
60597 +}
60598 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
60599 new file mode 100644
60600 index 0000000..f536303
60601 --- /dev/null
60602 +++ b/grsecurity/grsec_mem.c
60603 @@ -0,0 +1,40 @@
60604 +#include <linux/kernel.h>
60605 +#include <linux/sched.h>
60606 +#include <linux/mm.h>
60607 +#include <linux/mman.h>
60608 +#include <linux/grinternal.h>
60609 +
60610 +void
60611 +gr_handle_ioperm(void)
60612 +{
60613 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
60614 + return;
60615 +}
60616 +
60617 +void
60618 +gr_handle_iopl(void)
60619 +{
60620 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
60621 + return;
60622 +}
60623 +
60624 +void
60625 +gr_handle_mem_readwrite(u64 from, u64 to)
60626 +{
60627 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
60628 + return;
60629 +}
60630 +
60631 +void
60632 +gr_handle_vm86(void)
60633 +{
60634 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
60635 + return;
60636 +}
60637 +
60638 +void
60639 +gr_log_badprocpid(const char *entry)
60640 +{
60641 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
60642 + return;
60643 +}
60644 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
60645 new file mode 100644
60646 index 0000000..2131422
60647 --- /dev/null
60648 +++ b/grsecurity/grsec_mount.c
60649 @@ -0,0 +1,62 @@
60650 +#include <linux/kernel.h>
60651 +#include <linux/sched.h>
60652 +#include <linux/mount.h>
60653 +#include <linux/grsecurity.h>
60654 +#include <linux/grinternal.h>
60655 +
60656 +void
60657 +gr_log_remount(const char *devname, const int retval)
60658 +{
60659 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60660 + if (grsec_enable_mount && (retval >= 0))
60661 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
60662 +#endif
60663 + return;
60664 +}
60665 +
60666 +void
60667 +gr_log_unmount(const char *devname, const int retval)
60668 +{
60669 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60670 + if (grsec_enable_mount && (retval >= 0))
60671 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
60672 +#endif
60673 + return;
60674 +}
60675 +
60676 +void
60677 +gr_log_mount(const char *from, const char *to, const int retval)
60678 +{
60679 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60680 + if (grsec_enable_mount && (retval >= 0))
60681 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
60682 +#endif
60683 + return;
60684 +}
60685 +
60686 +int
60687 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
60688 +{
60689 +#ifdef CONFIG_GRKERNSEC_ROFS
60690 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
60691 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
60692 + return -EPERM;
60693 + } else
60694 + return 0;
60695 +#endif
60696 + return 0;
60697 +}
60698 +
60699 +int
60700 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
60701 +{
60702 +#ifdef CONFIG_GRKERNSEC_ROFS
60703 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
60704 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
60705 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
60706 + return -EPERM;
60707 + } else
60708 + return 0;
60709 +#endif
60710 + return 0;
60711 +}
60712 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
60713 new file mode 100644
60714 index 0000000..a3b12a0
60715 --- /dev/null
60716 +++ b/grsecurity/grsec_pax.c
60717 @@ -0,0 +1,36 @@
60718 +#include <linux/kernel.h>
60719 +#include <linux/sched.h>
60720 +#include <linux/mm.h>
60721 +#include <linux/file.h>
60722 +#include <linux/grinternal.h>
60723 +#include <linux/grsecurity.h>
60724 +
60725 +void
60726 +gr_log_textrel(struct vm_area_struct * vma)
60727 +{
60728 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60729 + if (grsec_enable_audit_textrel)
60730 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
60731 +#endif
60732 + return;
60733 +}
60734 +
60735 +void
60736 +gr_log_rwxmmap(struct file *file)
60737 +{
60738 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60739 + if (grsec_enable_log_rwxmaps)
60740 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
60741 +#endif
60742 + return;
60743 +}
60744 +
60745 +void
60746 +gr_log_rwxmprotect(struct file *file)
60747 +{
60748 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60749 + if (grsec_enable_log_rwxmaps)
60750 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
60751 +#endif
60752 + return;
60753 +}
60754 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
60755 new file mode 100644
60756 index 0000000..f7f29aa
60757 --- /dev/null
60758 +++ b/grsecurity/grsec_ptrace.c
60759 @@ -0,0 +1,30 @@
60760 +#include <linux/kernel.h>
60761 +#include <linux/sched.h>
60762 +#include <linux/grinternal.h>
60763 +#include <linux/security.h>
60764 +
60765 +void
60766 +gr_audit_ptrace(struct task_struct *task)
60767 +{
60768 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60769 + if (grsec_enable_audit_ptrace)
60770 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
60771 +#endif
60772 + return;
60773 +}
60774 +
60775 +int
60776 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
60777 +{
60778 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
60779 + const struct dentry *dentry = file->f_path.dentry;
60780 + const struct vfsmount *mnt = file->f_path.mnt;
60781 +
60782 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
60783 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
60784 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
60785 + return -EACCES;
60786 + }
60787 +#endif
60788 + return 0;
60789 +}
60790 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
60791 new file mode 100644
60792 index 0000000..5c00416
60793 --- /dev/null
60794 +++ b/grsecurity/grsec_sig.c
60795 @@ -0,0 +1,222 @@
60796 +#include <linux/kernel.h>
60797 +#include <linux/sched.h>
60798 +#include <linux/delay.h>
60799 +#include <linux/grsecurity.h>
60800 +#include <linux/grinternal.h>
60801 +#include <linux/hardirq.h>
60802 +
60803 +char *signames[] = {
60804 + [SIGSEGV] = "Segmentation fault",
60805 + [SIGILL] = "Illegal instruction",
60806 + [SIGABRT] = "Abort",
60807 + [SIGBUS] = "Invalid alignment/Bus error"
60808 +};
60809 +
60810 +void
60811 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
60812 +{
60813 +#ifdef CONFIG_GRKERNSEC_SIGNAL
60814 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
60815 + (sig == SIGABRT) || (sig == SIGBUS))) {
60816 + if (t->pid == current->pid) {
60817 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
60818 + } else {
60819 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
60820 + }
60821 + }
60822 +#endif
60823 + return;
60824 +}
60825 +
60826 +int
60827 +gr_handle_signal(const struct task_struct *p, const int sig)
60828 +{
60829 +#ifdef CONFIG_GRKERNSEC
60830 + /* ignore the 0 signal for protected task checks */
60831 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
60832 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
60833 + return -EPERM;
60834 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
60835 + return -EPERM;
60836 + }
60837 +#endif
60838 + return 0;
60839 +}
60840 +
60841 +#ifdef CONFIG_GRKERNSEC
60842 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
60843 +
60844 +int gr_fake_force_sig(int sig, struct task_struct *t)
60845 +{
60846 + unsigned long int flags;
60847 + int ret, blocked, ignored;
60848 + struct k_sigaction *action;
60849 +
60850 + spin_lock_irqsave(&t->sighand->siglock, flags);
60851 + action = &t->sighand->action[sig-1];
60852 + ignored = action->sa.sa_handler == SIG_IGN;
60853 + blocked = sigismember(&t->blocked, sig);
60854 + if (blocked || ignored) {
60855 + action->sa.sa_handler = SIG_DFL;
60856 + if (blocked) {
60857 + sigdelset(&t->blocked, sig);
60858 + recalc_sigpending_and_wake(t);
60859 + }
60860 + }
60861 + if (action->sa.sa_handler == SIG_DFL)
60862 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
60863 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
60864 +
60865 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
60866 +
60867 + return ret;
60868 +}
60869 +#endif
60870 +
60871 +#ifdef CONFIG_GRKERNSEC_BRUTE
60872 +#define GR_USER_BAN_TIME (15 * 60)
60873 +#define GR_DAEMON_BRUTE_TIME (30 * 60)
60874 +
60875 +static int __get_dumpable(unsigned long mm_flags)
60876 +{
60877 + int ret;
60878 +
60879 + ret = mm_flags & MMF_DUMPABLE_MASK;
60880 + return (ret >= 2) ? 2 : ret;
60881 +}
60882 +#endif
60883 +
60884 +void gr_handle_brute_attach(unsigned long mm_flags)
60885 +{
60886 +#ifdef CONFIG_GRKERNSEC_BRUTE
60887 + struct task_struct *p = current;
60888 + kuid_t uid = GLOBAL_ROOT_UID;
60889 + int daemon = 0;
60890 +
60891 + if (!grsec_enable_brute)
60892 + return;
60893 +
60894 + rcu_read_lock();
60895 + read_lock(&tasklist_lock);
60896 + read_lock(&grsec_exec_file_lock);
60897 + if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
60898 + p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
60899 + p->real_parent->brute = 1;
60900 + daemon = 1;
60901 + } else {
60902 + const struct cred *cred = __task_cred(p), *cred2;
60903 + struct task_struct *tsk, *tsk2;
60904 +
60905 + if (!__get_dumpable(mm_flags) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
60906 + struct user_struct *user;
60907 +
60908 + uid = cred->uid;
60909 +
60910 + /* this is put upon execution past expiration */
60911 + user = find_user(uid);
60912 + if (user == NULL)
60913 + goto unlock;
60914 + user->banned = 1;
60915 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
60916 + if (user->ban_expires == ~0UL)
60917 + user->ban_expires--;
60918 +
60919 + do_each_thread(tsk2, tsk) {
60920 + cred2 = __task_cred(tsk);
60921 + if (tsk != p && uid_eq(cred2->uid, uid))
60922 + gr_fake_force_sig(SIGKILL, tsk);
60923 + } while_each_thread(tsk2, tsk);
60924 + }
60925 + }
60926 +unlock:
60927 + read_unlock(&grsec_exec_file_lock);
60928 + read_unlock(&tasklist_lock);
60929 + rcu_read_unlock();
60930 +
60931 + if (!uid_eq(uid, GLOBAL_ROOT_UID))
60932 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
60933 + from_kuid_munged(&init_user_ns, uid), GR_USER_BAN_TIME / 60);
60934 + else if (daemon)
60935 + gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
60936 +
60937 +#endif
60938 + return;
60939 +}
60940 +
60941 +void gr_handle_brute_check(void)
60942 +{
60943 +#ifdef CONFIG_GRKERNSEC_BRUTE
60944 + struct task_struct *p = current;
60945 +
60946 + if (unlikely(p->brute)) {
60947 + if (!grsec_enable_brute)
60948 + p->brute = 0;
60949 + else if (time_before(get_seconds(), p->brute_expires))
60950 + msleep(30 * 1000);
60951 + }
60952 +#endif
60953 + return;
60954 +}
60955 +
60956 +void gr_handle_kernel_exploit(void)
60957 +{
60958 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
60959 + const struct cred *cred;
60960 + struct task_struct *tsk, *tsk2;
60961 + struct user_struct *user;
60962 + kuid_t uid;
60963 +
60964 + if (in_irq() || in_serving_softirq() || in_nmi())
60965 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
60966 +
60967 + uid = current_uid();
60968 +
60969 + if (uid_eq(uid, GLOBAL_ROOT_UID))
60970 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
60971 + else {
60972 + /* kill all the processes of this user, hold a reference
60973 + to their creds struct, and prevent them from creating
60974 + another process until system reset
60975 + */
60976 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
60977 + from_kuid_munged(&init_user_ns, uid));
60978 + /* we intentionally leak this ref */
60979 + user = get_uid(current->cred->user);
60980 + if (user) {
60981 + user->banned = 1;
60982 + user->ban_expires = ~0UL;
60983 + }
60984 +
60985 + read_lock(&tasklist_lock);
60986 + do_each_thread(tsk2, tsk) {
60987 + cred = __task_cred(tsk);
60988 + if (uid_eq(cred->uid, uid))
60989 + gr_fake_force_sig(SIGKILL, tsk);
60990 + } while_each_thread(tsk2, tsk);
60991 + read_unlock(&tasklist_lock);
60992 + }
60993 +#endif
60994 +}
60995 +
60996 +int __gr_process_user_ban(struct user_struct *user)
60997 +{
60998 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60999 + if (unlikely(user->banned)) {
61000 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
61001 + user->banned = 0;
61002 + user->ban_expires = 0;
61003 + free_uid(user);
61004 + } else
61005 + return -EPERM;
61006 + }
61007 +#endif
61008 + return 0;
61009 +}
61010 +
61011 +int gr_process_user_ban(void)
61012 +{
61013 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61014 + return __gr_process_user_ban(current->cred->user);
61015 +#endif
61016 + return 0;
61017 +}
61018 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
61019 new file mode 100644
61020 index 0000000..4030d57
61021 --- /dev/null
61022 +++ b/grsecurity/grsec_sock.c
61023 @@ -0,0 +1,244 @@
61024 +#include <linux/kernel.h>
61025 +#include <linux/module.h>
61026 +#include <linux/sched.h>
61027 +#include <linux/file.h>
61028 +#include <linux/net.h>
61029 +#include <linux/in.h>
61030 +#include <linux/ip.h>
61031 +#include <net/sock.h>
61032 +#include <net/inet_sock.h>
61033 +#include <linux/grsecurity.h>
61034 +#include <linux/grinternal.h>
61035 +#include <linux/gracl.h>
61036 +
61037 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
61038 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
61039 +
61040 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
61041 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
61042 +
61043 +#ifdef CONFIG_UNIX_MODULE
61044 +EXPORT_SYMBOL(gr_acl_handle_unix);
61045 +EXPORT_SYMBOL(gr_acl_handle_mknod);
61046 +EXPORT_SYMBOL(gr_handle_chroot_unix);
61047 +EXPORT_SYMBOL(gr_handle_create);
61048 +#endif
61049 +
61050 +#ifdef CONFIG_GRKERNSEC
61051 +#define gr_conn_table_size 32749
61052 +struct conn_table_entry {
61053 + struct conn_table_entry *next;
61054 + struct signal_struct *sig;
61055 +};
61056 +
61057 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
61058 +DEFINE_SPINLOCK(gr_conn_table_lock);
61059 +
61060 +extern const char * gr_socktype_to_name(unsigned char type);
61061 +extern const char * gr_proto_to_name(unsigned char proto);
61062 +extern const char * gr_sockfamily_to_name(unsigned char family);
61063 +
61064 +static __inline__ int
61065 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
61066 +{
61067 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
61068 +}
61069 +
61070 +static __inline__ int
61071 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
61072 + __u16 sport, __u16 dport)
61073 +{
61074 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
61075 + sig->gr_sport == sport && sig->gr_dport == dport))
61076 + return 1;
61077 + else
61078 + return 0;
61079 +}
61080 +
61081 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
61082 +{
61083 + struct conn_table_entry **match;
61084 + unsigned int index;
61085 +
61086 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
61087 + sig->gr_sport, sig->gr_dport,
61088 + gr_conn_table_size);
61089 +
61090 + newent->sig = sig;
61091 +
61092 + match = &gr_conn_table[index];
61093 + newent->next = *match;
61094 + *match = newent;
61095 +
61096 + return;
61097 +}
61098 +
61099 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
61100 +{
61101 + struct conn_table_entry *match, *last = NULL;
61102 + unsigned int index;
61103 +
61104 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
61105 + sig->gr_sport, sig->gr_dport,
61106 + gr_conn_table_size);
61107 +
61108 + match = gr_conn_table[index];
61109 + while (match && !conn_match(match->sig,
61110 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
61111 + sig->gr_dport)) {
61112 + last = match;
61113 + match = match->next;
61114 + }
61115 +
61116 + if (match) {
61117 + if (last)
61118 + last->next = match->next;
61119 + else
61120 + gr_conn_table[index] = NULL;
61121 + kfree(match);
61122 + }
61123 +
61124 + return;
61125 +}
61126 +
61127 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
61128 + __u16 sport, __u16 dport)
61129 +{
61130 + struct conn_table_entry *match;
61131 + unsigned int index;
61132 +
61133 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
61134 +
61135 + match = gr_conn_table[index];
61136 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
61137 + match = match->next;
61138 +
61139 + if (match)
61140 + return match->sig;
61141 + else
61142 + return NULL;
61143 +}
61144 +
61145 +#endif
61146 +
61147 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
61148 +{
61149 +#ifdef CONFIG_GRKERNSEC
61150 + struct signal_struct *sig = task->signal;
61151 + struct conn_table_entry *newent;
61152 +
61153 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
61154 + if (newent == NULL)
61155 + return;
61156 + /* no bh lock needed since we are called with bh disabled */
61157 + spin_lock(&gr_conn_table_lock);
61158 + gr_del_task_from_ip_table_nolock(sig);
61159 + sig->gr_saddr = inet->inet_rcv_saddr;
61160 + sig->gr_daddr = inet->inet_daddr;
61161 + sig->gr_sport = inet->inet_sport;
61162 + sig->gr_dport = inet->inet_dport;
61163 + gr_add_to_task_ip_table_nolock(sig, newent);
61164 + spin_unlock(&gr_conn_table_lock);
61165 +#endif
61166 + return;
61167 +}
61168 +
61169 +void gr_del_task_from_ip_table(struct task_struct *task)
61170 +{
61171 +#ifdef CONFIG_GRKERNSEC
61172 + spin_lock_bh(&gr_conn_table_lock);
61173 + gr_del_task_from_ip_table_nolock(task->signal);
61174 + spin_unlock_bh(&gr_conn_table_lock);
61175 +#endif
61176 + return;
61177 +}
61178 +
61179 +void
61180 +gr_attach_curr_ip(const struct sock *sk)
61181 +{
61182 +#ifdef CONFIG_GRKERNSEC
61183 + struct signal_struct *p, *set;
61184 + const struct inet_sock *inet = inet_sk(sk);
61185 +
61186 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
61187 + return;
61188 +
61189 + set = current->signal;
61190 +
61191 + spin_lock_bh(&gr_conn_table_lock);
61192 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
61193 + inet->inet_dport, inet->inet_sport);
61194 + if (unlikely(p != NULL)) {
61195 + set->curr_ip = p->curr_ip;
61196 + set->used_accept = 1;
61197 + gr_del_task_from_ip_table_nolock(p);
61198 + spin_unlock_bh(&gr_conn_table_lock);
61199 + return;
61200 + }
61201 + spin_unlock_bh(&gr_conn_table_lock);
61202 +
61203 + set->curr_ip = inet->inet_daddr;
61204 + set->used_accept = 1;
61205 +#endif
61206 + return;
61207 +}
61208 +
61209 +int
61210 +gr_handle_sock_all(const int family, const int type, const int protocol)
61211 +{
61212 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
61213 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
61214 + (family != AF_UNIX)) {
61215 + if (family == AF_INET)
61216 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
61217 + else
61218 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
61219 + return -EACCES;
61220 + }
61221 +#endif
61222 + return 0;
61223 +}
61224 +
61225 +int
61226 +gr_handle_sock_server(const struct sockaddr *sck)
61227 +{
61228 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61229 + if (grsec_enable_socket_server &&
61230 + in_group_p(grsec_socket_server_gid) &&
61231 + sck && (sck->sa_family != AF_UNIX) &&
61232 + (sck->sa_family != AF_LOCAL)) {
61233 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
61234 + return -EACCES;
61235 + }
61236 +#endif
61237 + return 0;
61238 +}
61239 +
61240 +int
61241 +gr_handle_sock_server_other(const struct sock *sck)
61242 +{
61243 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61244 + if (grsec_enable_socket_server &&
61245 + in_group_p(grsec_socket_server_gid) &&
61246 + sck && (sck->sk_family != AF_UNIX) &&
61247 + (sck->sk_family != AF_LOCAL)) {
61248 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
61249 + return -EACCES;
61250 + }
61251 +#endif
61252 + return 0;
61253 +}
61254 +
61255 +int
61256 +gr_handle_sock_client(const struct sockaddr *sck)
61257 +{
61258 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
61259 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
61260 + sck && (sck->sa_family != AF_UNIX) &&
61261 + (sck->sa_family != AF_LOCAL)) {
61262 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
61263 + return -EACCES;
61264 + }
61265 +#endif
61266 + return 0;
61267 +}
61268 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
61269 new file mode 100644
61270 index 0000000..f55ef0f
61271 --- /dev/null
61272 +++ b/grsecurity/grsec_sysctl.c
61273 @@ -0,0 +1,469 @@
61274 +#include <linux/kernel.h>
61275 +#include <linux/sched.h>
61276 +#include <linux/sysctl.h>
61277 +#include <linux/grsecurity.h>
61278 +#include <linux/grinternal.h>
61279 +
61280 +int
61281 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
61282 +{
61283 +#ifdef CONFIG_GRKERNSEC_SYSCTL
61284 + if (dirname == NULL || name == NULL)
61285 + return 0;
61286 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
61287 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
61288 + return -EACCES;
61289 + }
61290 +#endif
61291 + return 0;
61292 +}
61293 +
61294 +#ifdef CONFIG_GRKERNSEC_ROFS
61295 +static int __maybe_unused one = 1;
61296 +#endif
61297 +
61298 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
61299 +struct ctl_table grsecurity_table[] = {
61300 +#ifdef CONFIG_GRKERNSEC_SYSCTL
61301 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
61302 +#ifdef CONFIG_GRKERNSEC_IO
61303 + {
61304 + .procname = "disable_priv_io",
61305 + .data = &grsec_disable_privio,
61306 + .maxlen = sizeof(int),
61307 + .mode = 0600,
61308 + .proc_handler = &proc_dointvec,
61309 + },
61310 +#endif
61311 +#endif
61312 +#ifdef CONFIG_GRKERNSEC_LINK
61313 + {
61314 + .procname = "linking_restrictions",
61315 + .data = &grsec_enable_link,
61316 + .maxlen = sizeof(int),
61317 + .mode = 0600,
61318 + .proc_handler = &proc_dointvec,
61319 + },
61320 +#endif
61321 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
61322 + {
61323 + .procname = "enforce_symlinksifowner",
61324 + .data = &grsec_enable_symlinkown,
61325 + .maxlen = sizeof(int),
61326 + .mode = 0600,
61327 + .proc_handler = &proc_dointvec,
61328 + },
61329 + {
61330 + .procname = "symlinkown_gid",
61331 + .data = &grsec_symlinkown_gid,
61332 + .maxlen = sizeof(int),
61333 + .mode = 0600,
61334 + .proc_handler = &proc_dointvec,
61335 + },
61336 +#endif
61337 +#ifdef CONFIG_GRKERNSEC_BRUTE
61338 + {
61339 + .procname = "deter_bruteforce",
61340 + .data = &grsec_enable_brute,
61341 + .maxlen = sizeof(int),
61342 + .mode = 0600,
61343 + .proc_handler = &proc_dointvec,
61344 + },
61345 +#endif
61346 +#ifdef CONFIG_GRKERNSEC_FIFO
61347 + {
61348 + .procname = "fifo_restrictions",
61349 + .data = &grsec_enable_fifo,
61350 + .maxlen = sizeof(int),
61351 + .mode = 0600,
61352 + .proc_handler = &proc_dointvec,
61353 + },
61354 +#endif
61355 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
61356 + {
61357 + .procname = "ptrace_readexec",
61358 + .data = &grsec_enable_ptrace_readexec,
61359 + .maxlen = sizeof(int),
61360 + .mode = 0600,
61361 + .proc_handler = &proc_dointvec,
61362 + },
61363 +#endif
61364 +#ifdef CONFIG_GRKERNSEC_SETXID
61365 + {
61366 + .procname = "consistent_setxid",
61367 + .data = &grsec_enable_setxid,
61368 + .maxlen = sizeof(int),
61369 + .mode = 0600,
61370 + .proc_handler = &proc_dointvec,
61371 + },
61372 +#endif
61373 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
61374 + {
61375 + .procname = "ip_blackhole",
61376 + .data = &grsec_enable_blackhole,
61377 + .maxlen = sizeof(int),
61378 + .mode = 0600,
61379 + .proc_handler = &proc_dointvec,
61380 + },
61381 + {
61382 + .procname = "lastack_retries",
61383 + .data = &grsec_lastack_retries,
61384 + .maxlen = sizeof(int),
61385 + .mode = 0600,
61386 + .proc_handler = &proc_dointvec,
61387 + },
61388 +#endif
61389 +#ifdef CONFIG_GRKERNSEC_EXECLOG
61390 + {
61391 + .procname = "exec_logging",
61392 + .data = &grsec_enable_execlog,
61393 + .maxlen = sizeof(int),
61394 + .mode = 0600,
61395 + .proc_handler = &proc_dointvec,
61396 + },
61397 +#endif
61398 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
61399 + {
61400 + .procname = "rwxmap_logging",
61401 + .data = &grsec_enable_log_rwxmaps,
61402 + .maxlen = sizeof(int),
61403 + .mode = 0600,
61404 + .proc_handler = &proc_dointvec,
61405 + },
61406 +#endif
61407 +#ifdef CONFIG_GRKERNSEC_SIGNAL
61408 + {
61409 + .procname = "signal_logging",
61410 + .data = &grsec_enable_signal,
61411 + .maxlen = sizeof(int),
61412 + .mode = 0600,
61413 + .proc_handler = &proc_dointvec,
61414 + },
61415 +#endif
61416 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
61417 + {
61418 + .procname = "forkfail_logging",
61419 + .data = &grsec_enable_forkfail,
61420 + .maxlen = sizeof(int),
61421 + .mode = 0600,
61422 + .proc_handler = &proc_dointvec,
61423 + },
61424 +#endif
61425 +#ifdef CONFIG_GRKERNSEC_TIME
61426 + {
61427 + .procname = "timechange_logging",
61428 + .data = &grsec_enable_time,
61429 + .maxlen = sizeof(int),
61430 + .mode = 0600,
61431 + .proc_handler = &proc_dointvec,
61432 + },
61433 +#endif
61434 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61435 + {
61436 + .procname = "chroot_deny_shmat",
61437 + .data = &grsec_enable_chroot_shmat,
61438 + .maxlen = sizeof(int),
61439 + .mode = 0600,
61440 + .proc_handler = &proc_dointvec,
61441 + },
61442 +#endif
61443 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61444 + {
61445 + .procname = "chroot_deny_unix",
61446 + .data = &grsec_enable_chroot_unix,
61447 + .maxlen = sizeof(int),
61448 + .mode = 0600,
61449 + .proc_handler = &proc_dointvec,
61450 + },
61451 +#endif
61452 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
61453 + {
61454 + .procname = "chroot_deny_mount",
61455 + .data = &grsec_enable_chroot_mount,
61456 + .maxlen = sizeof(int),
61457 + .mode = 0600,
61458 + .proc_handler = &proc_dointvec,
61459 + },
61460 +#endif
61461 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61462 + {
61463 + .procname = "chroot_deny_fchdir",
61464 + .data = &grsec_enable_chroot_fchdir,
61465 + .maxlen = sizeof(int),
61466 + .mode = 0600,
61467 + .proc_handler = &proc_dointvec,
61468 + },
61469 +#endif
61470 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
61471 + {
61472 + .procname = "chroot_deny_chroot",
61473 + .data = &grsec_enable_chroot_double,
61474 + .maxlen = sizeof(int),
61475 + .mode = 0600,
61476 + .proc_handler = &proc_dointvec,
61477 + },
61478 +#endif
61479 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
61480 + {
61481 + .procname = "chroot_deny_pivot",
61482 + .data = &grsec_enable_chroot_pivot,
61483 + .maxlen = sizeof(int),
61484 + .mode = 0600,
61485 + .proc_handler = &proc_dointvec,
61486 + },
61487 +#endif
61488 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
61489 + {
61490 + .procname = "chroot_enforce_chdir",
61491 + .data = &grsec_enable_chroot_chdir,
61492 + .maxlen = sizeof(int),
61493 + .mode = 0600,
61494 + .proc_handler = &proc_dointvec,
61495 + },
61496 +#endif
61497 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
61498 + {
61499 + .procname = "chroot_deny_chmod",
61500 + .data = &grsec_enable_chroot_chmod,
61501 + .maxlen = sizeof(int),
61502 + .mode = 0600,
61503 + .proc_handler = &proc_dointvec,
61504 + },
61505 +#endif
61506 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
61507 + {
61508 + .procname = "chroot_deny_mknod",
61509 + .data = &grsec_enable_chroot_mknod,
61510 + .maxlen = sizeof(int),
61511 + .mode = 0600,
61512 + .proc_handler = &proc_dointvec,
61513 + },
61514 +#endif
61515 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61516 + {
61517 + .procname = "chroot_restrict_nice",
61518 + .data = &grsec_enable_chroot_nice,
61519 + .maxlen = sizeof(int),
61520 + .mode = 0600,
61521 + .proc_handler = &proc_dointvec,
61522 + },
61523 +#endif
61524 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61525 + {
61526 + .procname = "chroot_execlog",
61527 + .data = &grsec_enable_chroot_execlog,
61528 + .maxlen = sizeof(int),
61529 + .mode = 0600,
61530 + .proc_handler = &proc_dointvec,
61531 + },
61532 +#endif
61533 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61534 + {
61535 + .procname = "chroot_caps",
61536 + .data = &grsec_enable_chroot_caps,
61537 + .maxlen = sizeof(int),
61538 + .mode = 0600,
61539 + .proc_handler = &proc_dointvec,
61540 + },
61541 +#endif
61542 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
61543 + {
61544 + .procname = "chroot_deny_sysctl",
61545 + .data = &grsec_enable_chroot_sysctl,
61546 + .maxlen = sizeof(int),
61547 + .mode = 0600,
61548 + .proc_handler = &proc_dointvec,
61549 + },
61550 +#endif
61551 +#ifdef CONFIG_GRKERNSEC_TPE
61552 + {
61553 + .procname = "tpe",
61554 + .data = &grsec_enable_tpe,
61555 + .maxlen = sizeof(int),
61556 + .mode = 0600,
61557 + .proc_handler = &proc_dointvec,
61558 + },
61559 + {
61560 + .procname = "tpe_gid",
61561 + .data = &grsec_tpe_gid,
61562 + .maxlen = sizeof(int),
61563 + .mode = 0600,
61564 + .proc_handler = &proc_dointvec,
61565 + },
61566 +#endif
61567 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
61568 + {
61569 + .procname = "tpe_invert",
61570 + .data = &grsec_enable_tpe_invert,
61571 + .maxlen = sizeof(int),
61572 + .mode = 0600,
61573 + .proc_handler = &proc_dointvec,
61574 + },
61575 +#endif
61576 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
61577 + {
61578 + .procname = "tpe_restrict_all",
61579 + .data = &grsec_enable_tpe_all,
61580 + .maxlen = sizeof(int),
61581 + .mode = 0600,
61582 + .proc_handler = &proc_dointvec,
61583 + },
61584 +#endif
61585 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
61586 + {
61587 + .procname = "socket_all",
61588 + .data = &grsec_enable_socket_all,
61589 + .maxlen = sizeof(int),
61590 + .mode = 0600,
61591 + .proc_handler = &proc_dointvec,
61592 + },
61593 + {
61594 + .procname = "socket_all_gid",
61595 + .data = &grsec_socket_all_gid,
61596 + .maxlen = sizeof(int),
61597 + .mode = 0600,
61598 + .proc_handler = &proc_dointvec,
61599 + },
61600 +#endif
61601 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
61602 + {
61603 + .procname = "socket_client",
61604 + .data = &grsec_enable_socket_client,
61605 + .maxlen = sizeof(int),
61606 + .mode = 0600,
61607 + .proc_handler = &proc_dointvec,
61608 + },
61609 + {
61610 + .procname = "socket_client_gid",
61611 + .data = &grsec_socket_client_gid,
61612 + .maxlen = sizeof(int),
61613 + .mode = 0600,
61614 + .proc_handler = &proc_dointvec,
61615 + },
61616 +#endif
61617 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61618 + {
61619 + .procname = "socket_server",
61620 + .data = &grsec_enable_socket_server,
61621 + .maxlen = sizeof(int),
61622 + .mode = 0600,
61623 + .proc_handler = &proc_dointvec,
61624 + },
61625 + {
61626 + .procname = "socket_server_gid",
61627 + .data = &grsec_socket_server_gid,
61628 + .maxlen = sizeof(int),
61629 + .mode = 0600,
61630 + .proc_handler = &proc_dointvec,
61631 + },
61632 +#endif
61633 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
61634 + {
61635 + .procname = "audit_group",
61636 + .data = &grsec_enable_group,
61637 + .maxlen = sizeof(int),
61638 + .mode = 0600,
61639 + .proc_handler = &proc_dointvec,
61640 + },
61641 + {
61642 + .procname = "audit_gid",
61643 + .data = &grsec_audit_gid,
61644 + .maxlen = sizeof(int),
61645 + .mode = 0600,
61646 + .proc_handler = &proc_dointvec,
61647 + },
61648 +#endif
61649 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61650 + {
61651 + .procname = "audit_chdir",
61652 + .data = &grsec_enable_chdir,
61653 + .maxlen = sizeof(int),
61654 + .mode = 0600,
61655 + .proc_handler = &proc_dointvec,
61656 + },
61657 +#endif
61658 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
61659 + {
61660 + .procname = "audit_mount",
61661 + .data = &grsec_enable_mount,
61662 + .maxlen = sizeof(int),
61663 + .mode = 0600,
61664 + .proc_handler = &proc_dointvec,
61665 + },
61666 +#endif
61667 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
61668 + {
61669 + .procname = "audit_textrel",
61670 + .data = &grsec_enable_audit_textrel,
61671 + .maxlen = sizeof(int),
61672 + .mode = 0600,
61673 + .proc_handler = &proc_dointvec,
61674 + },
61675 +#endif
61676 +#ifdef CONFIG_GRKERNSEC_DMESG
61677 + {
61678 + .procname = "dmesg",
61679 + .data = &grsec_enable_dmesg,
61680 + .maxlen = sizeof(int),
61681 + .mode = 0600,
61682 + .proc_handler = &proc_dointvec,
61683 + },
61684 +#endif
61685 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61686 + {
61687 + .procname = "chroot_findtask",
61688 + .data = &grsec_enable_chroot_findtask,
61689 + .maxlen = sizeof(int),
61690 + .mode = 0600,
61691 + .proc_handler = &proc_dointvec,
61692 + },
61693 +#endif
61694 +#ifdef CONFIG_GRKERNSEC_RESLOG
61695 + {
61696 + .procname = "resource_logging",
61697 + .data = &grsec_resource_logging,
61698 + .maxlen = sizeof(int),
61699 + .mode = 0600,
61700 + .proc_handler = &proc_dointvec,
61701 + },
61702 +#endif
61703 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
61704 + {
61705 + .procname = "audit_ptrace",
61706 + .data = &grsec_enable_audit_ptrace,
61707 + .maxlen = sizeof(int),
61708 + .mode = 0600,
61709 + .proc_handler = &proc_dointvec,
61710 + },
61711 +#endif
61712 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61713 + {
61714 + .procname = "harden_ptrace",
61715 + .data = &grsec_enable_harden_ptrace,
61716 + .maxlen = sizeof(int),
61717 + .mode = 0600,
61718 + .proc_handler = &proc_dointvec,
61719 + },
61720 +#endif
61721 + {
61722 + .procname = "grsec_lock",
61723 + .data = &grsec_lock,
61724 + .maxlen = sizeof(int),
61725 + .mode = 0600,
61726 + .proc_handler = &proc_dointvec,
61727 + },
61728 +#endif
61729 +#ifdef CONFIG_GRKERNSEC_ROFS
61730 + {
61731 + .procname = "romount_protect",
61732 + .data = &grsec_enable_rofs,
61733 + .maxlen = sizeof(int),
61734 + .mode = 0600,
61735 + .proc_handler = &proc_dointvec_minmax,
61736 + .extra1 = &one,
61737 + .extra2 = &one,
61738 + },
61739 +#endif
61740 + { }
61741 +};
61742 +#endif
61743 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
61744 new file mode 100644
61745 index 0000000..0dc13c3
61746 --- /dev/null
61747 +++ b/grsecurity/grsec_time.c
61748 @@ -0,0 +1,16 @@
61749 +#include <linux/kernel.h>
61750 +#include <linux/sched.h>
61751 +#include <linux/grinternal.h>
61752 +#include <linux/module.h>
61753 +
61754 +void
61755 +gr_log_timechange(void)
61756 +{
61757 +#ifdef CONFIG_GRKERNSEC_TIME
61758 + if (grsec_enable_time)
61759 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
61760 +#endif
61761 + return;
61762 +}
61763 +
61764 +EXPORT_SYMBOL(gr_log_timechange);
61765 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
61766 new file mode 100644
61767 index 0000000..07e0dc0
61768 --- /dev/null
61769 +++ b/grsecurity/grsec_tpe.c
61770 @@ -0,0 +1,73 @@
61771 +#include <linux/kernel.h>
61772 +#include <linux/sched.h>
61773 +#include <linux/file.h>
61774 +#include <linux/fs.h>
61775 +#include <linux/grinternal.h>
61776 +
61777 +extern int gr_acl_tpe_check(void);
61778 +
61779 +int
61780 +gr_tpe_allow(const struct file *file)
61781 +{
61782 +#ifdef CONFIG_GRKERNSEC
61783 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
61784 + const struct cred *cred = current_cred();
61785 + char *msg = NULL;
61786 + char *msg2 = NULL;
61787 +
61788 + // never restrict root
61789 + if (!cred->uid)
61790 + return 1;
61791 +
61792 + if (grsec_enable_tpe) {
61793 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
61794 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
61795 + msg = "not being in trusted group";
61796 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
61797 + msg = "being in untrusted group";
61798 +#else
61799 + if (in_group_p(grsec_tpe_gid))
61800 + msg = "being in untrusted group";
61801 +#endif
61802 + }
61803 + if (!msg && gr_acl_tpe_check())
61804 + msg = "being in untrusted role";
61805 +
61806 + // not in any affected group/role
61807 + if (!msg)
61808 + goto next_check;
61809 +
61810 + if (inode->i_uid)
61811 + msg2 = "file in non-root-owned directory";
61812 + else if (inode->i_mode & S_IWOTH)
61813 + msg2 = "file in world-writable directory";
61814 + else if (inode->i_mode & S_IWGRP)
61815 + msg2 = "file in group-writable directory";
61816 +
61817 + if (msg && msg2) {
61818 + char fullmsg[70] = {0};
61819 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
61820 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
61821 + return 0;
61822 + }
61823 + msg = NULL;
61824 +next_check:
61825 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
61826 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
61827 + return 1;
61828 +
61829 + if (inode->i_uid && (inode->i_uid != cred->uid))
61830 + msg = "directory not owned by user";
61831 + else if (inode->i_mode & S_IWOTH)
61832 + msg = "file in world-writable directory";
61833 + else if (inode->i_mode & S_IWGRP)
61834 + msg = "file in group-writable directory";
61835 +
61836 + if (msg) {
61837 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
61838 + return 0;
61839 + }
61840 +#endif
61841 +#endif
61842 + return 1;
61843 +}
61844 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
61845 new file mode 100644
61846 index 0000000..9f7b1ac
61847 --- /dev/null
61848 +++ b/grsecurity/grsum.c
61849 @@ -0,0 +1,61 @@
61850 +#include <linux/err.h>
61851 +#include <linux/kernel.h>
61852 +#include <linux/sched.h>
61853 +#include <linux/mm.h>
61854 +#include <linux/scatterlist.h>
61855 +#include <linux/crypto.h>
61856 +#include <linux/gracl.h>
61857 +
61858 +
61859 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
61860 +#error "crypto and sha256 must be built into the kernel"
61861 +#endif
61862 +
61863 +int
61864 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
61865 +{
61866 + char *p;
61867 + struct crypto_hash *tfm;
61868 + struct hash_desc desc;
61869 + struct scatterlist sg;
61870 + unsigned char temp_sum[GR_SHA_LEN];
61871 + volatile int retval = 0;
61872 + volatile int dummy = 0;
61873 + unsigned int i;
61874 +
61875 + sg_init_table(&sg, 1);
61876 +
61877 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
61878 + if (IS_ERR(tfm)) {
61879 + /* should never happen, since sha256 should be built in */
61880 + return 1;
61881 + }
61882 +
61883 + desc.tfm = tfm;
61884 + desc.flags = 0;
61885 +
61886 + crypto_hash_init(&desc);
61887 +
61888 + p = salt;
61889 + sg_set_buf(&sg, p, GR_SALT_LEN);
61890 + crypto_hash_update(&desc, &sg, sg.length);
61891 +
61892 + p = entry->pw;
61893 + sg_set_buf(&sg, p, strlen(p));
61894 +
61895 + crypto_hash_update(&desc, &sg, sg.length);
61896 +
61897 + crypto_hash_final(&desc, temp_sum);
61898 +
61899 + memset(entry->pw, 0, GR_PW_LEN);
61900 +
61901 + for (i = 0; i < GR_SHA_LEN; i++)
61902 + if (sum[i] != temp_sum[i])
61903 + retval = 1;
61904 + else
61905 + dummy = 1; // waste a cycle
61906 +
61907 + crypto_free_hash(tfm);
61908 +
61909 + return retval;
61910 +}
61911 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
61912 index 0daa0fb..f548aa4 100644
61913 --- a/include/acpi/acpi_bus.h
61914 +++ b/include/acpi/acpi_bus.h
61915 @@ -107,7 +107,7 @@ struct acpi_device_ops {
61916 acpi_op_bind bind;
61917 acpi_op_unbind unbind;
61918 acpi_op_notify notify;
61919 -};
61920 +} __no_const;
61921
61922 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
61923
61924 diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
61925 index 77ff547..181834f 100644
61926 --- a/include/asm-generic/4level-fixup.h
61927 +++ b/include/asm-generic/4level-fixup.h
61928 @@ -13,8 +13,10 @@
61929 #define pmd_alloc(mm, pud, address) \
61930 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
61931 NULL: pmd_offset(pud, address))
61932 +#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
61933
61934 #define pud_alloc(mm, pgd, address) (pgd)
61935 +#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
61936 #define pud_offset(pgd, start) (pgd)
61937 #define pud_none(pud) 0
61938 #define pud_bad(pud) 0
61939 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
61940 index b7babf0..04ad282 100644
61941 --- a/include/asm-generic/atomic-long.h
61942 +++ b/include/asm-generic/atomic-long.h
61943 @@ -22,6 +22,12 @@
61944
61945 typedef atomic64_t atomic_long_t;
61946
61947 +#ifdef CONFIG_PAX_REFCOUNT
61948 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
61949 +#else
61950 +typedef atomic64_t atomic_long_unchecked_t;
61951 +#endif
61952 +
61953 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
61954
61955 static inline long atomic_long_read(atomic_long_t *l)
61956 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
61957 return (long)atomic64_read(v);
61958 }
61959
61960 +#ifdef CONFIG_PAX_REFCOUNT
61961 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
61962 +{
61963 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
61964 +
61965 + return (long)atomic64_read_unchecked(v);
61966 +}
61967 +#endif
61968 +
61969 static inline void atomic_long_set(atomic_long_t *l, long i)
61970 {
61971 atomic64_t *v = (atomic64_t *)l;
61972 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
61973 atomic64_set(v, i);
61974 }
61975
61976 +#ifdef CONFIG_PAX_REFCOUNT
61977 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
61978 +{
61979 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
61980 +
61981 + atomic64_set_unchecked(v, i);
61982 +}
61983 +#endif
61984 +
61985 static inline void atomic_long_inc(atomic_long_t *l)
61986 {
61987 atomic64_t *v = (atomic64_t *)l;
61988 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
61989 atomic64_inc(v);
61990 }
61991
61992 +#ifdef CONFIG_PAX_REFCOUNT
61993 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
61994 +{
61995 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
61996 +
61997 + atomic64_inc_unchecked(v);
61998 +}
61999 +#endif
62000 +
62001 static inline void atomic_long_dec(atomic_long_t *l)
62002 {
62003 atomic64_t *v = (atomic64_t *)l;
62004 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
62005 atomic64_dec(v);
62006 }
62007
62008 +#ifdef CONFIG_PAX_REFCOUNT
62009 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
62010 +{
62011 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62012 +
62013 + atomic64_dec_unchecked(v);
62014 +}
62015 +#endif
62016 +
62017 static inline void atomic_long_add(long i, atomic_long_t *l)
62018 {
62019 atomic64_t *v = (atomic64_t *)l;
62020 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
62021 atomic64_add(i, v);
62022 }
62023
62024 +#ifdef CONFIG_PAX_REFCOUNT
62025 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
62026 +{
62027 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62028 +
62029 + atomic64_add_unchecked(i, v);
62030 +}
62031 +#endif
62032 +
62033 static inline void atomic_long_sub(long i, atomic_long_t *l)
62034 {
62035 atomic64_t *v = (atomic64_t *)l;
62036 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
62037 atomic64_sub(i, v);
62038 }
62039
62040 +#ifdef CONFIG_PAX_REFCOUNT
62041 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
62042 +{
62043 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62044 +
62045 + atomic64_sub_unchecked(i, v);
62046 +}
62047 +#endif
62048 +
62049 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
62050 {
62051 atomic64_t *v = (atomic64_t *)l;
62052 @@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
62053 return (long)atomic64_add_return(i, v);
62054 }
62055
62056 +#ifdef CONFIG_PAX_REFCOUNT
62057 +static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
62058 +{
62059 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62060 +
62061 + return (long)atomic64_add_return_unchecked(i, v);
62062 +}
62063 +#endif
62064 +
62065 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
62066 {
62067 atomic64_t *v = (atomic64_t *)l;
62068 @@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
62069 return (long)atomic64_inc_return(v);
62070 }
62071
62072 +#ifdef CONFIG_PAX_REFCOUNT
62073 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
62074 +{
62075 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62076 +
62077 + return (long)atomic64_inc_return_unchecked(v);
62078 +}
62079 +#endif
62080 +
62081 static inline long atomic_long_dec_return(atomic_long_t *l)
62082 {
62083 atomic64_t *v = (atomic64_t *)l;
62084 @@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
62085
62086 typedef atomic_t atomic_long_t;
62087
62088 +#ifdef CONFIG_PAX_REFCOUNT
62089 +typedef atomic_unchecked_t atomic_long_unchecked_t;
62090 +#else
62091 +typedef atomic_t atomic_long_unchecked_t;
62092 +#endif
62093 +
62094 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
62095 static inline long atomic_long_read(atomic_long_t *l)
62096 {
62097 @@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
62098 return (long)atomic_read(v);
62099 }
62100
62101 +#ifdef CONFIG_PAX_REFCOUNT
62102 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
62103 +{
62104 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62105 +
62106 + return (long)atomic_read_unchecked(v);
62107 +}
62108 +#endif
62109 +
62110 static inline void atomic_long_set(atomic_long_t *l, long i)
62111 {
62112 atomic_t *v = (atomic_t *)l;
62113 @@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
62114 atomic_set(v, i);
62115 }
62116
62117 +#ifdef CONFIG_PAX_REFCOUNT
62118 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
62119 +{
62120 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62121 +
62122 + atomic_set_unchecked(v, i);
62123 +}
62124 +#endif
62125 +
62126 static inline void atomic_long_inc(atomic_long_t *l)
62127 {
62128 atomic_t *v = (atomic_t *)l;
62129 @@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
62130 atomic_inc(v);
62131 }
62132
62133 +#ifdef CONFIG_PAX_REFCOUNT
62134 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
62135 +{
62136 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62137 +
62138 + atomic_inc_unchecked(v);
62139 +}
62140 +#endif
62141 +
62142 static inline void atomic_long_dec(atomic_long_t *l)
62143 {
62144 atomic_t *v = (atomic_t *)l;
62145 @@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
62146 atomic_dec(v);
62147 }
62148
62149 +#ifdef CONFIG_PAX_REFCOUNT
62150 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
62151 +{
62152 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62153 +
62154 + atomic_dec_unchecked(v);
62155 +}
62156 +#endif
62157 +
62158 static inline void atomic_long_add(long i, atomic_long_t *l)
62159 {
62160 atomic_t *v = (atomic_t *)l;
62161 @@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
62162 atomic_add(i, v);
62163 }
62164
62165 +#ifdef CONFIG_PAX_REFCOUNT
62166 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
62167 +{
62168 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62169 +
62170 + atomic_add_unchecked(i, v);
62171 +}
62172 +#endif
62173 +
62174 static inline void atomic_long_sub(long i, atomic_long_t *l)
62175 {
62176 atomic_t *v = (atomic_t *)l;
62177 @@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
62178 atomic_sub(i, v);
62179 }
62180
62181 +#ifdef CONFIG_PAX_REFCOUNT
62182 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
62183 +{
62184 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62185 +
62186 + atomic_sub_unchecked(i, v);
62187 +}
62188 +#endif
62189 +
62190 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
62191 {
62192 atomic_t *v = (atomic_t *)l;
62193 @@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
62194 return (long)atomic_add_return(i, v);
62195 }
62196
62197 +#ifdef CONFIG_PAX_REFCOUNT
62198 +static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
62199 +{
62200 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62201 +
62202 + return (long)atomic_add_return_unchecked(i, v);
62203 +}
62204 +
62205 +#endif
62206 +
62207 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
62208 {
62209 atomic_t *v = (atomic_t *)l;
62210 @@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
62211 return (long)atomic_inc_return(v);
62212 }
62213
62214 +#ifdef CONFIG_PAX_REFCOUNT
62215 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
62216 +{
62217 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62218 +
62219 + return (long)atomic_inc_return_unchecked(v);
62220 +}
62221 +#endif
62222 +
62223 static inline long atomic_long_dec_return(atomic_long_t *l)
62224 {
62225 atomic_t *v = (atomic_t *)l;
62226 @@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
62227
62228 #endif /* BITS_PER_LONG == 64 */
62229
62230 +#ifdef CONFIG_PAX_REFCOUNT
62231 +static inline void pax_refcount_needs_these_functions(void)
62232 +{
62233 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
62234 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
62235 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
62236 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
62237 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
62238 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
62239 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
62240 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
62241 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
62242 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
62243 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
62244 +#ifdef CONFIG_X86
62245 + atomic_clear_mask_unchecked(0, NULL);
62246 + atomic_set_mask_unchecked(0, NULL);
62247 +#endif
62248 +
62249 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
62250 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
62251 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
62252 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
62253 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
62254 + atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
62255 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
62256 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
62257 +}
62258 +#else
62259 +#define atomic_read_unchecked(v) atomic_read(v)
62260 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
62261 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
62262 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
62263 +#define atomic_inc_unchecked(v) atomic_inc(v)
62264 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
62265 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
62266 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
62267 +#define atomic_dec_unchecked(v) atomic_dec(v)
62268 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
62269 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
62270 +#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
62271 +#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
62272 +
62273 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
62274 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
62275 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
62276 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
62277 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
62278 +#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
62279 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
62280 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
62281 +#endif
62282 +
62283 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
62284 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
62285 index 1ced641..c896ee8 100644
62286 --- a/include/asm-generic/atomic.h
62287 +++ b/include/asm-generic/atomic.h
62288 @@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
62289 * Atomically clears the bits set in @mask from @v
62290 */
62291 #ifndef atomic_clear_mask
62292 -static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
62293 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
62294 {
62295 unsigned long flags;
62296
62297 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
62298 index b18ce4f..2ee2843 100644
62299 --- a/include/asm-generic/atomic64.h
62300 +++ b/include/asm-generic/atomic64.h
62301 @@ -16,6 +16,8 @@ typedef struct {
62302 long long counter;
62303 } atomic64_t;
62304
62305 +typedef atomic64_t atomic64_unchecked_t;
62306 +
62307 #define ATOMIC64_INIT(i) { (i) }
62308
62309 extern long long atomic64_read(const atomic64_t *v);
62310 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
62311 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
62312 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
62313
62314 +#define atomic64_read_unchecked(v) atomic64_read(v)
62315 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
62316 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
62317 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
62318 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
62319 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
62320 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
62321 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
62322 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
62323 +
62324 #endif /* _ASM_GENERIC_ATOMIC64_H */
62325 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
62326 index 1bfcfe5..e04c5c9 100644
62327 --- a/include/asm-generic/cache.h
62328 +++ b/include/asm-generic/cache.h
62329 @@ -6,7 +6,7 @@
62330 * cache lines need to provide their own cache.h.
62331 */
62332
62333 -#define L1_CACHE_SHIFT 5
62334 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
62335 +#define L1_CACHE_SHIFT 5UL
62336 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
62337
62338 #endif /* __ASM_GENERIC_CACHE_H */
62339 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
62340 index 0d68a1e..b74a761 100644
62341 --- a/include/asm-generic/emergency-restart.h
62342 +++ b/include/asm-generic/emergency-restart.h
62343 @@ -1,7 +1,7 @@
62344 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
62345 #define _ASM_GENERIC_EMERGENCY_RESTART_H
62346
62347 -static inline void machine_emergency_restart(void)
62348 +static inline __noreturn void machine_emergency_restart(void)
62349 {
62350 machine_restart(NULL);
62351 }
62352 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
62353 index 90f99c7..00ce236 100644
62354 --- a/include/asm-generic/kmap_types.h
62355 +++ b/include/asm-generic/kmap_types.h
62356 @@ -2,9 +2,9 @@
62357 #define _ASM_GENERIC_KMAP_TYPES_H
62358
62359 #ifdef __WITH_KM_FENCE
62360 -# define KM_TYPE_NR 41
62361 +# define KM_TYPE_NR 42
62362 #else
62363 -# define KM_TYPE_NR 20
62364 +# define KM_TYPE_NR 21
62365 #endif
62366
62367 #endif
62368 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
62369 index 9ceb03b..62b0b8f 100644
62370 --- a/include/asm-generic/local.h
62371 +++ b/include/asm-generic/local.h
62372 @@ -23,24 +23,37 @@ typedef struct
62373 atomic_long_t a;
62374 } local_t;
62375
62376 +typedef struct {
62377 + atomic_long_unchecked_t a;
62378 +} local_unchecked_t;
62379 +
62380 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
62381
62382 #define local_read(l) atomic_long_read(&(l)->a)
62383 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
62384 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
62385 +#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
62386 #define local_inc(l) atomic_long_inc(&(l)->a)
62387 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
62388 #define local_dec(l) atomic_long_dec(&(l)->a)
62389 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
62390 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
62391 +#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
62392 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
62393 +#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
62394
62395 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
62396 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
62397 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
62398 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
62399 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
62400 +#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
62401 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
62402 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
62403 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
62404
62405 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
62406 +#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
62407 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
62408 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
62409 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
62410 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
62411 index 725612b..9cc513a 100644
62412 --- a/include/asm-generic/pgtable-nopmd.h
62413 +++ b/include/asm-generic/pgtable-nopmd.h
62414 @@ -1,14 +1,19 @@
62415 #ifndef _PGTABLE_NOPMD_H
62416 #define _PGTABLE_NOPMD_H
62417
62418 -#ifndef __ASSEMBLY__
62419 -
62420 #include <asm-generic/pgtable-nopud.h>
62421
62422 -struct mm_struct;
62423 -
62424 #define __PAGETABLE_PMD_FOLDED
62425
62426 +#define PMD_SHIFT PUD_SHIFT
62427 +#define PTRS_PER_PMD 1
62428 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
62429 +#define PMD_MASK (~(PMD_SIZE-1))
62430 +
62431 +#ifndef __ASSEMBLY__
62432 +
62433 +struct mm_struct;
62434 +
62435 /*
62436 * Having the pmd type consist of a pud gets the size right, and allows
62437 * us to conceptually access the pud entry that this pmd is folded into
62438 @@ -16,11 +21,6 @@ struct mm_struct;
62439 */
62440 typedef struct { pud_t pud; } pmd_t;
62441
62442 -#define PMD_SHIFT PUD_SHIFT
62443 -#define PTRS_PER_PMD 1
62444 -#define PMD_SIZE (1UL << PMD_SHIFT)
62445 -#define PMD_MASK (~(PMD_SIZE-1))
62446 -
62447 /*
62448 * The "pud_xxx()" functions here are trivial for a folded two-level
62449 * setup: the pmd is never bad, and a pmd always exists (as it's folded
62450 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
62451 index 810431d..0ec4804f 100644
62452 --- a/include/asm-generic/pgtable-nopud.h
62453 +++ b/include/asm-generic/pgtable-nopud.h
62454 @@ -1,10 +1,15 @@
62455 #ifndef _PGTABLE_NOPUD_H
62456 #define _PGTABLE_NOPUD_H
62457
62458 -#ifndef __ASSEMBLY__
62459 -
62460 #define __PAGETABLE_PUD_FOLDED
62461
62462 +#define PUD_SHIFT PGDIR_SHIFT
62463 +#define PTRS_PER_PUD 1
62464 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
62465 +#define PUD_MASK (~(PUD_SIZE-1))
62466 +
62467 +#ifndef __ASSEMBLY__
62468 +
62469 /*
62470 * Having the pud type consist of a pgd gets the size right, and allows
62471 * us to conceptually access the pgd entry that this pud is folded into
62472 @@ -12,11 +17,6 @@
62473 */
62474 typedef struct { pgd_t pgd; } pud_t;
62475
62476 -#define PUD_SHIFT PGDIR_SHIFT
62477 -#define PTRS_PER_PUD 1
62478 -#define PUD_SIZE (1UL << PUD_SHIFT)
62479 -#define PUD_MASK (~(PUD_SIZE-1))
62480 -
62481 /*
62482 * The "pgd_xxx()" functions here are trivial for a folded two-level
62483 * setup: the pud is never bad, and a pud always exists (as it's folded
62484 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
62485 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
62486
62487 #define pgd_populate(mm, pgd, pud) do { } while (0)
62488 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
62489 /*
62490 * (puds are folded into pgds so this doesn't get actually called,
62491 * but the define is needed for a generic inline function.)
62492 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
62493 index b36ce40..019426d 100644
62494 --- a/include/asm-generic/pgtable.h
62495 +++ b/include/asm-generic/pgtable.h
62496 @@ -554,6 +554,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
62497 #endif
62498 }
62499
62500 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
62501 +static inline unsigned long pax_open_kernel(void) { return 0; }
62502 +#endif
62503 +
62504 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
62505 +static inline unsigned long pax_close_kernel(void) { return 0; }
62506 +#endif
62507 +
62508 #endif /* CONFIG_MMU */
62509
62510 #endif /* !__ASSEMBLY__ */
62511 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
62512 index d1ea7ce..b1ebf2a 100644
62513 --- a/include/asm-generic/vmlinux.lds.h
62514 +++ b/include/asm-generic/vmlinux.lds.h
62515 @@ -218,6 +218,7 @@
62516 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
62517 VMLINUX_SYMBOL(__start_rodata) = .; \
62518 *(.rodata) *(.rodata.*) \
62519 + *(.data..read_only) \
62520 *(__vermagic) /* Kernel version magic */ \
62521 . = ALIGN(8); \
62522 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
62523 @@ -725,17 +726,18 @@
62524 * section in the linker script will go there too. @phdr should have
62525 * a leading colon.
62526 *
62527 - * Note that this macros defines __per_cpu_load as an absolute symbol.
62528 + * Note that this macros defines per_cpu_load as an absolute symbol.
62529 * If there is no need to put the percpu section at a predetermined
62530 * address, use PERCPU_SECTION.
62531 */
62532 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
62533 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
62534 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
62535 + per_cpu_load = .; \
62536 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
62537 - LOAD_OFFSET) { \
62538 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
62539 PERCPU_INPUT(cacheline) \
62540 } phdr \
62541 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
62542 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
62543
62544 /**
62545 * PERCPU_SECTION - define output section for percpu area, simple version
62546 diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
62547 index 418d270..bfd2794 100644
62548 --- a/include/crypto/algapi.h
62549 +++ b/include/crypto/algapi.h
62550 @@ -34,7 +34,7 @@ struct crypto_type {
62551 unsigned int maskclear;
62552 unsigned int maskset;
62553 unsigned int tfmsize;
62554 -};
62555 +} __do_const;
62556
62557 struct crypto_instance {
62558 struct crypto_alg alg;
62559 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
62560 index 3fd8280..2b3c415 100644
62561 --- a/include/drm/drmP.h
62562 +++ b/include/drm/drmP.h
62563 @@ -72,6 +72,7 @@
62564 #include <linux/workqueue.h>
62565 #include <linux/poll.h>
62566 #include <asm/pgalloc.h>
62567 +#include <asm/local.h>
62568 #include <drm/drm.h>
62569 #include <drm/drm_sarea.h>
62570
62571 @@ -1068,7 +1069,7 @@ struct drm_device {
62572
62573 /** \name Usage Counters */
62574 /*@{ */
62575 - int open_count; /**< Outstanding files open */
62576 + local_t open_count; /**< Outstanding files open */
62577 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
62578 atomic_t vma_count; /**< Outstanding vma areas open */
62579 int buf_use; /**< Buffers in use -- cannot alloc */
62580 @@ -1079,7 +1080,7 @@ struct drm_device {
62581 /*@{ */
62582 unsigned long counters;
62583 enum drm_stat_type types[15];
62584 - atomic_t counts[15];
62585 + atomic_unchecked_t counts[15];
62586 /*@} */
62587
62588 struct list_head filelist;
62589 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
62590 index e01cc80..319855d 100644
62591 --- a/include/drm/drm_crtc_helper.h
62592 +++ b/include/drm/drm_crtc_helper.h
62593 @@ -81,7 +81,7 @@ struct drm_crtc_helper_funcs {
62594
62595 /* disable crtc when not in use - more explicit than dpms off */
62596 void (*disable)(struct drm_crtc *crtc);
62597 -};
62598 +} __no_const;
62599
62600 /**
62601 * drm_encoder_helper_funcs - helper operations for encoders
62602 @@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
62603 struct drm_connector *connector);
62604 /* disable encoder when not in use - more explicit than dpms off */
62605 void (*disable)(struct drm_encoder *encoder);
62606 -};
62607 +} __no_const;
62608
62609 /**
62610 * drm_connector_helper_funcs - helper operations for connectors
62611 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
62612 index d6d1da4..fdd1ac5 100644
62613 --- a/include/drm/ttm/ttm_memory.h
62614 +++ b/include/drm/ttm/ttm_memory.h
62615 @@ -48,7 +48,7 @@
62616
62617 struct ttm_mem_shrink {
62618 int (*do_shrink) (struct ttm_mem_shrink *);
62619 -};
62620 +} __no_const;
62621
62622 /**
62623 * struct ttm_mem_global - Global memory accounting structure.
62624 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
62625 index 22ef21c..75904ba 100644
62626 --- a/include/linux/atmdev.h
62627 +++ b/include/linux/atmdev.h
62628 @@ -28,7 +28,7 @@ struct compat_atm_iobuf {
62629 #endif
62630
62631 struct k_atm_aal_stats {
62632 -#define __HANDLE_ITEM(i) atomic_t i
62633 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
62634 __AAL_STAT_ITEMS
62635 #undef __HANDLE_ITEM
62636 };
62637 diff --git a/include/linux/backlight.h b/include/linux/backlight.h
62638 index 5ffc6dd..e5a41ab 100644
62639 --- a/include/linux/backlight.h
62640 +++ b/include/linux/backlight.h
62641 @@ -98,7 +98,7 @@ struct backlight_device {
62642 const struct backlight_ops *ops;
62643
62644 /* The framebuffer notifier block */
62645 - struct notifier_block fb_notif;
62646 + notifier_block_no_const fb_notif;
62647
62648 struct device dev;
62649 };
62650 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
62651 index cfcc6bf..9a7c73e 100644
62652 --- a/include/linux/binfmts.h
62653 +++ b/include/linux/binfmts.h
62654 @@ -75,6 +75,7 @@ struct linux_binfmt {
62655 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
62656 int (*load_shlib)(struct file *);
62657 int (*core_dump)(struct coredump_params *cprm);
62658 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
62659 unsigned long min_coredump; /* minimal dump size */
62660 };
62661
62662 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
62663 index 1756001..ab117ec 100644
62664 --- a/include/linux/blkdev.h
62665 +++ b/include/linux/blkdev.h
62666 @@ -1478,7 +1478,7 @@ struct block_device_operations {
62667 /* this callback is with swap_lock and sometimes page table lock held */
62668 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
62669 struct module *owner;
62670 -};
62671 +} __do_const;
62672
62673 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
62674 unsigned long);
62675 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
62676 index 7c2e030..b72475d 100644
62677 --- a/include/linux/blktrace_api.h
62678 +++ b/include/linux/blktrace_api.h
62679 @@ -23,7 +23,7 @@ struct blk_trace {
62680 struct dentry *dir;
62681 struct dentry *dropped_file;
62682 struct dentry *msg_file;
62683 - atomic_t dropped;
62684 + atomic_unchecked_t dropped;
62685 };
62686
62687 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
62688 diff --git a/include/linux/cache.h b/include/linux/cache.h
62689 index 4c57065..4307975 100644
62690 --- a/include/linux/cache.h
62691 +++ b/include/linux/cache.h
62692 @@ -16,6 +16,10 @@
62693 #define __read_mostly
62694 #endif
62695
62696 +#ifndef __read_only
62697 +#define __read_only __read_mostly
62698 +#endif
62699 +
62700 #ifndef ____cacheline_aligned
62701 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
62702 #endif
62703 diff --git a/include/linux/capability.h b/include/linux/capability.h
62704 index 98503b7..cc36d18 100644
62705 --- a/include/linux/capability.h
62706 +++ b/include/linux/capability.h
62707 @@ -211,8 +211,13 @@ extern bool capable(int cap);
62708 extern bool ns_capable(struct user_namespace *ns, int cap);
62709 extern bool nsown_capable(int cap);
62710 extern bool inode_capable(const struct inode *inode, int cap);
62711 +extern bool capable_nolog(int cap);
62712 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
62713 +extern bool inode_capable_nolog(const struct inode *inode, int cap);
62714
62715 /* audit system wants to get cap info from files as well */
62716 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
62717
62718 +extern int is_privileged_binary(const struct dentry *dentry);
62719 +
62720 #endif /* !_LINUX_CAPABILITY_H */
62721 diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
62722 index 8609d57..86e4d79 100644
62723 --- a/include/linux/cdrom.h
62724 +++ b/include/linux/cdrom.h
62725 @@ -87,7 +87,6 @@ struct cdrom_device_ops {
62726
62727 /* driver specifications */
62728 const int capability; /* capability flags */
62729 - int n_minors; /* number of active minor devices */
62730 /* handle uniform packets for scsi type devices (scsi,atapi) */
62731 int (*generic_packet) (struct cdrom_device_info *,
62732 struct packet_command *);
62733 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
62734 index 42e55de..1cd0e66 100644
62735 --- a/include/linux/cleancache.h
62736 +++ b/include/linux/cleancache.h
62737 @@ -31,7 +31,7 @@ struct cleancache_ops {
62738 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
62739 void (*invalidate_inode)(int, struct cleancache_filekey);
62740 void (*invalidate_fs)(int);
62741 -};
62742 +} __no_const;
62743
62744 extern struct cleancache_ops
62745 cleancache_register_ops(struct cleancache_ops *ops);
62746 diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
62747 index f9f5e9e..9fb4d36 100644
62748 --- a/include/linux/clk-provider.h
62749 +++ b/include/linux/clk-provider.h
62750 @@ -112,6 +112,7 @@ struct clk_ops {
62751 unsigned long);
62752 void (*init)(struct clk_hw *hw);
62753 };
62754 +typedef struct clk_ops __no_const clk_ops_no_const;
62755
62756 /**
62757 * struct clk_init_data - holds init data that's common to all clocks and is
62758 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
62759 index 412bc6c..c31666e 100644
62760 --- a/include/linux/compiler-gcc4.h
62761 +++ b/include/linux/compiler-gcc4.h
62762 @@ -32,6 +32,21 @@
62763 #define __linktime_error(message) __attribute__((__error__(message)))
62764
62765 #if __GNUC_MINOR__ >= 5
62766 +
62767 +#ifdef CONSTIFY_PLUGIN
62768 +#define __no_const __attribute__((no_const))
62769 +#define __do_const __attribute__((do_const))
62770 +#endif
62771 +
62772 +#ifdef SIZE_OVERFLOW_PLUGIN
62773 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
62774 +#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
62775 +#endif
62776 +
62777 +#ifdef LATENT_ENTROPY_PLUGIN
62778 +#define __latent_entropy __attribute__((latent_entropy))
62779 +#endif
62780 +
62781 /*
62782 * Mark a position in code as unreachable. This can be used to
62783 * suppress control flow warnings after asm blocks that transfer
62784 @@ -47,6 +62,11 @@
62785 #define __noclone __attribute__((__noclone__))
62786
62787 #endif
62788 +
62789 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
62790 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
62791 +#define __bos0(ptr) __bos((ptr), 0)
62792 +#define __bos1(ptr) __bos((ptr), 1)
62793 #endif
62794
62795 #if __GNUC_MINOR__ >= 6
62796 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
62797 index f430e41..38be90f 100644
62798 --- a/include/linux/compiler.h
62799 +++ b/include/linux/compiler.h
62800 @@ -5,31 +5,62 @@
62801
62802 #ifdef __CHECKER__
62803 # define __user __attribute__((noderef, address_space(1)))
62804 +# define __force_user __force __user
62805 # define __kernel __attribute__((address_space(0)))
62806 +# define __force_kernel __force __kernel
62807 # define __safe __attribute__((safe))
62808 # define __force __attribute__((force))
62809 # define __nocast __attribute__((nocast))
62810 # define __iomem __attribute__((noderef, address_space(2)))
62811 +# define __force_iomem __force __iomem
62812 # define __acquires(x) __attribute__((context(x,0,1)))
62813 # define __releases(x) __attribute__((context(x,1,0)))
62814 # define __acquire(x) __context__(x,1)
62815 # define __release(x) __context__(x,-1)
62816 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
62817 # define __percpu __attribute__((noderef, address_space(3)))
62818 +# define __force_percpu __force __percpu
62819 #ifdef CONFIG_SPARSE_RCU_POINTER
62820 # define __rcu __attribute__((noderef, address_space(4)))
62821 +# define __force_rcu __force __rcu
62822 #else
62823 # define __rcu
62824 +# define __force_rcu
62825 #endif
62826 extern void __chk_user_ptr(const volatile void __user *);
62827 extern void __chk_io_ptr(const volatile void __iomem *);
62828 +#elif defined(CHECKER_PLUGIN)
62829 +//# define __user
62830 +//# define __force_user
62831 +//# define __kernel
62832 +//# define __force_kernel
62833 +# define __safe
62834 +# define __force
62835 +# define __nocast
62836 +# define __iomem
62837 +# define __force_iomem
62838 +# define __chk_user_ptr(x) (void)0
62839 +# define __chk_io_ptr(x) (void)0
62840 +# define __builtin_warning(x, y...) (1)
62841 +# define __acquires(x)
62842 +# define __releases(x)
62843 +# define __acquire(x) (void)0
62844 +# define __release(x) (void)0
62845 +# define __cond_lock(x,c) (c)
62846 +# define __percpu
62847 +# define __force_percpu
62848 +# define __rcu
62849 +# define __force_rcu
62850 #else
62851 # define __user
62852 +# define __force_user
62853 # define __kernel
62854 +# define __force_kernel
62855 # define __safe
62856 # define __force
62857 # define __nocast
62858 # define __iomem
62859 +# define __force_iomem
62860 # define __chk_user_ptr(x) (void)0
62861 # define __chk_io_ptr(x) (void)0
62862 # define __builtin_warning(x, y...) (1)
62863 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
62864 # define __release(x) (void)0
62865 # define __cond_lock(x,c) (c)
62866 # define __percpu
62867 +# define __force_percpu
62868 # define __rcu
62869 +# define __force_rcu
62870 #endif
62871
62872 #ifdef __KERNEL__
62873 @@ -264,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
62874 # define __attribute_const__ /* unimplemented */
62875 #endif
62876
62877 +#ifndef __no_const
62878 +# define __no_const
62879 +#endif
62880 +
62881 +#ifndef __do_const
62882 +# define __do_const
62883 +#endif
62884 +
62885 +#ifndef __size_overflow
62886 +# define __size_overflow(...)
62887 +#endif
62888 +
62889 +#ifndef __intentional_overflow
62890 +# define __intentional_overflow(...)
62891 +#endif
62892 +
62893 +#ifndef __latent_entropy
62894 +# define __latent_entropy
62895 +#endif
62896 +
62897 /*
62898 * Tell gcc if a function is cold. The compiler will assume any path
62899 * directly leading to the call is unlikely.
62900 @@ -273,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
62901 #define __cold
62902 #endif
62903
62904 +#ifndef __alloc_size
62905 +#define __alloc_size(...)
62906 +#endif
62907 +
62908 +#ifndef __bos
62909 +#define __bos(ptr, arg)
62910 +#endif
62911 +
62912 +#ifndef __bos0
62913 +#define __bos0(ptr)
62914 +#endif
62915 +
62916 +#ifndef __bos1
62917 +#define __bos1(ptr)
62918 +#endif
62919 +
62920 /* Simple shorthand for a section definition */
62921 #ifndef __section
62922 # define __section(S) __attribute__ ((__section__(#S)))
62923 @@ -312,6 +381,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
62924 * use is to mediate communication between process-level code and irq/NMI
62925 * handlers, all running on the same CPU.
62926 */
62927 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
62928 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
62929 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
62930
62931 #endif /* __LINUX_COMPILER_H */
62932 diff --git a/include/linux/cred.h b/include/linux/cred.h
62933 index ebbed2c..908cc2c 100644
62934 --- a/include/linux/cred.h
62935 +++ b/include/linux/cred.h
62936 @@ -208,6 +208,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
62937 static inline void validate_process_creds(void)
62938 {
62939 }
62940 +static inline void validate_task_creds(struct task_struct *task)
62941 +{
62942 +}
62943 #endif
62944
62945 /**
62946 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
62947 index b92eadf..b4ecdc1 100644
62948 --- a/include/linux/crypto.h
62949 +++ b/include/linux/crypto.h
62950 @@ -373,7 +373,7 @@ struct cipher_tfm {
62951 const u8 *key, unsigned int keylen);
62952 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
62953 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
62954 -};
62955 +} __no_const;
62956
62957 struct hash_tfm {
62958 int (*init)(struct hash_desc *desc);
62959 @@ -394,13 +394,13 @@ struct compress_tfm {
62960 int (*cot_decompress)(struct crypto_tfm *tfm,
62961 const u8 *src, unsigned int slen,
62962 u8 *dst, unsigned int *dlen);
62963 -};
62964 +} __no_const;
62965
62966 struct rng_tfm {
62967 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
62968 unsigned int dlen);
62969 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
62970 -};
62971 +} __no_const;
62972
62973 #define crt_ablkcipher crt_u.ablkcipher
62974 #define crt_aead crt_u.aead
62975 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
62976 index 7925bf0..d5143d2 100644
62977 --- a/include/linux/decompress/mm.h
62978 +++ b/include/linux/decompress/mm.h
62979 @@ -77,7 +77,7 @@ static void free(void *where)
62980 * warnings when not needed (indeed large_malloc / large_free are not
62981 * needed by inflate */
62982
62983 -#define malloc(a) kmalloc(a, GFP_KERNEL)
62984 +#define malloc(a) kmalloc((a), GFP_KERNEL)
62985 #define free(a) kfree(a)
62986
62987 #define large_malloc(a) vmalloc(a)
62988 diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
62989 index 281c72a..6438a5e 100644
62990 --- a/include/linux/devfreq.h
62991 +++ b/include/linux/devfreq.h
62992 @@ -152,7 +152,7 @@ struct devfreq {
62993 struct device dev;
62994 struct devfreq_dev_profile *profile;
62995 const struct devfreq_governor *governor;
62996 - struct notifier_block nb;
62997 + notifier_block_no_const nb;
62998
62999 unsigned long polling_jiffies;
63000 unsigned long previous_freq;
63001 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
63002 index 94af418..b1ca7a2 100644
63003 --- a/include/linux/dma-mapping.h
63004 +++ b/include/linux/dma-mapping.h
63005 @@ -54,7 +54,7 @@ struct dma_map_ops {
63006 u64 (*get_required_mask)(struct device *dev);
63007 #endif
63008 int is_phys;
63009 -};
63010 +} __do_const;
63011
63012 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
63013
63014 diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
63015 index d3201e4..8281e63 100644
63016 --- a/include/linux/dmaengine.h
63017 +++ b/include/linux/dmaengine.h
63018 @@ -1018,9 +1018,9 @@ struct dma_pinned_list {
63019 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
63020 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
63021
63022 -dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
63023 +dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
63024 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
63025 -dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
63026 +dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
63027 struct dma_pinned_list *pinned_list, struct page *page,
63028 unsigned int offset, size_t len);
63029
63030 diff --git a/include/linux/efi.h b/include/linux/efi.h
63031 index 8670eb1..7eb3ade 100644
63032 --- a/include/linux/efi.h
63033 +++ b/include/linux/efi.h
63034 @@ -643,7 +643,7 @@ struct efivar_operations {
63035 efi_get_variable_t *get_variable;
63036 efi_get_next_variable_t *get_next_variable;
63037 efi_set_variable_t *set_variable;
63038 -};
63039 +} __no_const;
63040
63041 struct efivars {
63042 /*
63043 diff --git a/include/linux/elf.h b/include/linux/elf.h
63044 index 8c9048e..16a4665 100644
63045 --- a/include/linux/elf.h
63046 +++ b/include/linux/elf.h
63047 @@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
63048 #define elf_note elf32_note
63049 #define elf_addr_t Elf32_Off
63050 #define Elf_Half Elf32_Half
63051 +#define elf_dyn Elf32_Dyn
63052
63053 #else
63054
63055 @@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
63056 #define elf_note elf64_note
63057 #define elf_addr_t Elf64_Off
63058 #define Elf_Half Elf64_Half
63059 +#define elf_dyn Elf64_Dyn
63060
63061 #endif
63062
63063 diff --git a/include/linux/extcon.h b/include/linux/extcon.h
63064 index 2c26c14..8d0cb0f 100644
63065 --- a/include/linux/extcon.h
63066 +++ b/include/linux/extcon.h
63067 @@ -165,7 +165,7 @@ struct extcon_cable {
63068 * @previous_value the saved previous event value.
63069 */
63070 struct extcon_specific_cable_nb {
63071 - struct notifier_block internal_nb;
63072 + notifier_block_no_const internal_nb;
63073 struct notifier_block *user_nb;
63074 int cable_index;
63075 struct extcon_dev *edev;
63076 diff --git a/include/linux/filter.h b/include/linux/filter.h
63077 index 24d251f..7afb83d 100644
63078 --- a/include/linux/filter.h
63079 +++ b/include/linux/filter.h
63080 @@ -20,6 +20,7 @@ struct compat_sock_fprog {
63081
63082 struct sk_buff;
63083 struct sock;
63084 +struct bpf_jit_work;
63085
63086 struct sk_filter
63087 {
63088 @@ -27,6 +28,9 @@ struct sk_filter
63089 unsigned int len; /* Number of filter blocks */
63090 unsigned int (*bpf_func)(const struct sk_buff *skb,
63091 const struct sock_filter *filter);
63092 +#ifdef CONFIG_BPF_JIT
63093 + struct bpf_jit_work *work;
63094 +#endif
63095 struct rcu_head rcu;
63096 struct sock_filter insns[0];
63097 };
63098 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
63099 index 191501a..3952e37 100644
63100 --- a/include/linux/firewire.h
63101 +++ b/include/linux/firewire.h
63102 @@ -438,7 +438,7 @@ struct fw_iso_context {
63103 union {
63104 fw_iso_callback_t sc;
63105 fw_iso_mc_callback_t mc;
63106 - } callback;
63107 + } __no_const callback;
63108 void *callback_data;
63109 };
63110
63111 diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
63112 index 3044254..9767f41 100644
63113 --- a/include/linux/frontswap.h
63114 +++ b/include/linux/frontswap.h
63115 @@ -11,7 +11,7 @@ struct frontswap_ops {
63116 int (*load)(unsigned, pgoff_t, struct page *);
63117 void (*invalidate_page)(unsigned, pgoff_t);
63118 void (*invalidate_area)(unsigned);
63119 -};
63120 +} __no_const;
63121
63122 extern bool frontswap_enabled;
63123 extern struct frontswap_ops
63124 diff --git a/include/linux/fs.h b/include/linux/fs.h
63125 index 75fe9a1..72a4a6b 100644
63126 --- a/include/linux/fs.h
63127 +++ b/include/linux/fs.h
63128 @@ -1543,7 +1543,8 @@ struct file_operations {
63129 int (*setlease)(struct file *, long, struct file_lock **);
63130 long (*fallocate)(struct file *file, int mode, loff_t offset,
63131 loff_t len);
63132 -};
63133 +} __do_const;
63134 +typedef struct file_operations __no_const file_operations_no_const;
63135
63136 struct inode_operations {
63137 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
63138 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
63139 index 003dc0f..3c4ea97 100644
63140 --- a/include/linux/fs_struct.h
63141 +++ b/include/linux/fs_struct.h
63142 @@ -6,7 +6,7 @@
63143 #include <linux/seqlock.h>
63144
63145 struct fs_struct {
63146 - int users;
63147 + atomic_t users;
63148 spinlock_t lock;
63149 seqcount_t seq;
63150 int umask;
63151 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
63152 index ce31408..b1ad003 100644
63153 --- a/include/linux/fscache-cache.h
63154 +++ b/include/linux/fscache-cache.h
63155 @@ -102,7 +102,7 @@ struct fscache_operation {
63156 fscache_operation_release_t release;
63157 };
63158
63159 -extern atomic_t fscache_op_debug_id;
63160 +extern atomic_unchecked_t fscache_op_debug_id;
63161 extern void fscache_op_work_func(struct work_struct *work);
63162
63163 extern void fscache_enqueue_operation(struct fscache_operation *);
63164 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
63165 {
63166 INIT_WORK(&op->work, fscache_op_work_func);
63167 atomic_set(&op->usage, 1);
63168 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63169 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63170 op->processor = processor;
63171 op->release = release;
63172 INIT_LIST_HEAD(&op->pend_link);
63173 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
63174 index 0fbfb46..52a6556 100644
63175 --- a/include/linux/fsnotify.h
63176 +++ b/include/linux/fsnotify.h
63177 @@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
63178 */
63179 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
63180 {
63181 - return kstrdup(name, GFP_KERNEL);
63182 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
63183 }
63184
63185 /*
63186 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
63187 index 63d966d..cdcb717 100644
63188 --- a/include/linux/fsnotify_backend.h
63189 +++ b/include/linux/fsnotify_backend.h
63190 @@ -105,6 +105,7 @@ struct fsnotify_ops {
63191 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
63192 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
63193 };
63194 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
63195
63196 /*
63197 * A group is a "thing" that wants to receive notification about filesystem
63198 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
63199 index 642928c..e6c83a7 100644
63200 --- a/include/linux/ftrace_event.h
63201 +++ b/include/linux/ftrace_event.h
63202 @@ -97,7 +97,7 @@ struct trace_event_functions {
63203 trace_print_func raw;
63204 trace_print_func hex;
63205 trace_print_func binary;
63206 -};
63207 +} __no_const;
63208
63209 struct trace_event {
63210 struct hlist_node node;
63211 @@ -266,7 +266,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
63212 extern int trace_add_event_call(struct ftrace_event_call *call);
63213 extern void trace_remove_event_call(struct ftrace_event_call *call);
63214
63215 -#define is_signed_type(type) (((type)(-1)) < 0)
63216 +#define is_signed_type(type) (((type)(-1)) < (type)1)
63217
63218 int trace_set_clr_event(const char *system, const char *event, int set);
63219
63220 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
63221 index 4f440b3..342233a 100644
63222 --- a/include/linux/genhd.h
63223 +++ b/include/linux/genhd.h
63224 @@ -190,7 +190,7 @@ struct gendisk {
63225 struct kobject *slave_dir;
63226
63227 struct timer_rand_state *random;
63228 - atomic_t sync_io; /* RAID */
63229 + atomic_unchecked_t sync_io; /* RAID */
63230 struct disk_events *ev;
63231 #ifdef CONFIG_BLK_DEV_INTEGRITY
63232 struct blk_integrity *integrity;
63233 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
63234 index d0a7967..63c4c47 100644
63235 --- a/include/linux/gfp.h
63236 +++ b/include/linux/gfp.h
63237 @@ -35,6 +35,12 @@ struct vm_area_struct;
63238 #define ___GFP_OTHER_NODE 0x800000u
63239 #define ___GFP_WRITE 0x1000000u
63240
63241 +#ifdef CONFIG_PAX_USERCOPY_SLABS
63242 +#define ___GFP_USERCOPY 0x2000000u
63243 +#else
63244 +#define ___GFP_USERCOPY 0
63245 +#endif
63246 +
63247 /*
63248 * GFP bitmasks..
63249 *
63250 @@ -89,6 +95,7 @@ struct vm_area_struct;
63251 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
63252 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
63253 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
63254 +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
63255
63256 /*
63257 * This may seem redundant, but it's a way of annotating false positives vs.
63258 @@ -96,7 +103,7 @@ struct vm_area_struct;
63259 */
63260 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
63261
63262 -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
63263 +#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
63264 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
63265
63266 /* This equals 0, but use constants in case they ever change */
63267 @@ -150,6 +157,8 @@ struct vm_area_struct;
63268 /* 4GB DMA on some platforms */
63269 #define GFP_DMA32 __GFP_DMA32
63270
63271 +#define GFP_USERCOPY __GFP_USERCOPY
63272 +
63273 /* Convert GFP flags to their corresponding migrate type */
63274 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
63275 {
63276 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
63277 new file mode 100644
63278 index 0000000..ebe6d72
63279 --- /dev/null
63280 +++ b/include/linux/gracl.h
63281 @@ -0,0 +1,319 @@
63282 +#ifndef GR_ACL_H
63283 +#define GR_ACL_H
63284 +
63285 +#include <linux/grdefs.h>
63286 +#include <linux/resource.h>
63287 +#include <linux/capability.h>
63288 +#include <linux/dcache.h>
63289 +#include <asm/resource.h>
63290 +
63291 +/* Major status information */
63292 +
63293 +#define GR_VERSION "grsecurity 2.9.1"
63294 +#define GRSECURITY_VERSION 0x2901
63295 +
63296 +enum {
63297 + GR_SHUTDOWN = 0,
63298 + GR_ENABLE = 1,
63299 + GR_SPROLE = 2,
63300 + GR_RELOAD = 3,
63301 + GR_SEGVMOD = 4,
63302 + GR_STATUS = 5,
63303 + GR_UNSPROLE = 6,
63304 + GR_PASSSET = 7,
63305 + GR_SPROLEPAM = 8,
63306 +};
63307 +
63308 +/* Password setup definitions
63309 + * kernel/grhash.c */
63310 +enum {
63311 + GR_PW_LEN = 128,
63312 + GR_SALT_LEN = 16,
63313 + GR_SHA_LEN = 32,
63314 +};
63315 +
63316 +enum {
63317 + GR_SPROLE_LEN = 64,
63318 +};
63319 +
63320 +enum {
63321 + GR_NO_GLOB = 0,
63322 + GR_REG_GLOB,
63323 + GR_CREATE_GLOB
63324 +};
63325 +
63326 +#define GR_NLIMITS 32
63327 +
63328 +/* Begin Data Structures */
63329 +
63330 +struct sprole_pw {
63331 + unsigned char *rolename;
63332 + unsigned char salt[GR_SALT_LEN];
63333 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
63334 +};
63335 +
63336 +struct name_entry {
63337 + __u32 key;
63338 + ino_t inode;
63339 + dev_t device;
63340 + char *name;
63341 + __u16 len;
63342 + __u8 deleted;
63343 + struct name_entry *prev;
63344 + struct name_entry *next;
63345 +};
63346 +
63347 +struct inodev_entry {
63348 + struct name_entry *nentry;
63349 + struct inodev_entry *prev;
63350 + struct inodev_entry *next;
63351 +};
63352 +
63353 +struct acl_role_db {
63354 + struct acl_role_label **r_hash;
63355 + __u32 r_size;
63356 +};
63357 +
63358 +struct inodev_db {
63359 + struct inodev_entry **i_hash;
63360 + __u32 i_size;
63361 +};
63362 +
63363 +struct name_db {
63364 + struct name_entry **n_hash;
63365 + __u32 n_size;
63366 +};
63367 +
63368 +struct crash_uid {
63369 + uid_t uid;
63370 + unsigned long expires;
63371 +};
63372 +
63373 +struct gr_hash_struct {
63374 + void **table;
63375 + void **nametable;
63376 + void *first;
63377 + __u32 table_size;
63378 + __u32 used_size;
63379 + int type;
63380 +};
63381 +
63382 +/* Userspace Grsecurity ACL data structures */
63383 +
63384 +struct acl_subject_label {
63385 + char *filename;
63386 + ino_t inode;
63387 + dev_t device;
63388 + __u32 mode;
63389 + kernel_cap_t cap_mask;
63390 + kernel_cap_t cap_lower;
63391 + kernel_cap_t cap_invert_audit;
63392 +
63393 + struct rlimit res[GR_NLIMITS];
63394 + __u32 resmask;
63395 +
63396 + __u8 user_trans_type;
63397 + __u8 group_trans_type;
63398 + uid_t *user_transitions;
63399 + gid_t *group_transitions;
63400 + __u16 user_trans_num;
63401 + __u16 group_trans_num;
63402 +
63403 + __u32 sock_families[2];
63404 + __u32 ip_proto[8];
63405 + __u32 ip_type;
63406 + struct acl_ip_label **ips;
63407 + __u32 ip_num;
63408 + __u32 inaddr_any_override;
63409 +
63410 + __u32 crashes;
63411 + unsigned long expires;
63412 +
63413 + struct acl_subject_label *parent_subject;
63414 + struct gr_hash_struct *hash;
63415 + struct acl_subject_label *prev;
63416 + struct acl_subject_label *next;
63417 +
63418 + struct acl_object_label **obj_hash;
63419 + __u32 obj_hash_size;
63420 + __u16 pax_flags;
63421 +};
63422 +
63423 +struct role_allowed_ip {
63424 + __u32 addr;
63425 + __u32 netmask;
63426 +
63427 + struct role_allowed_ip *prev;
63428 + struct role_allowed_ip *next;
63429 +};
63430 +
63431 +struct role_transition {
63432 + char *rolename;
63433 +
63434 + struct role_transition *prev;
63435 + struct role_transition *next;
63436 +};
63437 +
63438 +struct acl_role_label {
63439 + char *rolename;
63440 + uid_t uidgid;
63441 + __u16 roletype;
63442 +
63443 + __u16 auth_attempts;
63444 + unsigned long expires;
63445 +
63446 + struct acl_subject_label *root_label;
63447 + struct gr_hash_struct *hash;
63448 +
63449 + struct acl_role_label *prev;
63450 + struct acl_role_label *next;
63451 +
63452 + struct role_transition *transitions;
63453 + struct role_allowed_ip *allowed_ips;
63454 + uid_t *domain_children;
63455 + __u16 domain_child_num;
63456 +
63457 + umode_t umask;
63458 +
63459 + struct acl_subject_label **subj_hash;
63460 + __u32 subj_hash_size;
63461 +};
63462 +
63463 +struct user_acl_role_db {
63464 + struct acl_role_label **r_table;
63465 + __u32 num_pointers; /* Number of allocations to track */
63466 + __u32 num_roles; /* Number of roles */
63467 + __u32 num_domain_children; /* Number of domain children */
63468 + __u32 num_subjects; /* Number of subjects */
63469 + __u32 num_objects; /* Number of objects */
63470 +};
63471 +
63472 +struct acl_object_label {
63473 + char *filename;
63474 + ino_t inode;
63475 + dev_t device;
63476 + __u32 mode;
63477 +
63478 + struct acl_subject_label *nested;
63479 + struct acl_object_label *globbed;
63480 +
63481 + /* next two structures not used */
63482 +
63483 + struct acl_object_label *prev;
63484 + struct acl_object_label *next;
63485 +};
63486 +
63487 +struct acl_ip_label {
63488 + char *iface;
63489 + __u32 addr;
63490 + __u32 netmask;
63491 + __u16 low, high;
63492 + __u8 mode;
63493 + __u32 type;
63494 + __u32 proto[8];
63495 +
63496 + /* next two structures not used */
63497 +
63498 + struct acl_ip_label *prev;
63499 + struct acl_ip_label *next;
63500 +};
63501 +
63502 +struct gr_arg {
63503 + struct user_acl_role_db role_db;
63504 + unsigned char pw[GR_PW_LEN];
63505 + unsigned char salt[GR_SALT_LEN];
63506 + unsigned char sum[GR_SHA_LEN];
63507 + unsigned char sp_role[GR_SPROLE_LEN];
63508 + struct sprole_pw *sprole_pws;
63509 + dev_t segv_device;
63510 + ino_t segv_inode;
63511 + uid_t segv_uid;
63512 + __u16 num_sprole_pws;
63513 + __u16 mode;
63514 +};
63515 +
63516 +struct gr_arg_wrapper {
63517 + struct gr_arg *arg;
63518 + __u32 version;
63519 + __u32 size;
63520 +};
63521 +
63522 +struct subject_map {
63523 + struct acl_subject_label *user;
63524 + struct acl_subject_label *kernel;
63525 + struct subject_map *prev;
63526 + struct subject_map *next;
63527 +};
63528 +
63529 +struct acl_subj_map_db {
63530 + struct subject_map **s_hash;
63531 + __u32 s_size;
63532 +};
63533 +
63534 +/* End Data Structures Section */
63535 +
63536 +/* Hash functions generated by empirical testing by Brad Spengler
63537 + Makes good use of the low bits of the inode. Generally 0-1 times
63538 + in loop for successful match. 0-3 for unsuccessful match.
63539 + Shift/add algorithm with modulus of table size and an XOR*/
63540 +
63541 +static __inline__ unsigned int
63542 +gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
63543 +{
63544 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
63545 +}
63546 +
63547 + static __inline__ unsigned int
63548 +gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
63549 +{
63550 + return ((const unsigned long)userp % sz);
63551 +}
63552 +
63553 +static __inline__ unsigned int
63554 +gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
63555 +{
63556 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
63557 +}
63558 +
63559 +static __inline__ unsigned int
63560 +gr_nhash(const char *name, const __u16 len, const unsigned int sz)
63561 +{
63562 + return full_name_hash((const unsigned char *)name, len) % sz;
63563 +}
63564 +
63565 +#define FOR_EACH_ROLE_START(role) \
63566 + role = role_list; \
63567 + while (role) {
63568 +
63569 +#define FOR_EACH_ROLE_END(role) \
63570 + role = role->prev; \
63571 + }
63572 +
63573 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
63574 + subj = NULL; \
63575 + iter = 0; \
63576 + while (iter < role->subj_hash_size) { \
63577 + if (subj == NULL) \
63578 + subj = role->subj_hash[iter]; \
63579 + if (subj == NULL) { \
63580 + iter++; \
63581 + continue; \
63582 + }
63583 +
63584 +#define FOR_EACH_SUBJECT_END(subj,iter) \
63585 + subj = subj->next; \
63586 + if (subj == NULL) \
63587 + iter++; \
63588 + }
63589 +
63590 +
63591 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
63592 + subj = role->hash->first; \
63593 + while (subj != NULL) {
63594 +
63595 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
63596 + subj = subj->next; \
63597 + }
63598 +
63599 +#endif
63600 +
63601 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
63602 new file mode 100644
63603 index 0000000..323ecf2
63604 --- /dev/null
63605 +++ b/include/linux/gralloc.h
63606 @@ -0,0 +1,9 @@
63607 +#ifndef __GRALLOC_H
63608 +#define __GRALLOC_H
63609 +
63610 +void acl_free_all(void);
63611 +int acl_alloc_stack_init(unsigned long size);
63612 +void *acl_alloc(unsigned long len);
63613 +void *acl_alloc_num(unsigned long num, unsigned long len);
63614 +
63615 +#endif
63616 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
63617 new file mode 100644
63618 index 0000000..b30e9bc
63619 --- /dev/null
63620 +++ b/include/linux/grdefs.h
63621 @@ -0,0 +1,140 @@
63622 +#ifndef GRDEFS_H
63623 +#define GRDEFS_H
63624 +
63625 +/* Begin grsecurity status declarations */
63626 +
63627 +enum {
63628 + GR_READY = 0x01,
63629 + GR_STATUS_INIT = 0x00 // disabled state
63630 +};
63631 +
63632 +/* Begin ACL declarations */
63633 +
63634 +/* Role flags */
63635 +
63636 +enum {
63637 + GR_ROLE_USER = 0x0001,
63638 + GR_ROLE_GROUP = 0x0002,
63639 + GR_ROLE_DEFAULT = 0x0004,
63640 + GR_ROLE_SPECIAL = 0x0008,
63641 + GR_ROLE_AUTH = 0x0010,
63642 + GR_ROLE_NOPW = 0x0020,
63643 + GR_ROLE_GOD = 0x0040,
63644 + GR_ROLE_LEARN = 0x0080,
63645 + GR_ROLE_TPE = 0x0100,
63646 + GR_ROLE_DOMAIN = 0x0200,
63647 + GR_ROLE_PAM = 0x0400,
63648 + GR_ROLE_PERSIST = 0x0800
63649 +};
63650 +
63651 +/* ACL Subject and Object mode flags */
63652 +enum {
63653 + GR_DELETED = 0x80000000
63654 +};
63655 +
63656 +/* ACL Object-only mode flags */
63657 +enum {
63658 + GR_READ = 0x00000001,
63659 + GR_APPEND = 0x00000002,
63660 + GR_WRITE = 0x00000004,
63661 + GR_EXEC = 0x00000008,
63662 + GR_FIND = 0x00000010,
63663 + GR_INHERIT = 0x00000020,
63664 + GR_SETID = 0x00000040,
63665 + GR_CREATE = 0x00000080,
63666 + GR_DELETE = 0x00000100,
63667 + GR_LINK = 0x00000200,
63668 + GR_AUDIT_READ = 0x00000400,
63669 + GR_AUDIT_APPEND = 0x00000800,
63670 + GR_AUDIT_WRITE = 0x00001000,
63671 + GR_AUDIT_EXEC = 0x00002000,
63672 + GR_AUDIT_FIND = 0x00004000,
63673 + GR_AUDIT_INHERIT= 0x00008000,
63674 + GR_AUDIT_SETID = 0x00010000,
63675 + GR_AUDIT_CREATE = 0x00020000,
63676 + GR_AUDIT_DELETE = 0x00040000,
63677 + GR_AUDIT_LINK = 0x00080000,
63678 + GR_PTRACERD = 0x00100000,
63679 + GR_NOPTRACE = 0x00200000,
63680 + GR_SUPPRESS = 0x00400000,
63681 + GR_NOLEARN = 0x00800000,
63682 + GR_INIT_TRANSFER= 0x01000000
63683 +};
63684 +
63685 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
63686 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
63687 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
63688 +
63689 +/* ACL subject-only mode flags */
63690 +enum {
63691 + GR_KILL = 0x00000001,
63692 + GR_VIEW = 0x00000002,
63693 + GR_PROTECTED = 0x00000004,
63694 + GR_LEARN = 0x00000008,
63695 + GR_OVERRIDE = 0x00000010,
63696 + /* just a placeholder, this mode is only used in userspace */
63697 + GR_DUMMY = 0x00000020,
63698 + GR_PROTSHM = 0x00000040,
63699 + GR_KILLPROC = 0x00000080,
63700 + GR_KILLIPPROC = 0x00000100,
63701 + /* just a placeholder, this mode is only used in userspace */
63702 + GR_NOTROJAN = 0x00000200,
63703 + GR_PROTPROCFD = 0x00000400,
63704 + GR_PROCACCT = 0x00000800,
63705 + GR_RELAXPTRACE = 0x00001000,
63706 + GR_NESTED = 0x00002000,
63707 + GR_INHERITLEARN = 0x00004000,
63708 + GR_PROCFIND = 0x00008000,
63709 + GR_POVERRIDE = 0x00010000,
63710 + GR_KERNELAUTH = 0x00020000,
63711 + GR_ATSECURE = 0x00040000,
63712 + GR_SHMEXEC = 0x00080000
63713 +};
63714 +
63715 +enum {
63716 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
63717 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
63718 + GR_PAX_ENABLE_MPROTECT = 0x0004,
63719 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
63720 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
63721 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
63722 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
63723 + GR_PAX_DISABLE_MPROTECT = 0x0400,
63724 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
63725 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
63726 +};
63727 +
63728 +enum {
63729 + GR_ID_USER = 0x01,
63730 + GR_ID_GROUP = 0x02,
63731 +};
63732 +
63733 +enum {
63734 + GR_ID_ALLOW = 0x01,
63735 + GR_ID_DENY = 0x02,
63736 +};
63737 +
63738 +#define GR_CRASH_RES 31
63739 +#define GR_UIDTABLE_MAX 500
63740 +
63741 +/* begin resource learning section */
63742 +enum {
63743 + GR_RLIM_CPU_BUMP = 60,
63744 + GR_RLIM_FSIZE_BUMP = 50000,
63745 + GR_RLIM_DATA_BUMP = 10000,
63746 + GR_RLIM_STACK_BUMP = 1000,
63747 + GR_RLIM_CORE_BUMP = 10000,
63748 + GR_RLIM_RSS_BUMP = 500000,
63749 + GR_RLIM_NPROC_BUMP = 1,
63750 + GR_RLIM_NOFILE_BUMP = 5,
63751 + GR_RLIM_MEMLOCK_BUMP = 50000,
63752 + GR_RLIM_AS_BUMP = 500000,
63753 + GR_RLIM_LOCKS_BUMP = 2,
63754 + GR_RLIM_SIGPENDING_BUMP = 5,
63755 + GR_RLIM_MSGQUEUE_BUMP = 10000,
63756 + GR_RLIM_NICE_BUMP = 1,
63757 + GR_RLIM_RTPRIO_BUMP = 1,
63758 + GR_RLIM_RTTIME_BUMP = 1000000
63759 +};
63760 +
63761 +#endif
63762 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
63763 new file mode 100644
63764 index 0000000..c9292f7
63765 --- /dev/null
63766 +++ b/include/linux/grinternal.h
63767 @@ -0,0 +1,223 @@
63768 +#ifndef __GRINTERNAL_H
63769 +#define __GRINTERNAL_H
63770 +
63771 +#ifdef CONFIG_GRKERNSEC
63772 +
63773 +#include <linux/fs.h>
63774 +#include <linux/mnt_namespace.h>
63775 +#include <linux/nsproxy.h>
63776 +#include <linux/gracl.h>
63777 +#include <linux/grdefs.h>
63778 +#include <linux/grmsg.h>
63779 +
63780 +void gr_add_learn_entry(const char *fmt, ...)
63781 + __attribute__ ((format (printf, 1, 2)));
63782 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
63783 + const struct vfsmount *mnt);
63784 +__u32 gr_check_create(const struct dentry *new_dentry,
63785 + const struct dentry *parent,
63786 + const struct vfsmount *mnt, const __u32 mode);
63787 +int gr_check_protected_task(const struct task_struct *task);
63788 +__u32 to_gr_audit(const __u32 reqmode);
63789 +int gr_set_acls(const int type);
63790 +int gr_apply_subject_to_task(struct task_struct *task);
63791 +int gr_acl_is_enabled(void);
63792 +char gr_roletype_to_char(void);
63793 +
63794 +void gr_handle_alertkill(struct task_struct *task);
63795 +char *gr_to_filename(const struct dentry *dentry,
63796 + const struct vfsmount *mnt);
63797 +char *gr_to_filename1(const struct dentry *dentry,
63798 + const struct vfsmount *mnt);
63799 +char *gr_to_filename2(const struct dentry *dentry,
63800 + const struct vfsmount *mnt);
63801 +char *gr_to_filename3(const struct dentry *dentry,
63802 + const struct vfsmount *mnt);
63803 +
63804 +extern int grsec_enable_ptrace_readexec;
63805 +extern int grsec_enable_harden_ptrace;
63806 +extern int grsec_enable_link;
63807 +extern int grsec_enable_fifo;
63808 +extern int grsec_enable_execve;
63809 +extern int grsec_enable_shm;
63810 +extern int grsec_enable_execlog;
63811 +extern int grsec_enable_signal;
63812 +extern int grsec_enable_audit_ptrace;
63813 +extern int grsec_enable_forkfail;
63814 +extern int grsec_enable_time;
63815 +extern int grsec_enable_rofs;
63816 +extern int grsec_enable_chroot_shmat;
63817 +extern int grsec_enable_chroot_mount;
63818 +extern int grsec_enable_chroot_double;
63819 +extern int grsec_enable_chroot_pivot;
63820 +extern int grsec_enable_chroot_chdir;
63821 +extern int grsec_enable_chroot_chmod;
63822 +extern int grsec_enable_chroot_mknod;
63823 +extern int grsec_enable_chroot_fchdir;
63824 +extern int grsec_enable_chroot_nice;
63825 +extern int grsec_enable_chroot_execlog;
63826 +extern int grsec_enable_chroot_caps;
63827 +extern int grsec_enable_chroot_sysctl;
63828 +extern int grsec_enable_chroot_unix;
63829 +extern int grsec_enable_symlinkown;
63830 +extern int grsec_symlinkown_gid;
63831 +extern int grsec_enable_tpe;
63832 +extern int grsec_tpe_gid;
63833 +extern int grsec_enable_tpe_all;
63834 +extern int grsec_enable_tpe_invert;
63835 +extern int grsec_enable_socket_all;
63836 +extern int grsec_socket_all_gid;
63837 +extern int grsec_enable_socket_client;
63838 +extern int grsec_socket_client_gid;
63839 +extern int grsec_enable_socket_server;
63840 +extern int grsec_socket_server_gid;
63841 +extern int grsec_audit_gid;
63842 +extern int grsec_enable_group;
63843 +extern int grsec_enable_audit_textrel;
63844 +extern int grsec_enable_log_rwxmaps;
63845 +extern int grsec_enable_mount;
63846 +extern int grsec_enable_chdir;
63847 +extern int grsec_resource_logging;
63848 +extern int grsec_enable_blackhole;
63849 +extern int grsec_lastack_retries;
63850 +extern int grsec_enable_brute;
63851 +extern int grsec_lock;
63852 +
63853 +extern spinlock_t grsec_alert_lock;
63854 +extern unsigned long grsec_alert_wtime;
63855 +extern unsigned long grsec_alert_fyet;
63856 +
63857 +extern spinlock_t grsec_audit_lock;
63858 +
63859 +extern rwlock_t grsec_exec_file_lock;
63860 +
63861 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
63862 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
63863 + (tsk)->exec_file->f_vfsmnt) : "/")
63864 +
63865 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
63866 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
63867 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
63868 +
63869 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
63870 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
63871 + (tsk)->exec_file->f_vfsmnt) : "/")
63872 +
63873 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
63874 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
63875 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
63876 +
63877 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
63878 +
63879 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
63880 +
63881 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
63882 + (task)->pid, (cred)->uid, \
63883 + (cred)->euid, (cred)->gid, (cred)->egid, \
63884 + gr_parent_task_fullpath(task), \
63885 + (task)->real_parent->comm, (task)->real_parent->pid, \
63886 + (pcred)->uid, (pcred)->euid, \
63887 + (pcred)->gid, (pcred)->egid
63888 +
63889 +#define GR_CHROOT_CAPS {{ \
63890 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
63891 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
63892 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
63893 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
63894 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
63895 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
63896 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
63897 +
63898 +#define security_learn(normal_msg,args...) \
63899 +({ \
63900 + read_lock(&grsec_exec_file_lock); \
63901 + gr_add_learn_entry(normal_msg "\n", ## args); \
63902 + read_unlock(&grsec_exec_file_lock); \
63903 +})
63904 +
63905 +enum {
63906 + GR_DO_AUDIT,
63907 + GR_DONT_AUDIT,
63908 + /* used for non-audit messages that we shouldn't kill the task on */
63909 + GR_DONT_AUDIT_GOOD
63910 +};
63911 +
63912 +enum {
63913 + GR_TTYSNIFF,
63914 + GR_RBAC,
63915 + GR_RBAC_STR,
63916 + GR_STR_RBAC,
63917 + GR_RBAC_MODE2,
63918 + GR_RBAC_MODE3,
63919 + GR_FILENAME,
63920 + GR_SYSCTL_HIDDEN,
63921 + GR_NOARGS,
63922 + GR_ONE_INT,
63923 + GR_ONE_INT_TWO_STR,
63924 + GR_ONE_STR,
63925 + GR_STR_INT,
63926 + GR_TWO_STR_INT,
63927 + GR_TWO_INT,
63928 + GR_TWO_U64,
63929 + GR_THREE_INT,
63930 + GR_FIVE_INT_TWO_STR,
63931 + GR_TWO_STR,
63932 + GR_THREE_STR,
63933 + GR_FOUR_STR,
63934 + GR_STR_FILENAME,
63935 + GR_FILENAME_STR,
63936 + GR_FILENAME_TWO_INT,
63937 + GR_FILENAME_TWO_INT_STR,
63938 + GR_TEXTREL,
63939 + GR_PTRACE,
63940 + GR_RESOURCE,
63941 + GR_CAP,
63942 + GR_SIG,
63943 + GR_SIG2,
63944 + GR_CRASH1,
63945 + GR_CRASH2,
63946 + GR_PSACCT,
63947 + GR_RWXMAP
63948 +};
63949 +
63950 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
63951 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
63952 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
63953 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
63954 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
63955 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
63956 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
63957 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
63958 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
63959 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
63960 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
63961 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
63962 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
63963 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
63964 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
63965 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
63966 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
63967 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
63968 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
63969 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
63970 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
63971 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
63972 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
63973 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
63974 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
63975 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
63976 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
63977 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
63978 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
63979 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
63980 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
63981 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
63982 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
63983 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
63984 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
63985 +
63986 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
63987 +
63988 +#endif
63989 +
63990 +#endif
63991 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
63992 new file mode 100644
63993 index 0000000..2bd4c8d
63994 --- /dev/null
63995 +++ b/include/linux/grmsg.h
63996 @@ -0,0 +1,111 @@
63997 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
63998 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
63999 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
64000 +#define GR_STOPMOD_MSG "denied modification of module state by "
64001 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
64002 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
64003 +#define GR_IOPERM_MSG "denied use of ioperm() by "
64004 +#define GR_IOPL_MSG "denied use of iopl() by "
64005 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
64006 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
64007 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
64008 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
64009 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
64010 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
64011 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
64012 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
64013 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
64014 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
64015 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
64016 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
64017 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
64018 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
64019 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
64020 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
64021 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
64022 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
64023 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
64024 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
64025 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
64026 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
64027 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
64028 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
64029 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
64030 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
64031 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
64032 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
64033 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
64034 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
64035 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
64036 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
64037 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
64038 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
64039 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
64040 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
64041 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
64042 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
64043 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
64044 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
64045 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
64046 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
64047 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
64048 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
64049 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
64050 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
64051 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
64052 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
64053 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
64054 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
64055 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
64056 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
64057 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
64058 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
64059 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
64060 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
64061 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
64062 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
64063 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
64064 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
64065 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
64066 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
64067 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
64068 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
64069 +#define GR_NICE_CHROOT_MSG "denied priority change by "
64070 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
64071 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
64072 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
64073 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
64074 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
64075 +#define GR_TIME_MSG "time set by "
64076 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
64077 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
64078 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
64079 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
64080 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
64081 +#define GR_BIND_MSG "denied bind() by "
64082 +#define GR_CONNECT_MSG "denied connect() by "
64083 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
64084 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
64085 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
64086 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
64087 +#define GR_CAP_ACL_MSG "use of %s denied for "
64088 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
64089 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
64090 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
64091 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
64092 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
64093 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
64094 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
64095 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
64096 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
64097 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
64098 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
64099 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
64100 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
64101 +#define GR_VM86_MSG "denied use of vm86 by "
64102 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
64103 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
64104 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
64105 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
64106 +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
64107 +#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
64108 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
64109 new file mode 100644
64110 index 0000000..f2f5d5b
64111 --- /dev/null
64112 +++ b/include/linux/grsecurity.h
64113 @@ -0,0 +1,239 @@
64114 +#ifndef GR_SECURITY_H
64115 +#define GR_SECURITY_H
64116 +#include <linux/fs.h>
64117 +#include <linux/fs_struct.h>
64118 +#include <linux/binfmts.h>
64119 +#include <linux/gracl.h>
64120 +
64121 +/* notify of brain-dead configs */
64122 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64123 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
64124 +#endif
64125 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
64126 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
64127 +#endif
64128 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
64129 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
64130 +#endif
64131 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
64132 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
64133 +#endif
64134 +
64135 +#include <linux/compat.h>
64136 +
64137 +struct user_arg_ptr {
64138 +#ifdef CONFIG_COMPAT
64139 + bool is_compat;
64140 +#endif
64141 + union {
64142 + const char __user *const __user *native;
64143 +#ifdef CONFIG_COMPAT
64144 + const compat_uptr_t __user *compat;
64145 +#endif
64146 + } ptr;
64147 +};
64148 +
64149 +void gr_handle_brute_attach(unsigned long mm_flags);
64150 +void gr_handle_brute_check(void);
64151 +void gr_handle_kernel_exploit(void);
64152 +int gr_process_user_ban(void);
64153 +
64154 +char gr_roletype_to_char(void);
64155 +
64156 +int gr_acl_enable_at_secure(void);
64157 +
64158 +int gr_check_user_change(int real, int effective, int fs);
64159 +int gr_check_group_change(int real, int effective, int fs);
64160 +
64161 +void gr_del_task_from_ip_table(struct task_struct *p);
64162 +
64163 +int gr_pid_is_chrooted(struct task_struct *p);
64164 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
64165 +int gr_handle_chroot_nice(void);
64166 +int gr_handle_chroot_sysctl(const int op);
64167 +int gr_handle_chroot_setpriority(struct task_struct *p,
64168 + const int niceval);
64169 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
64170 +int gr_handle_chroot_chroot(const struct dentry *dentry,
64171 + const struct vfsmount *mnt);
64172 +void gr_handle_chroot_chdir(struct path *path);
64173 +int gr_handle_chroot_chmod(const struct dentry *dentry,
64174 + const struct vfsmount *mnt, const int mode);
64175 +int gr_handle_chroot_mknod(const struct dentry *dentry,
64176 + const struct vfsmount *mnt, const int mode);
64177 +int gr_handle_chroot_mount(const struct dentry *dentry,
64178 + const struct vfsmount *mnt,
64179 + const char *dev_name);
64180 +int gr_handle_chroot_pivot(void);
64181 +int gr_handle_chroot_unix(const pid_t pid);
64182 +
64183 +int gr_handle_rawio(const struct inode *inode);
64184 +
64185 +void gr_handle_ioperm(void);
64186 +void gr_handle_iopl(void);
64187 +
64188 +umode_t gr_acl_umask(void);
64189 +
64190 +int gr_tpe_allow(const struct file *file);
64191 +
64192 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
64193 +void gr_clear_chroot_entries(struct task_struct *task);
64194 +
64195 +void gr_log_forkfail(const int retval);
64196 +void gr_log_timechange(void);
64197 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
64198 +void gr_log_chdir(const struct dentry *dentry,
64199 + const struct vfsmount *mnt);
64200 +void gr_log_chroot_exec(const struct dentry *dentry,
64201 + const struct vfsmount *mnt);
64202 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
64203 +void gr_log_remount(const char *devname, const int retval);
64204 +void gr_log_unmount(const char *devname, const int retval);
64205 +void gr_log_mount(const char *from, const char *to, const int retval);
64206 +void gr_log_textrel(struct vm_area_struct *vma);
64207 +void gr_log_rwxmmap(struct file *file);
64208 +void gr_log_rwxmprotect(struct file *file);
64209 +
64210 +int gr_handle_follow_link(const struct inode *parent,
64211 + const struct inode *inode,
64212 + const struct dentry *dentry,
64213 + const struct vfsmount *mnt);
64214 +int gr_handle_fifo(const struct dentry *dentry,
64215 + const struct vfsmount *mnt,
64216 + const struct dentry *dir, const int flag,
64217 + const int acc_mode);
64218 +int gr_handle_hardlink(const struct dentry *dentry,
64219 + const struct vfsmount *mnt,
64220 + struct inode *inode,
64221 + const int mode, const struct filename *to);
64222 +
64223 +int gr_is_capable(const int cap);
64224 +int gr_is_capable_nolog(const int cap);
64225 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64226 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
64227 +
64228 +void gr_learn_resource(const struct task_struct *task, const int limit,
64229 + const unsigned long wanted, const int gt);
64230 +void gr_copy_label(struct task_struct *tsk);
64231 +void gr_handle_crash(struct task_struct *task, const int sig);
64232 +int gr_handle_signal(const struct task_struct *p, const int sig);
64233 +int gr_check_crash_uid(const uid_t uid);
64234 +int gr_check_protected_task(const struct task_struct *task);
64235 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
64236 +int gr_acl_handle_mmap(const struct file *file,
64237 + const unsigned long prot);
64238 +int gr_acl_handle_mprotect(const struct file *file,
64239 + const unsigned long prot);
64240 +int gr_check_hidden_task(const struct task_struct *tsk);
64241 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
64242 + const struct vfsmount *mnt);
64243 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
64244 + const struct vfsmount *mnt);
64245 +__u32 gr_acl_handle_access(const struct dentry *dentry,
64246 + const struct vfsmount *mnt, const int fmode);
64247 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
64248 + const struct vfsmount *mnt, umode_t *mode);
64249 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
64250 + const struct vfsmount *mnt);
64251 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
64252 + const struct vfsmount *mnt);
64253 +int gr_handle_ptrace(struct task_struct *task, const long request);
64254 +int gr_handle_proc_ptrace(struct task_struct *task);
64255 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
64256 + const struct vfsmount *mnt);
64257 +int gr_check_crash_exec(const struct file *filp);
64258 +int gr_acl_is_enabled(void);
64259 +void gr_set_kernel_label(struct task_struct *task);
64260 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
64261 + const gid_t gid);
64262 +int gr_set_proc_label(const struct dentry *dentry,
64263 + const struct vfsmount *mnt,
64264 + const int unsafe_flags);
64265 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
64266 + const struct vfsmount *mnt);
64267 +__u32 gr_acl_handle_open(const struct dentry *dentry,
64268 + const struct vfsmount *mnt, int acc_mode);
64269 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
64270 + const struct dentry *p_dentry,
64271 + const struct vfsmount *p_mnt,
64272 + int open_flags, int acc_mode, const int imode);
64273 +void gr_handle_create(const struct dentry *dentry,
64274 + const struct vfsmount *mnt);
64275 +void gr_handle_proc_create(const struct dentry *dentry,
64276 + const struct inode *inode);
64277 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
64278 + const struct dentry *parent_dentry,
64279 + const struct vfsmount *parent_mnt,
64280 + const int mode);
64281 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
64282 + const struct dentry *parent_dentry,
64283 + const struct vfsmount *parent_mnt);
64284 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
64285 + const struct vfsmount *mnt);
64286 +void gr_handle_delete(const ino_t ino, const dev_t dev);
64287 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
64288 + const struct vfsmount *mnt);
64289 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
64290 + const struct dentry *parent_dentry,
64291 + const struct vfsmount *parent_mnt,
64292 + const struct filename *from);
64293 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
64294 + const struct dentry *parent_dentry,
64295 + const struct vfsmount *parent_mnt,
64296 + const struct dentry *old_dentry,
64297 + const struct vfsmount *old_mnt, const struct filename *to);
64298 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
64299 +int gr_acl_handle_rename(struct dentry *new_dentry,
64300 + struct dentry *parent_dentry,
64301 + const struct vfsmount *parent_mnt,
64302 + struct dentry *old_dentry,
64303 + struct inode *old_parent_inode,
64304 + struct vfsmount *old_mnt, const struct filename *newname);
64305 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
64306 + struct dentry *old_dentry,
64307 + struct dentry *new_dentry,
64308 + struct vfsmount *mnt, const __u8 replace);
64309 +__u32 gr_check_link(const struct dentry *new_dentry,
64310 + const struct dentry *parent_dentry,
64311 + const struct vfsmount *parent_mnt,
64312 + const struct dentry *old_dentry,
64313 + const struct vfsmount *old_mnt);
64314 +int gr_acl_handle_filldir(const struct file *file, const char *name,
64315 + const unsigned int namelen, const ino_t ino);
64316 +
64317 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
64318 + const struct vfsmount *mnt);
64319 +void gr_acl_handle_exit(void);
64320 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
64321 +int gr_acl_handle_procpidmem(const struct task_struct *task);
64322 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
64323 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
64324 +void gr_audit_ptrace(struct task_struct *task);
64325 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
64326 +void gr_put_exec_file(struct task_struct *task);
64327 +
64328 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
64329 +
64330 +#ifdef CONFIG_GRKERNSEC
64331 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
64332 +void gr_handle_vm86(void);
64333 +void gr_handle_mem_readwrite(u64 from, u64 to);
64334 +
64335 +void gr_log_badprocpid(const char *entry);
64336 +
64337 +extern int grsec_enable_dmesg;
64338 +extern int grsec_disable_privio;
64339 +
64340 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64341 +extern int grsec_proc_gid;
64342 +#endif
64343 +
64344 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64345 +extern int grsec_enable_chroot_findtask;
64346 +#endif
64347 +#ifdef CONFIG_GRKERNSEC_SETXID
64348 +extern int grsec_enable_setxid;
64349 +#endif
64350 +#endif
64351 +
64352 +#endif
64353 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
64354 new file mode 100644
64355 index 0000000..e7ffaaf
64356 --- /dev/null
64357 +++ b/include/linux/grsock.h
64358 @@ -0,0 +1,19 @@
64359 +#ifndef __GRSOCK_H
64360 +#define __GRSOCK_H
64361 +
64362 +extern void gr_attach_curr_ip(const struct sock *sk);
64363 +extern int gr_handle_sock_all(const int family, const int type,
64364 + const int protocol);
64365 +extern int gr_handle_sock_server(const struct sockaddr *sck);
64366 +extern int gr_handle_sock_server_other(const struct sock *sck);
64367 +extern int gr_handle_sock_client(const struct sockaddr *sck);
64368 +extern int gr_search_connect(struct socket * sock,
64369 + struct sockaddr_in * addr);
64370 +extern int gr_search_bind(struct socket * sock,
64371 + struct sockaddr_in * addr);
64372 +extern int gr_search_listen(struct socket * sock);
64373 +extern int gr_search_accept(struct socket * sock);
64374 +extern int gr_search_socket(const int domain, const int type,
64375 + const int protocol);
64376 +
64377 +#endif
64378 diff --git a/include/linux/hid.h b/include/linux/hid.h
64379 index c076041..6f54d73 100644
64380 --- a/include/linux/hid.h
64381 +++ b/include/linux/hid.h
64382 @@ -671,7 +671,7 @@ struct hid_ll_driver {
64383 unsigned int code, int value);
64384
64385 int (*parse)(struct hid_device *hdev);
64386 -};
64387 +} __no_const;
64388
64389 #define PM_HINT_FULLON 1<<5
64390 #define PM_HINT_NORMAL 1<<1
64391 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
64392 index ef788b5..ac41b7b 100644
64393 --- a/include/linux/highmem.h
64394 +++ b/include/linux/highmem.h
64395 @@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
64396 kunmap_atomic(kaddr);
64397 }
64398
64399 +static inline void sanitize_highpage(struct page *page)
64400 +{
64401 + void *kaddr;
64402 + unsigned long flags;
64403 +
64404 + local_irq_save(flags);
64405 + kaddr = kmap_atomic(page);
64406 + clear_page(kaddr);
64407 + kunmap_atomic(kaddr);
64408 + local_irq_restore(flags);
64409 +}
64410 +
64411 static inline void zero_user_segments(struct page *page,
64412 unsigned start1, unsigned end1,
64413 unsigned start2, unsigned end2)
64414 diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
64415 index 56fae86..5565cde 100644
64416 --- a/include/linux/hsi/hsi.h
64417 +++ b/include/linux/hsi/hsi.h
64418 @@ -132,7 +132,7 @@ struct hsi_client {
64419 /* private: */
64420 void (*ehandler)(struct hsi_client *, unsigned long);
64421 unsigned int pclaimed:1;
64422 - struct notifier_block nb;
64423 + notifier_block_no_const nb;
64424 };
64425
64426 #define to_hsi_client(dev) container_of(dev, struct hsi_client, device)
64427 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
64428 index 800de22..7a2fa46 100644
64429 --- a/include/linux/i2c.h
64430 +++ b/include/linux/i2c.h
64431 @@ -367,6 +367,7 @@ struct i2c_algorithm {
64432 /* To determine what the adapter supports */
64433 u32 (*functionality) (struct i2c_adapter *);
64434 };
64435 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
64436
64437 /*
64438 * i2c_adapter is the structure used to identify a physical i2c bus along
64439 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
64440 index d23c3c2..eb63c81 100644
64441 --- a/include/linux/i2o.h
64442 +++ b/include/linux/i2o.h
64443 @@ -565,7 +565,7 @@ struct i2o_controller {
64444 struct i2o_device *exec; /* Executive */
64445 #if BITS_PER_LONG == 64
64446 spinlock_t context_list_lock; /* lock for context_list */
64447 - atomic_t context_list_counter; /* needed for unique contexts */
64448 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
64449 struct list_head context_list; /* list of context id's
64450 and pointers */
64451 #endif
64452 diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
64453 index aff7ad8..3942bbd 100644
64454 --- a/include/linux/if_pppox.h
64455 +++ b/include/linux/if_pppox.h
64456 @@ -76,7 +76,7 @@ struct pppox_proto {
64457 int (*ioctl)(struct socket *sock, unsigned int cmd,
64458 unsigned long arg);
64459 struct module *owner;
64460 -};
64461 +} __do_const;
64462
64463 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
64464 extern void unregister_pppox_proto(int proto_num);
64465 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
64466 index 0245def..1199c5a 100644
64467 --- a/include/linux/if_team.h
64468 +++ b/include/linux/if_team.h
64469 @@ -111,6 +111,7 @@ struct team_mode_ops {
64470 void (*port_enabled)(struct team *team, struct team_port *port);
64471 void (*port_disabled)(struct team *team, struct team_port *port);
64472 };
64473 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
64474
64475 enum team_option_type {
64476 TEAM_OPTION_TYPE_U32,
64477 @@ -185,7 +186,7 @@ struct team {
64478 struct list_head option_inst_list; /* list of option instances */
64479
64480 const struct team_mode *mode;
64481 - struct team_mode_ops ops;
64482 + team_mode_ops_no_const ops;
64483 bool queue_override_enabled;
64484 struct list_head *qom_lists; /* array of queue override mapping lists */
64485 long mode_priv[TEAM_MODE_PRIV_LONGS];
64486 diff --git a/include/linux/init.h b/include/linux/init.h
64487 index e59041e..df0a975 100644
64488 --- a/include/linux/init.h
64489 +++ b/include/linux/init.h
64490 @@ -39,9 +39,36 @@
64491 * Also note, that this data cannot be "const".
64492 */
64493
64494 +#ifdef MODULE
64495 +#define add_init_latent_entropy
64496 +#define add_devinit_latent_entropy
64497 +#define add_cpuinit_latent_entropy
64498 +#define add_meminit_latent_entropy
64499 +#else
64500 +#define add_init_latent_entropy __latent_entropy
64501 +
64502 +#ifdef CONFIG_HOTPLUG
64503 +#define add_devinit_latent_entropy
64504 +#else
64505 +#define add_devinit_latent_entropy __latent_entropy
64506 +#endif
64507 +
64508 +#ifdef CONFIG_HOTPLUG_CPU
64509 +#define add_cpuinit_latent_entropy
64510 +#else
64511 +#define add_cpuinit_latent_entropy __latent_entropy
64512 +#endif
64513 +
64514 +#ifdef CONFIG_MEMORY_HOTPLUG
64515 +#define add_meminit_latent_entropy
64516 +#else
64517 +#define add_meminit_latent_entropy __latent_entropy
64518 +#endif
64519 +#endif
64520 +
64521 /* These are for everybody (although not all archs will actually
64522 discard it in modules) */
64523 -#define __init __section(.init.text) __cold notrace
64524 +#define __init __section(.init.text) __cold notrace add_init_latent_entropy
64525 #define __initdata __section(.init.data)
64526 #define __initconst __constsection(.init.rodata)
64527 #define __exitdata __section(.exit.data)
64528 @@ -94,7 +121,7 @@
64529 #define __exit __section(.exit.text) __exitused __cold notrace
64530
64531 /* Used for HOTPLUG */
64532 -#define __devinit __section(.devinit.text) __cold notrace
64533 +#define __devinit __section(.devinit.text) __cold notrace add_devinit_latent_entropy
64534 #define __devinitdata __section(.devinit.data)
64535 #define __devinitconst __constsection(.devinit.rodata)
64536 #define __devexit __section(.devexit.text) __exitused __cold notrace
64537 @@ -102,7 +129,7 @@
64538 #define __devexitconst __constsection(.devexit.rodata)
64539
64540 /* Used for HOTPLUG_CPU */
64541 -#define __cpuinit __section(.cpuinit.text) __cold notrace
64542 +#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
64543 #define __cpuinitdata __section(.cpuinit.data)
64544 #define __cpuinitconst __constsection(.cpuinit.rodata)
64545 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
64546 @@ -110,7 +137,7 @@
64547 #define __cpuexitconst __constsection(.cpuexit.rodata)
64548
64549 /* Used for MEMORY_HOTPLUG */
64550 -#define __meminit __section(.meminit.text) __cold notrace
64551 +#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
64552 #define __meminitdata __section(.meminit.data)
64553 #define __meminitconst __constsection(.meminit.rodata)
64554 #define __memexit __section(.memexit.text) __exitused __cold notrace
64555 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
64556 index 6d087c5..401cab8 100644
64557 --- a/include/linux/init_task.h
64558 +++ b/include/linux/init_task.h
64559 @@ -143,6 +143,12 @@ extern struct task_group root_task_group;
64560
64561 #define INIT_TASK_COMM "swapper"
64562
64563 +#ifdef CONFIG_X86
64564 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
64565 +#else
64566 +#define INIT_TASK_THREAD_INFO
64567 +#endif
64568 +
64569 /*
64570 * INIT_TASK is used to set up the first task table, touch at
64571 * your own risk!. Base=0, limit=0x1fffff (=2MB)
64572 @@ -182,6 +188,7 @@ extern struct task_group root_task_group;
64573 RCU_POINTER_INITIALIZER(cred, &init_cred), \
64574 .comm = INIT_TASK_COMM, \
64575 .thread = INIT_THREAD, \
64576 + INIT_TASK_THREAD_INFO \
64577 .fs = &init_fs, \
64578 .files = &init_files, \
64579 .signal = &init_signals, \
64580 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
64581 index 78e2ada..745564d 100644
64582 --- a/include/linux/intel-iommu.h
64583 +++ b/include/linux/intel-iommu.h
64584 @@ -296,7 +296,7 @@ struct iommu_flush {
64585 u8 fm, u64 type);
64586 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
64587 unsigned int size_order, u64 type);
64588 -};
64589 +} __no_const;
64590
64591 enum {
64592 SR_DMAR_FECTL_REG,
64593 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
64594 index 5e4e617..eee383d 100644
64595 --- a/include/linux/interrupt.h
64596 +++ b/include/linux/interrupt.h
64597 @@ -435,7 +435,7 @@ enum
64598 /* map softirq index to softirq name. update 'softirq_to_name' in
64599 * kernel/softirq.c when adding a new softirq.
64600 */
64601 -extern char *softirq_to_name[NR_SOFTIRQS];
64602 +extern const char * const softirq_to_name[NR_SOFTIRQS];
64603
64604 /* softirq mask and active fields moved to irq_cpustat_t in
64605 * asm/hardirq.h to get better cache usage. KAO
64606 @@ -443,12 +443,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
64607
64608 struct softirq_action
64609 {
64610 - void (*action)(struct softirq_action *);
64611 + void (*action)(void);
64612 };
64613
64614 asmlinkage void do_softirq(void);
64615 asmlinkage void __do_softirq(void);
64616 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
64617 +extern void open_softirq(int nr, void (*action)(void));
64618 extern void softirq_init(void);
64619 extern void __raise_softirq_irqoff(unsigned int nr);
64620
64621 diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
64622 index 5499c92..e6770b3 100644
64623 --- a/include/linux/ipc_namespace.h
64624 +++ b/include/linux/ipc_namespace.h
64625 @@ -50,7 +50,7 @@ struct ipc_namespace {
64626 */
64627 int shm_rmid_forced;
64628
64629 - struct notifier_block ipcns_nb;
64630 + notifier_block_no_const ipcns_nb;
64631
64632 /* The kern_mount of the mqueuefs sb. We take a ref on it */
64633 struct vfsmount *mq_mnt;
64634 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
64635 index 6883e19..06992b1 100644
64636 --- a/include/linux/kallsyms.h
64637 +++ b/include/linux/kallsyms.h
64638 @@ -15,7 +15,8 @@
64639
64640 struct module;
64641
64642 -#ifdef CONFIG_KALLSYMS
64643 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
64644 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64645 /* Lookup the address for a symbol. Returns 0 if not found. */
64646 unsigned long kallsyms_lookup_name(const char *name);
64647
64648 @@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
64649 /* Stupid that this does nothing, but I didn't create this mess. */
64650 #define __print_symbol(fmt, addr)
64651 #endif /*CONFIG_KALLSYMS*/
64652 +#else /* when included by kallsyms.c, vsnprintf.c, or
64653 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
64654 +extern void __print_symbol(const char *fmt, unsigned long address);
64655 +extern int sprint_backtrace(char *buffer, unsigned long address);
64656 +extern int sprint_symbol(char *buffer, unsigned long address);
64657 +extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
64658 +const char *kallsyms_lookup(unsigned long addr,
64659 + unsigned long *symbolsize,
64660 + unsigned long *offset,
64661 + char **modname, char *namebuf);
64662 +#endif
64663
64664 /* This macro allows us to keep printk typechecking */
64665 static __printf(1, 2)
64666 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
64667 index 4dff0c6..1ca9b72 100644
64668 --- a/include/linux/kgdb.h
64669 +++ b/include/linux/kgdb.h
64670 @@ -53,7 +53,7 @@ extern int kgdb_connected;
64671 extern int kgdb_io_module_registered;
64672
64673 extern atomic_t kgdb_setting_breakpoint;
64674 -extern atomic_t kgdb_cpu_doing_single_step;
64675 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
64676
64677 extern struct task_struct *kgdb_usethread;
64678 extern struct task_struct *kgdb_contthread;
64679 @@ -255,7 +255,7 @@ struct kgdb_arch {
64680 void (*correct_hw_break)(void);
64681
64682 void (*enable_nmi)(bool on);
64683 -};
64684 +} __do_const;
64685
64686 /**
64687 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
64688 @@ -280,7 +280,7 @@ struct kgdb_io {
64689 void (*pre_exception) (void);
64690 void (*post_exception) (void);
64691 int is_console;
64692 -};
64693 +} __do_const;
64694
64695 extern struct kgdb_arch arch_kgdb_ops;
64696
64697 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
64698 index 5398d58..5883a34 100644
64699 --- a/include/linux/kmod.h
64700 +++ b/include/linux/kmod.h
64701 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
64702 * usually useless though. */
64703 extern __printf(2, 3)
64704 int __request_module(bool wait, const char *name, ...);
64705 +extern __printf(3, 4)
64706 +int ___request_module(bool wait, char *param_name, const char *name, ...);
64707 #define request_module(mod...) __request_module(true, mod)
64708 #define request_module_nowait(mod...) __request_module(false, mod)
64709 #define try_then_request_module(x, mod...) \
64710 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
64711 index 1e57449..4fede7b 100644
64712 --- a/include/linux/kobject.h
64713 +++ b/include/linux/kobject.h
64714 @@ -111,7 +111,7 @@ struct kobj_type {
64715 struct attribute **default_attrs;
64716 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
64717 const void *(*namespace)(struct kobject *kobj);
64718 -};
64719 +} __do_const;
64720
64721 struct kobj_uevent_env {
64722 char *envp[UEVENT_NUM_ENVP];
64723 diff --git a/include/linux/kref.h b/include/linux/kref.h
64724 index 65af688..0592677 100644
64725 --- a/include/linux/kref.h
64726 +++ b/include/linux/kref.h
64727 @@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
64728 static inline int kref_sub(struct kref *kref, unsigned int count,
64729 void (*release)(struct kref *kref))
64730 {
64731 - WARN_ON(release == NULL);
64732 + BUG_ON(release == NULL);
64733
64734 if (atomic_sub_and_test((int) count, &kref->refcount)) {
64735 release(kref);
64736 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
64737 index ecc5543..0e96bcc 100644
64738 --- a/include/linux/kvm_host.h
64739 +++ b/include/linux/kvm_host.h
64740 @@ -403,7 +403,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
64741 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
64742 void vcpu_put(struct kvm_vcpu *vcpu);
64743
64744 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
64745 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
64746 struct module *module);
64747 void kvm_exit(void);
64748
64749 @@ -558,7 +558,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
64750 struct kvm_guest_debug *dbg);
64751 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
64752
64753 -int kvm_arch_init(void *opaque);
64754 +int kvm_arch_init(const void *opaque);
64755 void kvm_arch_exit(void);
64756
64757 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
64758 diff --git a/include/linux/lcd.h b/include/linux/lcd.h
64759 index e00c3b0..2ab1e49 100644
64760 --- a/include/linux/lcd.h
64761 +++ b/include/linux/lcd.h
64762 @@ -74,7 +74,7 @@ struct lcd_device {
64763 /* Serialise access to set_power method */
64764 struct mutex update_lock;
64765 /* The framebuffer notifier block */
64766 - struct notifier_block fb_notif;
64767 + notifier_block_no_const fb_notif;
64768
64769 struct device dev;
64770 };
64771 diff --git a/include/linux/libata.h b/include/linux/libata.h
64772 index 77eeeda..062ed69c 100644
64773 --- a/include/linux/libata.h
64774 +++ b/include/linux/libata.h
64775 @@ -914,7 +914,7 @@ struct ata_port_operations {
64776 * fields must be pointers.
64777 */
64778 const struct ata_port_operations *inherits;
64779 -};
64780 +} __do_const;
64781
64782 struct ata_port_info {
64783 unsigned long flags;
64784 diff --git a/include/linux/memory.h b/include/linux/memory.h
64785 index ff9a9f8..c715deb 100644
64786 --- a/include/linux/memory.h
64787 +++ b/include/linux/memory.h
64788 @@ -143,7 +143,7 @@ struct memory_accessor {
64789 size_t count);
64790 ssize_t (*write)(struct memory_accessor *, const char *buf,
64791 off_t offset, size_t count);
64792 -};
64793 +} __no_const;
64794
64795 /*
64796 * Kernel text modification mutex, used for code patching. Users of this lock
64797 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
64798 index 5d5298d..e3e5a2e 100644
64799 --- a/include/linux/mfd/abx500.h
64800 +++ b/include/linux/mfd/abx500.h
64801 @@ -337,6 +337,7 @@ struct abx500_ops {
64802 int (*event_registers_startup_state_get) (struct device *, u8 *);
64803 int (*startup_irq_enabled) (struct device *, unsigned int);
64804 };
64805 +typedef struct abx500_ops __no_const abx500_ops_no_const;
64806
64807 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
64808 void abx500_remove_ops(struct device *dev);
64809 diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
64810 index 9b07725..3d55001 100644
64811 --- a/include/linux/mfd/abx500/ux500_chargalg.h
64812 +++ b/include/linux/mfd/abx500/ux500_chargalg.h
64813 @@ -19,7 +19,7 @@ struct ux500_charger_ops {
64814 int (*enable) (struct ux500_charger *, int, int, int);
64815 int (*kick_wd) (struct ux500_charger *);
64816 int (*update_curr) (struct ux500_charger *, int);
64817 -};
64818 +} __no_const;
64819
64820 /**
64821 * struct ux500_charger - power supply ux500 charger sub class
64822 diff --git a/include/linux/mm.h b/include/linux/mm.h
64823 index bcaab4e..f842186 100644
64824 --- a/include/linux/mm.h
64825 +++ b/include/linux/mm.h
64826 @@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
64827 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
64828 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
64829 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
64830 +
64831 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64832 +#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
64833 +#endif
64834 +
64835 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
64836
64837 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
64838 @@ -1040,34 +1045,6 @@ int set_page_dirty(struct page *page);
64839 int set_page_dirty_lock(struct page *page);
64840 int clear_page_dirty_for_io(struct page *page);
64841
64842 -/* Is the vma a continuation of the stack vma above it? */
64843 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
64844 -{
64845 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
64846 -}
64847 -
64848 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
64849 - unsigned long addr)
64850 -{
64851 - return (vma->vm_flags & VM_GROWSDOWN) &&
64852 - (vma->vm_start == addr) &&
64853 - !vma_growsdown(vma->vm_prev, addr);
64854 -}
64855 -
64856 -/* Is the vma a continuation of the stack vma below it? */
64857 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
64858 -{
64859 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
64860 -}
64861 -
64862 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
64863 - unsigned long addr)
64864 -{
64865 - return (vma->vm_flags & VM_GROWSUP) &&
64866 - (vma->vm_end == addr) &&
64867 - !vma_growsup(vma->vm_next, addr);
64868 -}
64869 -
64870 extern pid_t
64871 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
64872
64873 @@ -1167,6 +1144,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
64874 }
64875 #endif
64876
64877 +#ifdef CONFIG_MMU
64878 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
64879 +#else
64880 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
64881 +{
64882 + return __pgprot(0);
64883 +}
64884 +#endif
64885 +
64886 int vma_wants_writenotify(struct vm_area_struct *vma);
64887
64888 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
64889 @@ -1185,8 +1171,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
64890 {
64891 return 0;
64892 }
64893 +
64894 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
64895 + unsigned long address)
64896 +{
64897 + return 0;
64898 +}
64899 #else
64900 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
64901 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
64902 #endif
64903
64904 #ifdef __PAGETABLE_PMD_FOLDED
64905 @@ -1195,8 +1188,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
64906 {
64907 return 0;
64908 }
64909 +
64910 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
64911 + unsigned long address)
64912 +{
64913 + return 0;
64914 +}
64915 #else
64916 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
64917 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
64918 #endif
64919
64920 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
64921 @@ -1214,11 +1214,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
64922 NULL: pud_offset(pgd, address);
64923 }
64924
64925 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
64926 +{
64927 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
64928 + NULL: pud_offset(pgd, address);
64929 +}
64930 +
64931 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
64932 {
64933 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
64934 NULL: pmd_offset(pud, address);
64935 }
64936 +
64937 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
64938 +{
64939 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
64940 + NULL: pmd_offset(pud, address);
64941 +}
64942 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
64943
64944 #if USE_SPLIT_PTLOCKS
64945 @@ -1448,6 +1460,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
64946 unsigned long, unsigned long,
64947 unsigned long, unsigned long);
64948 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
64949 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
64950
64951 /* These take the mm semaphore themselves */
64952 extern unsigned long vm_brk(unsigned long, unsigned long);
64953 @@ -1511,6 +1524,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
64954 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
64955 struct vm_area_struct **pprev);
64956
64957 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
64958 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
64959 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
64960 +
64961 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
64962 NULL if none. Assume start_addr < end_addr. */
64963 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
64964 @@ -1539,15 +1556,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
64965 return vma;
64966 }
64967
64968 -#ifdef CONFIG_MMU
64969 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
64970 -#else
64971 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
64972 -{
64973 - return __pgprot(0);
64974 -}
64975 -#endif
64976 -
64977 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
64978 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
64979 unsigned long pfn, unsigned long size, pgprot_t);
64980 @@ -1653,7 +1661,7 @@ extern int unpoison_memory(unsigned long pfn);
64981 extern int sysctl_memory_failure_early_kill;
64982 extern int sysctl_memory_failure_recovery;
64983 extern void shake_page(struct page *p, int access);
64984 -extern atomic_long_t mce_bad_pages;
64985 +extern atomic_long_unchecked_t mce_bad_pages;
64986 extern int soft_offline_page(struct page *page, int flags);
64987
64988 extern void dump_page(struct page *page);
64989 @@ -1684,5 +1692,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
64990 static inline bool page_is_guard(struct page *page) { return false; }
64991 #endif /* CONFIG_DEBUG_PAGEALLOC */
64992
64993 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
64994 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
64995 +#else
64996 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
64997 +#endif
64998 +
64999 #endif /* __KERNEL__ */
65000 #endif /* _LINUX_MM_H */
65001 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
65002 index 31f8a3a..499f1db 100644
65003 --- a/include/linux/mm_types.h
65004 +++ b/include/linux/mm_types.h
65005 @@ -275,6 +275,8 @@ struct vm_area_struct {
65006 #ifdef CONFIG_NUMA
65007 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
65008 #endif
65009 +
65010 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
65011 };
65012
65013 struct core_thread {
65014 @@ -348,7 +350,7 @@ struct mm_struct {
65015 unsigned long def_flags;
65016 unsigned long nr_ptes; /* Page table pages */
65017 unsigned long start_code, end_code, start_data, end_data;
65018 - unsigned long start_brk, brk, start_stack;
65019 + unsigned long brk_gap, start_brk, brk, start_stack;
65020 unsigned long arg_start, arg_end, env_start, env_end;
65021
65022 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
65023 @@ -399,6 +401,24 @@ struct mm_struct {
65024 struct cpumask cpumask_allocation;
65025 #endif
65026 struct uprobes_state uprobes_state;
65027 +
65028 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65029 + unsigned long pax_flags;
65030 +#endif
65031 +
65032 +#ifdef CONFIG_PAX_DLRESOLVE
65033 + unsigned long call_dl_resolve;
65034 +#endif
65035 +
65036 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
65037 + unsigned long call_syscall;
65038 +#endif
65039 +
65040 +#ifdef CONFIG_PAX_ASLR
65041 + unsigned long delta_mmap; /* randomized offset */
65042 + unsigned long delta_stack; /* randomized offset */
65043 +#endif
65044 +
65045 };
65046
65047 static inline void mm_init_cpumask(struct mm_struct *mm)
65048 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
65049 index 7abb0e1..cf526e3 100644
65050 --- a/include/linux/mmc/host.h
65051 +++ b/include/linux/mmc/host.h
65052 @@ -188,7 +188,7 @@ struct mmc_host {
65053 u32 ocr_avail_sdio; /* SDIO-specific OCR */
65054 u32 ocr_avail_sd; /* SD-specific OCR */
65055 u32 ocr_avail_mmc; /* MMC-specific OCR */
65056 - struct notifier_block pm_notify;
65057 + notifier_block_no_const pm_notify;
65058 u32 max_current_330;
65059 u32 max_current_300;
65060 u32 max_current_180;
65061 diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
65062 index c5d5278..f0b68c8 100644
65063 --- a/include/linux/mmiotrace.h
65064 +++ b/include/linux/mmiotrace.h
65065 @@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
65066 /* Called from ioremap.c */
65067 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
65068 void __iomem *addr);
65069 -extern void mmiotrace_iounmap(volatile void __iomem *addr);
65070 +extern void mmiotrace_iounmap(const volatile void __iomem *addr);
65071
65072 /* For anyone to insert markers. Remember trailing newline. */
65073 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
65074 @@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
65075 {
65076 }
65077
65078 -static inline void mmiotrace_iounmap(volatile void __iomem *addr)
65079 +static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
65080 {
65081 }
65082
65083 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
65084 index a23923b..073fee4 100644
65085 --- a/include/linux/mmzone.h
65086 +++ b/include/linux/mmzone.h
65087 @@ -421,7 +421,7 @@ struct zone {
65088 unsigned long flags; /* zone flags, see below */
65089
65090 /* Zone statistics */
65091 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
65092 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
65093
65094 /*
65095 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
65096 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
65097 index fed3def..7cc3f93 100644
65098 --- a/include/linux/mod_devicetable.h
65099 +++ b/include/linux/mod_devicetable.h
65100 @@ -12,7 +12,7 @@
65101 typedef unsigned long kernel_ulong_t;
65102 #endif
65103
65104 -#define PCI_ANY_ID (~0)
65105 +#define PCI_ANY_ID ((__u16)~0)
65106
65107 struct pci_device_id {
65108 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
65109 @@ -139,7 +139,7 @@ struct usb_device_id {
65110 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
65111 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
65112
65113 -#define HID_ANY_ID (~0)
65114 +#define HID_ANY_ID (~0U)
65115 #define HID_BUS_ANY 0xffff
65116 #define HID_GROUP_ANY 0x0000
65117
65118 diff --git a/include/linux/module.h b/include/linux/module.h
65119 index 7760c6d..983ee18 100644
65120 --- a/include/linux/module.h
65121 +++ b/include/linux/module.h
65122 @@ -17,9 +17,11 @@
65123 #include <linux/moduleparam.h>
65124 #include <linux/tracepoint.h>
65125 #include <linux/export.h>
65126 +#include <linux/fs.h>
65127
65128 #include <linux/percpu.h>
65129 #include <asm/module.h>
65130 +#include <asm/pgtable.h>
65131
65132 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
65133 #define MODULE_SIG_STRING "~Module signature appended~\n"
65134 @@ -281,19 +283,16 @@ struct module
65135 int (*init)(void);
65136
65137 /* If this is non-NULL, vfree after init() returns */
65138 - void *module_init;
65139 + void *module_init_rx, *module_init_rw;
65140
65141 /* Here is the actual code + data, vfree'd on unload. */
65142 - void *module_core;
65143 + void *module_core_rx, *module_core_rw;
65144
65145 /* Here are the sizes of the init and core sections */
65146 - unsigned int init_size, core_size;
65147 + unsigned int init_size_rw, core_size_rw;
65148
65149 /* The size of the executable code in each section. */
65150 - unsigned int init_text_size, core_text_size;
65151 -
65152 - /* Size of RO sections of the module (text+rodata) */
65153 - unsigned int init_ro_size, core_ro_size;
65154 + unsigned int init_size_rx, core_size_rx;
65155
65156 /* Arch-specific module values */
65157 struct mod_arch_specific arch;
65158 @@ -349,6 +348,10 @@ struct module
65159 #ifdef CONFIG_EVENT_TRACING
65160 struct ftrace_event_call **trace_events;
65161 unsigned int num_trace_events;
65162 + struct file_operations trace_id;
65163 + struct file_operations trace_enable;
65164 + struct file_operations trace_format;
65165 + struct file_operations trace_filter;
65166 #endif
65167 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
65168 unsigned int num_ftrace_callsites;
65169 @@ -396,16 +399,46 @@ bool is_module_address(unsigned long addr);
65170 bool is_module_percpu_address(unsigned long addr);
65171 bool is_module_text_address(unsigned long addr);
65172
65173 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
65174 +{
65175 +
65176 +#ifdef CONFIG_PAX_KERNEXEC
65177 + if (ktla_ktva(addr) >= (unsigned long)start &&
65178 + ktla_ktva(addr) < (unsigned long)start + size)
65179 + return 1;
65180 +#endif
65181 +
65182 + return ((void *)addr >= start && (void *)addr < start + size);
65183 +}
65184 +
65185 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
65186 +{
65187 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
65188 +}
65189 +
65190 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
65191 +{
65192 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
65193 +}
65194 +
65195 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
65196 +{
65197 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
65198 +}
65199 +
65200 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
65201 +{
65202 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
65203 +}
65204 +
65205 static inline int within_module_core(unsigned long addr, struct module *mod)
65206 {
65207 - return (unsigned long)mod->module_core <= addr &&
65208 - addr < (unsigned long)mod->module_core + mod->core_size;
65209 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
65210 }
65211
65212 static inline int within_module_init(unsigned long addr, struct module *mod)
65213 {
65214 - return (unsigned long)mod->module_init <= addr &&
65215 - addr < (unsigned long)mod->module_init + mod->init_size;
65216 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
65217 }
65218
65219 /* Search for module by name: must hold module_mutex. */
65220 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
65221 index 560ca53..5ee8d73 100644
65222 --- a/include/linux/moduleloader.h
65223 +++ b/include/linux/moduleloader.h
65224 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
65225
65226 /* Allocator used for allocating struct module, core sections and init
65227 sections. Returns NULL on failure. */
65228 -void *module_alloc(unsigned long size);
65229 +void *module_alloc(unsigned long size) __size_overflow(1);
65230 +
65231 +#ifdef CONFIG_PAX_KERNEXEC
65232 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
65233 +#else
65234 +#define module_alloc_exec(x) module_alloc(x)
65235 +#endif
65236
65237 /* Free memory returned from module_alloc. */
65238 void module_free(struct module *mod, void *module_region);
65239
65240 +#ifdef CONFIG_PAX_KERNEXEC
65241 +void module_free_exec(struct module *mod, void *module_region);
65242 +#else
65243 +#define module_free_exec(x, y) module_free((x), (y))
65244 +#endif
65245 +
65246 /*
65247 * Apply the given relocation to the (simplified) ELF. Return -error
65248 * or 0.
65249 @@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
65250 unsigned int relsec,
65251 struct module *me)
65252 {
65253 +#ifdef CONFIG_MODULES
65254 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
65255 +#endif
65256 return -ENOEXEC;
65257 }
65258 #endif
65259 @@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
65260 unsigned int relsec,
65261 struct module *me)
65262 {
65263 +#ifdef CONFIG_MODULES
65264 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
65265 +#endif
65266 return -ENOEXEC;
65267 }
65268 #endif
65269 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
65270 index d6a5806..7c13347 100644
65271 --- a/include/linux/moduleparam.h
65272 +++ b/include/linux/moduleparam.h
65273 @@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
65274 * @len is usually just sizeof(string).
65275 */
65276 #define module_param_string(name, string, len, perm) \
65277 - static const struct kparam_string __param_string_##name \
65278 + static const struct kparam_string __param_string_##name __used \
65279 = { len, string }; \
65280 __module_param_call(MODULE_PARAM_PREFIX, name, \
65281 &param_ops_string, \
65282 @@ -425,7 +425,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
65283 */
65284 #define module_param_array_named(name, array, type, nump, perm) \
65285 param_check_##type(name, &(array)[0]); \
65286 - static const struct kparam_array __param_arr_##name \
65287 + static const struct kparam_array __param_arr_##name __used \
65288 = { .max = ARRAY_SIZE(array), .num = nump, \
65289 .ops = &param_ops_##type, \
65290 .elemsize = sizeof(array[0]), .elem = array }; \
65291 diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
65292 index 81d61e7..eadc192 100644
65293 --- a/include/linux/mtd/mtd.h
65294 +++ b/include/linux/mtd/mtd.h
65295 @@ -238,7 +238,7 @@ struct mtd_info {
65296 */
65297 struct backing_dev_info *backing_dev_info;
65298
65299 - struct notifier_block reboot_notifier; /* default mode before reboot */
65300 + notifier_block_no_const reboot_notifier; /* default mode before reboot */
65301
65302 /* ECC status information */
65303 struct mtd_ecc_stats ecc_stats;
65304 diff --git a/include/linux/namei.h b/include/linux/namei.h
65305 index 4bf19d8..5268cea 100644
65306 --- a/include/linux/namei.h
65307 +++ b/include/linux/namei.h
65308 @@ -18,7 +18,7 @@ struct nameidata {
65309 unsigned seq;
65310 int last_type;
65311 unsigned depth;
65312 - char *saved_names[MAX_NESTED_LINKS + 1];
65313 + const char *saved_names[MAX_NESTED_LINKS + 1];
65314 };
65315
65316 /*
65317 @@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
65318
65319 extern void nd_jump_link(struct nameidata *nd, struct path *path);
65320
65321 -static inline void nd_set_link(struct nameidata *nd, char *path)
65322 +static inline void nd_set_link(struct nameidata *nd, const char *path)
65323 {
65324 nd->saved_names[nd->depth] = path;
65325 }
65326
65327 -static inline char *nd_get_link(struct nameidata *nd)
65328 +static inline const char *nd_get_link(const struct nameidata *nd)
65329 {
65330 return nd->saved_names[nd->depth];
65331 }
65332 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
65333 index a848ffc..3bbbaee 100644
65334 --- a/include/linux/netdevice.h
65335 +++ b/include/linux/netdevice.h
65336 @@ -999,6 +999,7 @@ struct net_device_ops {
65337 struct net_device *dev,
65338 int idx);
65339 };
65340 +typedef struct net_device_ops __no_const net_device_ops_no_const;
65341
65342 /*
65343 * The DEVICE structure.
65344 @@ -1059,7 +1060,7 @@ struct net_device {
65345 int iflink;
65346
65347 struct net_device_stats stats;
65348 - atomic_long_t rx_dropped; /* dropped packets by core network
65349 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
65350 * Do not use this in drivers.
65351 */
65352
65353 diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
65354 index 7958e84..ed74d7a 100644
65355 --- a/include/linux/netfilter/ipset/ip_set.h
65356 +++ b/include/linux/netfilter/ipset/ip_set.h
65357 @@ -98,7 +98,7 @@ struct ip_set_type_variant {
65358 /* Return true if "b" set is the same as "a"
65359 * according to the create set parameters */
65360 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
65361 -};
65362 +} __do_const;
65363
65364 /* The core set type structure */
65365 struct ip_set_type {
65366 diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
65367 index 4966dde..7d8ce06 100644
65368 --- a/include/linux/netfilter/nfnetlink.h
65369 +++ b/include/linux/netfilter/nfnetlink.h
65370 @@ -16,7 +16,7 @@ struct nfnl_callback {
65371 const struct nlattr * const cda[]);
65372 const struct nla_policy *policy; /* netlink attribute policy */
65373 const u_int16_t attr_count; /* number of nlattr's */
65374 -};
65375 +} __do_const;
65376
65377 struct nfnetlink_subsystem {
65378 const char *name;
65379 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
65380 new file mode 100644
65381 index 0000000..33f4af8
65382 --- /dev/null
65383 +++ b/include/linux/netfilter/xt_gradm.h
65384 @@ -0,0 +1,9 @@
65385 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
65386 +#define _LINUX_NETFILTER_XT_GRADM_H 1
65387 +
65388 +struct xt_gradm_mtinfo {
65389 + __u16 flags;
65390 + __u16 invflags;
65391 +};
65392 +
65393 +#endif
65394 diff --git a/include/linux/notifier.h b/include/linux/notifier.h
65395 index d65746e..62e72c2 100644
65396 --- a/include/linux/notifier.h
65397 +++ b/include/linux/notifier.h
65398 @@ -51,7 +51,8 @@ struct notifier_block {
65399 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
65400 struct notifier_block __rcu *next;
65401 int priority;
65402 -};
65403 +} __do_const;
65404 +typedef struct notifier_block __no_const notifier_block_no_const;
65405
65406 struct atomic_notifier_head {
65407 spinlock_t lock;
65408 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
65409 index c65a18a..0c05f3a 100644
65410 --- a/include/linux/of_pdt.h
65411 +++ b/include/linux/of_pdt.h
65412 @@ -32,7 +32,7 @@ struct of_pdt_ops {
65413
65414 /* return 0 on success; fill in 'len' with number of bytes in path */
65415 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
65416 -};
65417 +} __no_const;
65418
65419 extern void *prom_early_alloc(unsigned long size);
65420
65421 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
65422 index a4c5624..79d6d88 100644
65423 --- a/include/linux/oprofile.h
65424 +++ b/include/linux/oprofile.h
65425 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
65426 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
65427 char const * name, ulong * val);
65428
65429 -/** Create a file for read-only access to an atomic_t. */
65430 +/** Create a file for read-only access to an atomic_unchecked_t. */
65431 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
65432 - char const * name, atomic_t * val);
65433 + char const * name, atomic_unchecked_t * val);
65434
65435 /** create a directory */
65436 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
65437 diff --git a/include/linux/padata.h b/include/linux/padata.h
65438 index 86292be..d67326a 100644
65439 --- a/include/linux/padata.h
65440 +++ b/include/linux/padata.h
65441 @@ -152,7 +152,7 @@ struct parallel_data {
65442 * @flags: padata flags.
65443 */
65444 struct padata_instance {
65445 - struct notifier_block cpu_notifier;
65446 + notifier_block_no_const cpu_notifier;
65447 struct workqueue_struct *wq;
65448 struct parallel_data *pd;
65449 struct padata_cpumask cpumask;
65450 diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
65451 index b5d1384..70473da 100644
65452 --- a/include/linux/page-flags.h
65453 +++ b/include/linux/page-flags.h
65454 @@ -362,7 +362,7 @@ static inline void ClearPageCompound(struct page *page)
65455 * pages on the LRU and/or pagecache.
65456 */
65457 TESTPAGEFLAG(Compound, compound)
65458 -__PAGEFLAG(Head, compound)
65459 +__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
65460
65461 /*
65462 * PG_reclaim is used in combination with PG_compound to mark the
65463 @@ -374,8 +374,14 @@ __PAGEFLAG(Head, compound)
65464 * PG_compound & PG_reclaim => Tail page
65465 * PG_compound & ~PG_reclaim => Head page
65466 */
65467 +#define PG_head_mask ((1L << PG_compound))
65468 #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
65469
65470 +static inline int PageHead(struct page *page)
65471 +{
65472 + return ((page->flags & PG_head_tail_mask) == PG_head_mask);
65473 +}
65474 +
65475 static inline int PageTail(struct page *page)
65476 {
65477 return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
65478 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
65479 index 6bfb2faa..1204767 100644
65480 --- a/include/linux/perf_event.h
65481 +++ b/include/linux/perf_event.h
65482 @@ -328,8 +328,8 @@ struct perf_event {
65483
65484 enum perf_event_active_state state;
65485 unsigned int attach_state;
65486 - local64_t count;
65487 - atomic64_t child_count;
65488 + local64_t count; /* PaX: fix it one day */
65489 + atomic64_unchecked_t child_count;
65490
65491 /*
65492 * These are the total time in nanoseconds that the event
65493 @@ -380,8 +380,8 @@ struct perf_event {
65494 * These accumulate total time (in nanoseconds) that children
65495 * events have been enabled and running, respectively.
65496 */
65497 - atomic64_t child_total_time_enabled;
65498 - atomic64_t child_total_time_running;
65499 + atomic64_unchecked_t child_total_time_enabled;
65500 + atomic64_unchecked_t child_total_time_running;
65501
65502 /*
65503 * Protect attach/detach and child_list:
65504 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
65505 index ad1a427..6419649 100644
65506 --- a/include/linux/pipe_fs_i.h
65507 +++ b/include/linux/pipe_fs_i.h
65508 @@ -45,9 +45,9 @@ struct pipe_buffer {
65509 struct pipe_inode_info {
65510 wait_queue_head_t wait;
65511 unsigned int nrbufs, curbuf, buffers;
65512 - unsigned int readers;
65513 - unsigned int writers;
65514 - unsigned int waiting_writers;
65515 + atomic_t readers;
65516 + atomic_t writers;
65517 + atomic_t waiting_writers;
65518 unsigned int r_counter;
65519 unsigned int w_counter;
65520 struct page *tmp_page;
65521 diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
65522 index 5f28cae..3d23723 100644
65523 --- a/include/linux/platform_data/usb-ehci-s5p.h
65524 +++ b/include/linux/platform_data/usb-ehci-s5p.h
65525 @@ -14,7 +14,7 @@
65526 struct s5p_ehci_platdata {
65527 int (*phy_init)(struct platform_device *pdev, int type);
65528 int (*phy_exit)(struct platform_device *pdev, int type);
65529 -};
65530 +} __no_const;
65531
65532 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
65533
65534 diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
65535 index 8348866..25604bd 100644
65536 --- a/include/linux/pm_clock.h
65537 +++ b/include/linux/pm_clock.h
65538 @@ -13,7 +13,7 @@
65539 #include <linux/notifier.h>
65540
65541 struct pm_clk_notifier_block {
65542 - struct notifier_block nb;
65543 + notifier_block_no_const nb;
65544 struct dev_pm_domain *pm_domain;
65545 char *con_ids[];
65546 };
65547 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
65548 index f271860..6b3bec5 100644
65549 --- a/include/linux/pm_runtime.h
65550 +++ b/include/linux/pm_runtime.h
65551 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
65552
65553 static inline void pm_runtime_mark_last_busy(struct device *dev)
65554 {
65555 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
65556 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
65557 }
65558
65559 #else /* !CONFIG_PM_RUNTIME */
65560 diff --git a/include/linux/poison.h b/include/linux/poison.h
65561 index 2110a81..13a11bb 100644
65562 --- a/include/linux/poison.h
65563 +++ b/include/linux/poison.h
65564 @@ -19,8 +19,8 @@
65565 * under normal circumstances, used to verify that nobody uses
65566 * non-initialized list entries.
65567 */
65568 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
65569 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
65570 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
65571 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
65572
65573 /********** include/linux/timer.h **********/
65574 /*
65575 diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
65576 index 0e86840..13032aa 100644
65577 --- a/include/linux/power/charger-manager.h
65578 +++ b/include/linux/power/charger-manager.h
65579 @@ -88,7 +88,7 @@ struct charger_cable {
65580 /* The charger-manager use Exton framework*/
65581 struct extcon_specific_cable_nb extcon_dev;
65582 struct work_struct wq;
65583 - struct notifier_block nb;
65584 + notifier_block_no_const nb;
65585
65586 /* The state of charger cable */
65587 bool attached;
65588 diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
65589 index 4a496eb..d9c5659 100644
65590 --- a/include/linux/power/smartreflex.h
65591 +++ b/include/linux/power/smartreflex.h
65592 @@ -238,7 +238,7 @@ struct omap_sr_class_data {
65593 int (*notify)(struct omap_sr *sr, u32 status);
65594 u8 notify_flags;
65595 u8 class_type;
65596 -};
65597 +} __do_const;
65598
65599 /**
65600 * struct omap_sr_nvalue_table - Smartreflex n-target value info
65601 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
65602 index 5a710b9..0b0dab9 100644
65603 --- a/include/linux/preempt.h
65604 +++ b/include/linux/preempt.h
65605 @@ -126,7 +126,7 @@ struct preempt_ops {
65606 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
65607 void (*sched_out)(struct preempt_notifier *notifier,
65608 struct task_struct *next);
65609 -};
65610 +} __no_const;
65611
65612 /**
65613 * preempt_notifier - key for installing preemption notifiers
65614 diff --git a/include/linux/printk.h b/include/linux/printk.h
65615 index 9afc01e..92c32e8 100644
65616 --- a/include/linux/printk.h
65617 +++ b/include/linux/printk.h
65618 @@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
65619 extern int printk_needs_cpu(int cpu);
65620 extern void printk_tick(void);
65621
65622 +extern int kptr_restrict;
65623 +
65624 #ifdef CONFIG_PRINTK
65625 asmlinkage __printf(5, 0)
65626 int vprintk_emit(int facility, int level,
65627 @@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
65628
65629 extern int printk_delay_msec;
65630 extern int dmesg_restrict;
65631 -extern int kptr_restrict;
65632
65633 void log_buf_kexec_setup(void);
65634 void __init setup_log_buf(int early);
65635 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
65636 index 3fd2e87..d93a721 100644
65637 --- a/include/linux/proc_fs.h
65638 +++ b/include/linux/proc_fs.h
65639 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
65640 return proc_create_data(name, mode, parent, proc_fops, NULL);
65641 }
65642
65643 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
65644 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
65645 +{
65646 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65647 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
65648 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65649 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
65650 +#else
65651 + return proc_create_data(name, mode, parent, proc_fops, NULL);
65652 +#endif
65653 +}
65654 +
65655 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
65656 umode_t mode, struct proc_dir_entry *base,
65657 read_proc_t *read_proc, void * data)
65658 @@ -258,7 +270,7 @@ union proc_op {
65659 int (*proc_show)(struct seq_file *m,
65660 struct pid_namespace *ns, struct pid *pid,
65661 struct task_struct *task);
65662 -};
65663 +} __no_const;
65664
65665 struct ctl_table_header;
65666 struct ctl_table;
65667 diff --git a/include/linux/random.h b/include/linux/random.h
65668 index 6330ed4..419c6c3 100644
65669 --- a/include/linux/random.h
65670 +++ b/include/linux/random.h
65671 @@ -30,12 +30,17 @@ void srandom32(u32 seed);
65672
65673 u32 prandom32(struct rnd_state *);
65674
65675 +static inline unsigned long pax_get_random_long(void)
65676 +{
65677 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
65678 +}
65679 +
65680 /*
65681 * Handle minimum values for seeds
65682 */
65683 static inline u32 __seed(u32 x, u32 m)
65684 {
65685 - return (x < m) ? x + m : x;
65686 + return (x <= m) ? x + m + 1 : x;
65687 }
65688
65689 /**
65690 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
65691 index 23b3630..e1bc12b 100644
65692 --- a/include/linux/reboot.h
65693 +++ b/include/linux/reboot.h
65694 @@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
65695 * Architecture-specific implementations of sys_reboot commands.
65696 */
65697
65698 -extern void machine_restart(char *cmd);
65699 -extern void machine_halt(void);
65700 -extern void machine_power_off(void);
65701 +extern void machine_restart(char *cmd) __noreturn;
65702 +extern void machine_halt(void) __noreturn;
65703 +extern void machine_power_off(void) __noreturn;
65704
65705 extern void machine_shutdown(void);
65706 struct pt_regs;
65707 @@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
65708 */
65709
65710 extern void kernel_restart_prepare(char *cmd);
65711 -extern void kernel_restart(char *cmd);
65712 -extern void kernel_halt(void);
65713 -extern void kernel_power_off(void);
65714 +extern void kernel_restart(char *cmd) __noreturn;
65715 +extern void kernel_halt(void) __noreturn;
65716 +extern void kernel_power_off(void) __noreturn;
65717
65718 extern int C_A_D; /* for sysctl */
65719 void ctrl_alt_del(void);
65720 @@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
65721 * Emergency restart, callable from an interrupt handler.
65722 */
65723
65724 -extern void emergency_restart(void);
65725 +extern void emergency_restart(void) __noreturn;
65726 #include <asm/emergency-restart.h>
65727
65728 #endif /* _LINUX_REBOOT_H */
65729 diff --git a/include/linux/regset.h b/include/linux/regset.h
65730 index 8e0c9fe..fdb64bc 100644
65731 --- a/include/linux/regset.h
65732 +++ b/include/linux/regset.h
65733 @@ -161,7 +161,7 @@ struct user_regset {
65734 unsigned int align;
65735 unsigned int bias;
65736 unsigned int core_note_type;
65737 -};
65738 +} __do_const;
65739
65740 /**
65741 * struct user_regset_view - available regsets
65742 diff --git a/include/linux/relay.h b/include/linux/relay.h
65743 index 91cacc3..b55ff74 100644
65744 --- a/include/linux/relay.h
65745 +++ b/include/linux/relay.h
65746 @@ -160,7 +160,7 @@ struct rchan_callbacks
65747 * The callback should return 0 if successful, negative if not.
65748 */
65749 int (*remove_buf_file)(struct dentry *dentry);
65750 -};
65751 +} __no_const;
65752
65753 /*
65754 * CONFIG_RELAY kernel API, kernel/relay.c
65755 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
65756 index d901078..0b7d00e 100644
65757 --- a/include/linux/rfkill.h
65758 +++ b/include/linux/rfkill.h
65759 @@ -63,6 +63,7 @@ struct rfkill_ops {
65760 void (*query)(struct rfkill *rfkill, void *data);
65761 int (*set_block)(void *data, bool blocked);
65762 };
65763 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
65764
65765 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
65766 /**
65767 diff --git a/include/linux/rio.h b/include/linux/rio.h
65768 index a3e7842..d973ca6 100644
65769 --- a/include/linux/rio.h
65770 +++ b/include/linux/rio.h
65771 @@ -339,7 +339,7 @@ struct rio_ops {
65772 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
65773 u64 rstart, u32 size, u32 flags);
65774 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
65775 -};
65776 +} __no_const;
65777
65778 #define RIO_RESOURCE_MEM 0x00000100
65779 #define RIO_RESOURCE_DOORBELL 0x00000200
65780 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
65781 index bfe1f47..6a33ee3 100644
65782 --- a/include/linux/rmap.h
65783 +++ b/include/linux/rmap.h
65784 @@ -134,8 +134,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
65785 void anon_vma_init(void); /* create anon_vma_cachep */
65786 int anon_vma_prepare(struct vm_area_struct *);
65787 void unlink_anon_vmas(struct vm_area_struct *);
65788 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
65789 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
65790 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
65791 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
65792
65793 static inline void anon_vma_merge(struct vm_area_struct *vma,
65794 struct vm_area_struct *next)
65795 diff --git a/include/linux/sched.h b/include/linux/sched.h
65796 index 0dd42a0..cc9bffb 100644
65797 --- a/include/linux/sched.h
65798 +++ b/include/linux/sched.h
65799 @@ -61,6 +61,7 @@ struct bio_list;
65800 struct fs_struct;
65801 struct perf_event_context;
65802 struct blk_plug;
65803 +struct linux_binprm;
65804
65805 /*
65806 * List of flags we want to share for kernel threads,
65807 @@ -344,10 +345,13 @@ struct user_namespace;
65808 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
65809
65810 extern int sysctl_max_map_count;
65811 +extern unsigned long sysctl_heap_stack_gap;
65812
65813 #include <linux/aio.h>
65814
65815 #ifdef CONFIG_MMU
65816 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
65817 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
65818 extern void arch_pick_mmap_layout(struct mm_struct *mm);
65819 extern unsigned long
65820 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
65821 @@ -614,6 +618,17 @@ struct signal_struct {
65822 #ifdef CONFIG_TASKSTATS
65823 struct taskstats *stats;
65824 #endif
65825 +
65826 +#ifdef CONFIG_GRKERNSEC
65827 + u32 curr_ip;
65828 + u32 saved_ip;
65829 + u32 gr_saddr;
65830 + u32 gr_daddr;
65831 + u16 gr_sport;
65832 + u16 gr_dport;
65833 + u8 used_accept:1;
65834 +#endif
65835 +
65836 #ifdef CONFIG_AUDIT
65837 unsigned audit_tty;
65838 struct tty_audit_buf *tty_audit_buf;
65839 @@ -691,6 +706,11 @@ struct user_struct {
65840 struct key *session_keyring; /* UID's default session keyring */
65841 #endif
65842
65843 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65844 + unsigned int banned;
65845 + unsigned long ban_expires;
65846 +#endif
65847 +
65848 /* Hash table maintenance information */
65849 struct hlist_node uidhash_node;
65850 kuid_t uid;
65851 @@ -1312,8 +1332,8 @@ struct task_struct {
65852 struct list_head thread_group;
65853
65854 struct completion *vfork_done; /* for vfork() */
65855 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
65856 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
65857 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
65858 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
65859
65860 cputime_t utime, stime, utimescaled, stimescaled;
65861 cputime_t gtime;
65862 @@ -1329,11 +1349,6 @@ struct task_struct {
65863 struct task_cputime cputime_expires;
65864 struct list_head cpu_timers[3];
65865
65866 -/* process credentials */
65867 - const struct cred __rcu *real_cred; /* objective and real subjective task
65868 - * credentials (COW) */
65869 - const struct cred __rcu *cred; /* effective (overridable) subjective task
65870 - * credentials (COW) */
65871 char comm[TASK_COMM_LEN]; /* executable name excluding path
65872 - access with [gs]et_task_comm (which lock
65873 it with task_lock())
65874 @@ -1350,6 +1365,10 @@ struct task_struct {
65875 #endif
65876 /* CPU-specific state of this task */
65877 struct thread_struct thread;
65878 +/* thread_info moved to task_struct */
65879 +#ifdef CONFIG_X86
65880 + struct thread_info tinfo;
65881 +#endif
65882 /* filesystem information */
65883 struct fs_struct *fs;
65884 /* open file information */
65885 @@ -1423,6 +1442,10 @@ struct task_struct {
65886 gfp_t lockdep_reclaim_gfp;
65887 #endif
65888
65889 +/* process credentials */
65890 + const struct cred __rcu *real_cred; /* objective and real subjective task
65891 + * credentials (COW) */
65892 +
65893 /* journalling filesystem info */
65894 void *journal_info;
65895
65896 @@ -1461,6 +1484,10 @@ struct task_struct {
65897 /* cg_list protected by css_set_lock and tsk->alloc_lock */
65898 struct list_head cg_list;
65899 #endif
65900 +
65901 + const struct cred __rcu *cred; /* effective (overridable) subjective task
65902 + * credentials (COW) */
65903 +
65904 #ifdef CONFIG_FUTEX
65905 struct robust_list_head __user *robust_list;
65906 #ifdef CONFIG_COMPAT
65907 @@ -1548,8 +1575,75 @@ struct task_struct {
65908 #ifdef CONFIG_UPROBES
65909 struct uprobe_task *utask;
65910 #endif
65911 +
65912 +#ifdef CONFIG_GRKERNSEC
65913 + /* grsecurity */
65914 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65915 + u64 exec_id;
65916 +#endif
65917 +#ifdef CONFIG_GRKERNSEC_SETXID
65918 + const struct cred *delayed_cred;
65919 +#endif
65920 + struct dentry *gr_chroot_dentry;
65921 + struct acl_subject_label *acl;
65922 + struct acl_role_label *role;
65923 + struct file *exec_file;
65924 + unsigned long brute_expires;
65925 + u16 acl_role_id;
65926 + /* is this the task that authenticated to the special role */
65927 + u8 acl_sp_role;
65928 + u8 is_writable;
65929 + u8 brute;
65930 + u8 gr_is_chrooted;
65931 +#endif
65932 +
65933 };
65934
65935 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
65936 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
65937 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
65938 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
65939 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
65940 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
65941 +
65942 +#ifdef CONFIG_PAX_SOFTMODE
65943 +extern int pax_softmode;
65944 +#endif
65945 +
65946 +extern int pax_check_flags(unsigned long *);
65947 +
65948 +/* if tsk != current then task_lock must be held on it */
65949 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65950 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
65951 +{
65952 + if (likely(tsk->mm))
65953 + return tsk->mm->pax_flags;
65954 + else
65955 + return 0UL;
65956 +}
65957 +
65958 +/* if tsk != current then task_lock must be held on it */
65959 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
65960 +{
65961 + if (likely(tsk->mm)) {
65962 + tsk->mm->pax_flags = flags;
65963 + return 0;
65964 + }
65965 + return -EINVAL;
65966 +}
65967 +#endif
65968 +
65969 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
65970 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
65971 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
65972 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
65973 +#endif
65974 +
65975 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
65976 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
65977 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
65978 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
65979 +
65980 /* Future-safe accessor for struct task_struct's cpus_allowed. */
65981 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
65982
65983 @@ -2092,7 +2186,9 @@ void yield(void);
65984 extern struct exec_domain default_exec_domain;
65985
65986 union thread_union {
65987 +#ifndef CONFIG_X86
65988 struct thread_info thread_info;
65989 +#endif
65990 unsigned long stack[THREAD_SIZE/sizeof(long)];
65991 };
65992
65993 @@ -2125,6 +2221,7 @@ extern struct pid_namespace init_pid_ns;
65994 */
65995
65996 extern struct task_struct *find_task_by_vpid(pid_t nr);
65997 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
65998 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
65999 struct pid_namespace *ns);
66000
66001 @@ -2281,7 +2378,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
66002 extern void exit_itimers(struct signal_struct *);
66003 extern void flush_itimer_signals(void);
66004
66005 -extern void do_group_exit(int);
66006 +extern __noreturn void do_group_exit(int);
66007
66008 extern void daemonize(const char *, ...);
66009 extern int allow_signal(int);
66010 @@ -2485,9 +2582,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
66011
66012 #endif
66013
66014 -static inline int object_is_on_stack(void *obj)
66015 +static inline int object_starts_on_stack(void *obj)
66016 {
66017 - void *stack = task_stack_page(current);
66018 + const void *stack = task_stack_page(current);
66019
66020 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
66021 }
66022 diff --git a/include/linux/security.h b/include/linux/security.h
66023 index 05e88bd..5cda002 100644
66024 --- a/include/linux/security.h
66025 +++ b/include/linux/security.h
66026 @@ -26,6 +26,7 @@
66027 #include <linux/capability.h>
66028 #include <linux/slab.h>
66029 #include <linux/err.h>
66030 +#include <linux/grsecurity.h>
66031
66032 struct linux_binprm;
66033 struct cred;
66034 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
66035 index 68a04a3..866e6a1 100644
66036 --- a/include/linux/seq_file.h
66037 +++ b/include/linux/seq_file.h
66038 @@ -26,6 +26,9 @@ struct seq_file {
66039 struct mutex lock;
66040 const struct seq_operations *op;
66041 int poll_event;
66042 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66043 + u64 exec_id;
66044 +#endif
66045 #ifdef CONFIG_USER_NS
66046 struct user_namespace *user_ns;
66047 #endif
66048 @@ -38,6 +41,7 @@ struct seq_operations {
66049 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
66050 int (*show) (struct seq_file *m, void *v);
66051 };
66052 +typedef struct seq_operations __no_const seq_operations_no_const;
66053
66054 #define SEQ_SKIP 1
66055
66056 diff --git a/include/linux/shm.h b/include/linux/shm.h
66057 index bcf8a6a..4d0af77 100644
66058 --- a/include/linux/shm.h
66059 +++ b/include/linux/shm.h
66060 @@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
66061
66062 /* The task created the shm object. NULL if the task is dead. */
66063 struct task_struct *shm_creator;
66064 +#ifdef CONFIG_GRKERNSEC
66065 + time_t shm_createtime;
66066 + pid_t shm_lapid;
66067 +#endif
66068 };
66069
66070 /* shm_mode upper byte flags */
66071 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
66072 index 6a2c34e..a1f320f 100644
66073 --- a/include/linux/skbuff.h
66074 +++ b/include/linux/skbuff.h
66075 @@ -577,7 +577,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
66076 extern struct sk_buff *__alloc_skb(unsigned int size,
66077 gfp_t priority, int flags, int node);
66078 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
66079 -static inline struct sk_buff *alloc_skb(unsigned int size,
66080 +static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
66081 gfp_t priority)
66082 {
66083 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
66084 @@ -687,7 +687,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
66085 */
66086 static inline int skb_queue_empty(const struct sk_buff_head *list)
66087 {
66088 - return list->next == (struct sk_buff *)list;
66089 + return list->next == (const struct sk_buff *)list;
66090 }
66091
66092 /**
66093 @@ -700,7 +700,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
66094 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
66095 const struct sk_buff *skb)
66096 {
66097 - return skb->next == (struct sk_buff *)list;
66098 + return skb->next == (const struct sk_buff *)list;
66099 }
66100
66101 /**
66102 @@ -713,7 +713,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
66103 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
66104 const struct sk_buff *skb)
66105 {
66106 - return skb->prev == (struct sk_buff *)list;
66107 + return skb->prev == (const struct sk_buff *)list;
66108 }
66109
66110 /**
66111 @@ -1626,7 +1626,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
66112 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
66113 */
66114 #ifndef NET_SKB_PAD
66115 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
66116 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
66117 #endif
66118
66119 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
66120 @@ -2204,7 +2204,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
66121 int noblock, int *err);
66122 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
66123 struct poll_table_struct *wait);
66124 -extern int skb_copy_datagram_iovec(const struct sk_buff *from,
66125 +extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
66126 int offset, struct iovec *to,
66127 int size);
66128 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
66129 diff --git a/include/linux/slab.h b/include/linux/slab.h
66130 index 83d1a14..209e1a6 100644
66131 --- a/include/linux/slab.h
66132 +++ b/include/linux/slab.h
66133 @@ -11,12 +11,20 @@
66134
66135 #include <linux/gfp.h>
66136 #include <linux/types.h>
66137 +#include <linux/err.h>
66138
66139 /*
66140 * Flags to pass to kmem_cache_create().
66141 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
66142 */
66143 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
66144 +
66145 +#ifdef CONFIG_PAX_USERCOPY_SLABS
66146 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
66147 +#else
66148 +#define SLAB_USERCOPY 0x00000000UL
66149 +#endif
66150 +
66151 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
66152 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
66153 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
66154 @@ -87,10 +95,13 @@
66155 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
66156 * Both make kfree a no-op.
66157 */
66158 -#define ZERO_SIZE_PTR ((void *)16)
66159 +#define ZERO_SIZE_PTR \
66160 +({ \
66161 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
66162 + (void *)(-MAX_ERRNO-1L); \
66163 +})
66164
66165 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
66166 - (unsigned long)ZERO_SIZE_PTR)
66167 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
66168
66169 /*
66170 * Common fields provided in kmem_cache by all slab allocators
66171 @@ -110,7 +121,7 @@ struct kmem_cache {
66172 unsigned int align; /* Alignment as calculated */
66173 unsigned long flags; /* Active flags on the slab */
66174 const char *name; /* Slab name for sysfs */
66175 - int refcount; /* Use counter */
66176 + atomic_t refcount; /* Use counter */
66177 void (*ctor)(void *); /* Called on object slot creation */
66178 struct list_head list; /* List of all slab caches on the system */
66179 };
66180 @@ -185,6 +196,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
66181 void kfree(const void *);
66182 void kzfree(const void *);
66183 size_t ksize(const void *);
66184 +const char *check_heap_object(const void *ptr, unsigned long n);
66185 +bool is_usercopy_object(const void *ptr);
66186
66187 /*
66188 * Allocator specific definitions. These are mainly used to establish optimized
66189 @@ -323,7 +336,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
66190 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
66191 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
66192 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
66193 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
66194 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
66195 #define kmalloc_track_caller(size, flags) \
66196 __kmalloc_track_caller(size, flags, _RET_IP_)
66197 #else
66198 @@ -343,7 +356,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
66199 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
66200 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
66201 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
66202 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
66203 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
66204 #define kmalloc_node_track_caller(size, flags, node) \
66205 __kmalloc_node_track_caller(size, flags, node, \
66206 _RET_IP_)
66207 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
66208 index cc290f0..0ba60931 100644
66209 --- a/include/linux/slab_def.h
66210 +++ b/include/linux/slab_def.h
66211 @@ -52,7 +52,7 @@ struct kmem_cache {
66212 /* 4) cache creation/removal */
66213 const char *name;
66214 struct list_head list;
66215 - int refcount;
66216 + atomic_t refcount;
66217 int object_size;
66218 int align;
66219
66220 @@ -68,10 +68,10 @@ struct kmem_cache {
66221 unsigned long node_allocs;
66222 unsigned long node_frees;
66223 unsigned long node_overflow;
66224 - atomic_t allochit;
66225 - atomic_t allocmiss;
66226 - atomic_t freehit;
66227 - atomic_t freemiss;
66228 + atomic_unchecked_t allochit;
66229 + atomic_unchecked_t allocmiss;
66230 + atomic_unchecked_t freehit;
66231 + atomic_unchecked_t freemiss;
66232
66233 /*
66234 * If debugging is enabled, then the allocator can add additional
66235 @@ -104,11 +104,16 @@ struct cache_sizes {
66236 #ifdef CONFIG_ZONE_DMA
66237 struct kmem_cache *cs_dmacachep;
66238 #endif
66239 +
66240 +#ifdef CONFIG_PAX_USERCOPY_SLABS
66241 + struct kmem_cache *cs_usercopycachep;
66242 +#endif
66243 +
66244 };
66245 extern struct cache_sizes malloc_sizes[];
66246
66247 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
66248 -void *__kmalloc(size_t size, gfp_t flags);
66249 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
66250
66251 #ifdef CONFIG_TRACING
66252 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
66253 @@ -145,6 +150,13 @@ found:
66254 cachep = malloc_sizes[i].cs_dmacachep;
66255 else
66256 #endif
66257 +
66258 +#ifdef CONFIG_PAX_USERCOPY_SLABS
66259 + if (flags & GFP_USERCOPY)
66260 + cachep = malloc_sizes[i].cs_usercopycachep;
66261 + else
66262 +#endif
66263 +
66264 cachep = malloc_sizes[i].cs_cachep;
66265
66266 ret = kmem_cache_alloc_trace(cachep, flags, size);
66267 @@ -155,7 +167,7 @@ found:
66268 }
66269
66270 #ifdef CONFIG_NUMA
66271 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
66272 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66273 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
66274
66275 #ifdef CONFIG_TRACING
66276 @@ -198,6 +210,13 @@ found:
66277 cachep = malloc_sizes[i].cs_dmacachep;
66278 else
66279 #endif
66280 +
66281 +#ifdef CONFIG_PAX_USERCOPY_SLABS
66282 + if (flags & GFP_USERCOPY)
66283 + cachep = malloc_sizes[i].cs_usercopycachep;
66284 + else
66285 +#endif
66286 +
66287 cachep = malloc_sizes[i].cs_cachep;
66288
66289 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
66290 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
66291 index f28e14a..7831211 100644
66292 --- a/include/linux/slob_def.h
66293 +++ b/include/linux/slob_def.h
66294 @@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
66295 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
66296 }
66297
66298 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
66299 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66300
66301 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
66302 {
66303 @@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
66304 return __kmalloc_node(size, flags, NUMA_NO_NODE);
66305 }
66306
66307 -static __always_inline void *__kmalloc(size_t size, gfp_t flags)
66308 +static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
66309 {
66310 return kmalloc(size, flags);
66311 }
66312 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
66313 index df448ad..b99e7f6 100644
66314 --- a/include/linux/slub_def.h
66315 +++ b/include/linux/slub_def.h
66316 @@ -91,7 +91,7 @@ struct kmem_cache {
66317 struct kmem_cache_order_objects max;
66318 struct kmem_cache_order_objects min;
66319 gfp_t allocflags; /* gfp flags to use on each alloc */
66320 - int refcount; /* Refcount for slab cache destroy */
66321 + atomic_t refcount; /* Refcount for slab cache destroy */
66322 void (*ctor)(void *);
66323 int inuse; /* Offset to metadata */
66324 int align; /* Alignment */
66325 @@ -152,7 +152,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
66326 * Sorry that the following has to be that ugly but some versions of GCC
66327 * have trouble with constant propagation and loops.
66328 */
66329 -static __always_inline int kmalloc_index(size_t size)
66330 +static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
66331 {
66332 if (!size)
66333 return 0;
66334 @@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
66335 }
66336
66337 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
66338 -void *__kmalloc(size_t size, gfp_t flags);
66339 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
66340
66341 static __always_inline void *
66342 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
66343 @@ -258,7 +258,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
66344 }
66345 #endif
66346
66347 -static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
66348 +static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
66349 {
66350 unsigned int order = get_order(size);
66351 return kmalloc_order_trace(size, flags, order);
66352 @@ -283,7 +283,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
66353 }
66354
66355 #ifdef CONFIG_NUMA
66356 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
66357 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66358 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
66359
66360 #ifdef CONFIG_TRACING
66361 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
66362 index 680f9a3..f13aeb0 100644
66363 --- a/include/linux/sonet.h
66364 +++ b/include/linux/sonet.h
66365 @@ -7,7 +7,7 @@
66366 #include <uapi/linux/sonet.h>
66367
66368 struct k_sonet_stats {
66369 -#define __HANDLE_ITEM(i) atomic_t i
66370 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
66371 __SONET_ITEMS
66372 #undef __HANDLE_ITEM
66373 };
66374 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
66375 index 34206b8..f019e06 100644
66376 --- a/include/linux/sunrpc/clnt.h
66377 +++ b/include/linux/sunrpc/clnt.h
66378 @@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
66379 {
66380 switch (sap->sa_family) {
66381 case AF_INET:
66382 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
66383 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
66384 case AF_INET6:
66385 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
66386 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
66387 }
66388 return 0;
66389 }
66390 @@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
66391 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
66392 const struct sockaddr *src)
66393 {
66394 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
66395 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
66396 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
66397
66398 dsin->sin_family = ssin->sin_family;
66399 @@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
66400 if (sa->sa_family != AF_INET6)
66401 return 0;
66402
66403 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
66404 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
66405 }
66406
66407 #endif /* __KERNEL__ */
66408 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
66409 index dc0c3cc..8503fb6 100644
66410 --- a/include/linux/sunrpc/sched.h
66411 +++ b/include/linux/sunrpc/sched.h
66412 @@ -106,6 +106,7 @@ struct rpc_call_ops {
66413 void (*rpc_count_stats)(struct rpc_task *, void *);
66414 void (*rpc_release)(void *);
66415 };
66416 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
66417
66418 struct rpc_task_setup {
66419 struct rpc_task *task;
66420 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
66421 index 0b8e3e6..33e0a01 100644
66422 --- a/include/linux/sunrpc/svc_rdma.h
66423 +++ b/include/linux/sunrpc/svc_rdma.h
66424 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
66425 extern unsigned int svcrdma_max_requests;
66426 extern unsigned int svcrdma_max_req_size;
66427
66428 -extern atomic_t rdma_stat_recv;
66429 -extern atomic_t rdma_stat_read;
66430 -extern atomic_t rdma_stat_write;
66431 -extern atomic_t rdma_stat_sq_starve;
66432 -extern atomic_t rdma_stat_rq_starve;
66433 -extern atomic_t rdma_stat_rq_poll;
66434 -extern atomic_t rdma_stat_rq_prod;
66435 -extern atomic_t rdma_stat_sq_poll;
66436 -extern atomic_t rdma_stat_sq_prod;
66437 +extern atomic_unchecked_t rdma_stat_recv;
66438 +extern atomic_unchecked_t rdma_stat_read;
66439 +extern atomic_unchecked_t rdma_stat_write;
66440 +extern atomic_unchecked_t rdma_stat_sq_starve;
66441 +extern atomic_unchecked_t rdma_stat_rq_starve;
66442 +extern atomic_unchecked_t rdma_stat_rq_poll;
66443 +extern atomic_unchecked_t rdma_stat_rq_prod;
66444 +extern atomic_unchecked_t rdma_stat_sq_poll;
66445 +extern atomic_unchecked_t rdma_stat_sq_prod;
66446
66447 #define RPCRDMA_VERSION 1
66448
66449 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
66450 index cd844a6..3ca3592 100644
66451 --- a/include/linux/sysctl.h
66452 +++ b/include/linux/sysctl.h
66453 @@ -41,6 +41,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
66454
66455 extern int proc_dostring(struct ctl_table *, int,
66456 void __user *, size_t *, loff_t *);
66457 +extern int proc_dostring_modpriv(struct ctl_table *, int,
66458 + void __user *, size_t *, loff_t *);
66459 extern int proc_dointvec(struct ctl_table *, int,
66460 void __user *, size_t *, loff_t *);
66461 extern int proc_dointvec_minmax(struct ctl_table *, int,
66462 diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
66463 index 7faf933..eb6f5e3 100644
66464 --- a/include/linux/sysrq.h
66465 +++ b/include/linux/sysrq.h
66466 @@ -36,7 +36,7 @@ struct sysrq_key_op {
66467 char *help_msg;
66468 char *action_msg;
66469 int enable_mask;
66470 -};
66471 +} __do_const;
66472
66473 #ifdef CONFIG_MAGIC_SYSRQ
66474
66475 diff --git a/include/linux/tty.h b/include/linux/tty.h
66476 index f0b4eb4..1c4854e 100644
66477 --- a/include/linux/tty.h
66478 +++ b/include/linux/tty.h
66479 @@ -192,7 +192,7 @@ struct tty_port {
66480 const struct tty_port_operations *ops; /* Port operations */
66481 spinlock_t lock; /* Lock protecting tty field */
66482 int blocked_open; /* Waiting to open */
66483 - int count; /* Usage count */
66484 + atomic_t count; /* Usage count */
66485 wait_queue_head_t open_wait; /* Open waiters */
66486 wait_queue_head_t close_wait; /* Close waiters */
66487 wait_queue_head_t delta_msr_wait; /* Modem status change */
66488 @@ -513,7 +513,7 @@ extern int tty_port_open(struct tty_port *port,
66489 struct tty_struct *tty, struct file *filp);
66490 static inline int tty_port_users(struct tty_port *port)
66491 {
66492 - return port->count + port->blocked_open;
66493 + return atomic_read(&port->count) + port->blocked_open;
66494 }
66495
66496 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
66497 diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
66498 index dd976cf..e272742 100644
66499 --- a/include/linux/tty_driver.h
66500 +++ b/include/linux/tty_driver.h
66501 @@ -284,7 +284,7 @@ struct tty_operations {
66502 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
66503 #endif
66504 const struct file_operations *proc_fops;
66505 -};
66506 +} __do_const;
66507
66508 struct tty_driver {
66509 int magic; /* magic number for this structure */
66510 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
66511 index fb79dd8d..07d4773 100644
66512 --- a/include/linux/tty_ldisc.h
66513 +++ b/include/linux/tty_ldisc.h
66514 @@ -149,7 +149,7 @@ struct tty_ldisc_ops {
66515
66516 struct module *owner;
66517
66518 - int refcount;
66519 + atomic_t refcount;
66520 };
66521
66522 struct tty_ldisc {
66523 diff --git a/include/linux/types.h b/include/linux/types.h
66524 index 1cc0e4b..0d50edf 100644
66525 --- a/include/linux/types.h
66526 +++ b/include/linux/types.h
66527 @@ -175,10 +175,26 @@ typedef struct {
66528 int counter;
66529 } atomic_t;
66530
66531 +#ifdef CONFIG_PAX_REFCOUNT
66532 +typedef struct {
66533 + int counter;
66534 +} atomic_unchecked_t;
66535 +#else
66536 +typedef atomic_t atomic_unchecked_t;
66537 +#endif
66538 +
66539 #ifdef CONFIG_64BIT
66540 typedef struct {
66541 long counter;
66542 } atomic64_t;
66543 +
66544 +#ifdef CONFIG_PAX_REFCOUNT
66545 +typedef struct {
66546 + long counter;
66547 +} atomic64_unchecked_t;
66548 +#else
66549 +typedef atomic64_t atomic64_unchecked_t;
66550 +#endif
66551 #endif
66552
66553 struct list_head {
66554 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
66555 index 5ca0951..ab496a5 100644
66556 --- a/include/linux/uaccess.h
66557 +++ b/include/linux/uaccess.h
66558 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
66559 long ret; \
66560 mm_segment_t old_fs = get_fs(); \
66561 \
66562 - set_fs(KERNEL_DS); \
66563 pagefault_disable(); \
66564 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
66565 - pagefault_enable(); \
66566 + set_fs(KERNEL_DS); \
66567 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
66568 set_fs(old_fs); \
66569 + pagefault_enable(); \
66570 ret; \
66571 })
66572
66573 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
66574 index 99c1b4d..bb94261 100644
66575 --- a/include/linux/unaligned/access_ok.h
66576 +++ b/include/linux/unaligned/access_ok.h
66577 @@ -6,32 +6,32 @@
66578
66579 static inline u16 get_unaligned_le16(const void *p)
66580 {
66581 - return le16_to_cpup((__le16 *)p);
66582 + return le16_to_cpup((const __le16 *)p);
66583 }
66584
66585 static inline u32 get_unaligned_le32(const void *p)
66586 {
66587 - return le32_to_cpup((__le32 *)p);
66588 + return le32_to_cpup((const __le32 *)p);
66589 }
66590
66591 static inline u64 get_unaligned_le64(const void *p)
66592 {
66593 - return le64_to_cpup((__le64 *)p);
66594 + return le64_to_cpup((const __le64 *)p);
66595 }
66596
66597 static inline u16 get_unaligned_be16(const void *p)
66598 {
66599 - return be16_to_cpup((__be16 *)p);
66600 + return be16_to_cpup((const __be16 *)p);
66601 }
66602
66603 static inline u32 get_unaligned_be32(const void *p)
66604 {
66605 - return be32_to_cpup((__be32 *)p);
66606 + return be32_to_cpup((const __be32 *)p);
66607 }
66608
66609 static inline u64 get_unaligned_be64(const void *p)
66610 {
66611 - return be64_to_cpup((__be64 *)p);
66612 + return be64_to_cpup((const __be64 *)p);
66613 }
66614
66615 static inline void put_unaligned_le16(u16 val, void *p)
66616 diff --git a/include/linux/usb.h b/include/linux/usb.h
66617 index 10278d1..e21ec3c 100644
66618 --- a/include/linux/usb.h
66619 +++ b/include/linux/usb.h
66620 @@ -551,7 +551,7 @@ struct usb_device {
66621 int maxchild;
66622
66623 u32 quirks;
66624 - atomic_t urbnum;
66625 + atomic_unchecked_t urbnum;
66626
66627 unsigned long active_duration;
66628
66629 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
66630 index c5d36c6..8478c90 100644
66631 --- a/include/linux/usb/renesas_usbhs.h
66632 +++ b/include/linux/usb/renesas_usbhs.h
66633 @@ -39,7 +39,7 @@ enum {
66634 */
66635 struct renesas_usbhs_driver_callback {
66636 int (*notify_hotplug)(struct platform_device *pdev);
66637 -};
66638 +} __no_const;
66639
66640 /*
66641 * callback functions for platform
66642 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
66643 * VBUS control is needed for Host
66644 */
66645 int (*set_vbus)(struct platform_device *pdev, int enable);
66646 -};
66647 +} __no_const;
66648
66649 /*
66650 * parameters for renesas usbhs
66651 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
66652 index 6f8fbcf..8259001 100644
66653 --- a/include/linux/vermagic.h
66654 +++ b/include/linux/vermagic.h
66655 @@ -25,9 +25,35 @@
66656 #define MODULE_ARCH_VERMAGIC ""
66657 #endif
66658
66659 +#ifdef CONFIG_PAX_REFCOUNT
66660 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
66661 +#else
66662 +#define MODULE_PAX_REFCOUNT ""
66663 +#endif
66664 +
66665 +#ifdef CONSTIFY_PLUGIN
66666 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
66667 +#else
66668 +#define MODULE_CONSTIFY_PLUGIN ""
66669 +#endif
66670 +
66671 +#ifdef STACKLEAK_PLUGIN
66672 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
66673 +#else
66674 +#define MODULE_STACKLEAK_PLUGIN ""
66675 +#endif
66676 +
66677 +#ifdef CONFIG_GRKERNSEC
66678 +#define MODULE_GRSEC "GRSEC "
66679 +#else
66680 +#define MODULE_GRSEC ""
66681 +#endif
66682 +
66683 #define VERMAGIC_STRING \
66684 UTS_RELEASE " " \
66685 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
66686 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
66687 - MODULE_ARCH_VERMAGIC
66688 + MODULE_ARCH_VERMAGIC \
66689 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
66690 + MODULE_GRSEC
66691
66692 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
66693 index 6071e91..ca6a489 100644
66694 --- a/include/linux/vmalloc.h
66695 +++ b/include/linux/vmalloc.h
66696 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
66697 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
66698 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
66699 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
66700 +
66701 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66702 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
66703 +#endif
66704 +
66705 /* bits [20..32] reserved for arch specific ioremap internals */
66706
66707 /*
66708 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
66709 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
66710 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
66711 unsigned long start, unsigned long end, gfp_t gfp_mask,
66712 - pgprot_t prot, int node, const void *caller);
66713 + pgprot_t prot, int node, const void *caller) __size_overflow(1);
66714 extern void vfree(const void *addr);
66715
66716 extern void *vmap(struct page **pages, unsigned int count,
66717 @@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
66718 extern void free_vm_area(struct vm_struct *area);
66719
66720 /* for /dev/kmem */
66721 -extern long vread(char *buf, char *addr, unsigned long count);
66722 -extern long vwrite(char *buf, char *addr, unsigned long count);
66723 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
66724 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
66725
66726 /*
66727 * Internals. Dont't use..
66728 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
66729 index 92a86b2..1d9eb3c 100644
66730 --- a/include/linux/vmstat.h
66731 +++ b/include/linux/vmstat.h
66732 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
66733 /*
66734 * Zone based page accounting with per cpu differentials.
66735 */
66736 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66737 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66738
66739 static inline void zone_page_state_add(long x, struct zone *zone,
66740 enum zone_stat_item item)
66741 {
66742 - atomic_long_add(x, &zone->vm_stat[item]);
66743 - atomic_long_add(x, &vm_stat[item]);
66744 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
66745 + atomic_long_add_unchecked(x, &vm_stat[item]);
66746 }
66747
66748 static inline unsigned long global_page_state(enum zone_stat_item item)
66749 {
66750 - long x = atomic_long_read(&vm_stat[item]);
66751 + long x = atomic_long_read_unchecked(&vm_stat[item]);
66752 #ifdef CONFIG_SMP
66753 if (x < 0)
66754 x = 0;
66755 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
66756 static inline unsigned long zone_page_state(struct zone *zone,
66757 enum zone_stat_item item)
66758 {
66759 - long x = atomic_long_read(&zone->vm_stat[item]);
66760 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
66761 #ifdef CONFIG_SMP
66762 if (x < 0)
66763 x = 0;
66764 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
66765 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
66766 enum zone_stat_item item)
66767 {
66768 - long x = atomic_long_read(&zone->vm_stat[item]);
66769 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
66770
66771 #ifdef CONFIG_SMP
66772 int cpu;
66773 @@ -218,8 +218,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
66774
66775 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
66776 {
66777 - atomic_long_inc(&zone->vm_stat[item]);
66778 - atomic_long_inc(&vm_stat[item]);
66779 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
66780 + atomic_long_inc_unchecked(&vm_stat[item]);
66781 }
66782
66783 static inline void __inc_zone_page_state(struct page *page,
66784 @@ -230,8 +230,8 @@ static inline void __inc_zone_page_state(struct page *page,
66785
66786 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
66787 {
66788 - atomic_long_dec(&zone->vm_stat[item]);
66789 - atomic_long_dec(&vm_stat[item]);
66790 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
66791 + atomic_long_dec_unchecked(&vm_stat[item]);
66792 }
66793
66794 static inline void __dec_zone_page_state(struct page *page,
66795 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
66796 index 944ecdf..a3994fc 100644
66797 --- a/include/media/saa7146_vv.h
66798 +++ b/include/media/saa7146_vv.h
66799 @@ -161,8 +161,8 @@ struct saa7146_ext_vv
66800 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
66801
66802 /* the extension can override this */
66803 - struct v4l2_ioctl_ops vid_ops;
66804 - struct v4l2_ioctl_ops vbi_ops;
66805 + v4l2_ioctl_ops_no_const vid_ops;
66806 + v4l2_ioctl_ops_no_const vbi_ops;
66807 /* pointer to the saa7146 core ops */
66808 const struct v4l2_ioctl_ops *core_ops;
66809
66810 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
66811 index 95d1c91..19a5d94 100644
66812 --- a/include/media/v4l2-dev.h
66813 +++ b/include/media/v4l2-dev.h
66814 @@ -76,7 +76,8 @@ struct v4l2_file_operations {
66815 int (*mmap) (struct file *, struct vm_area_struct *);
66816 int (*open) (struct file *);
66817 int (*release) (struct file *);
66818 -};
66819 +} __do_const;
66820 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
66821
66822 /*
66823 * Newer version of video_device, handled by videodev2.c
66824 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
66825 index e48b571..cec1748 100644
66826 --- a/include/media/v4l2-ioctl.h
66827 +++ b/include/media/v4l2-ioctl.h
66828 @@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
66829 long (*vidioc_default) (struct file *file, void *fh,
66830 bool valid_prio, int cmd, void *arg);
66831 };
66832 -
66833 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
66834
66835 /* v4l debugging and diagnostics */
66836
66837 diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
66838 index ede0369..067cf12 100644
66839 --- a/include/net/bluetooth/bluetooth.h
66840 +++ b/include/net/bluetooth/bluetooth.h
66841 @@ -204,7 +204,7 @@ struct bt_sock_list {
66842 struct hlist_head head;
66843 rwlock_t lock;
66844 #ifdef CONFIG_PROC_FS
66845 - struct file_operations fops;
66846 + file_operations_no_const fops;
66847 int (* custom_seq_show)(struct seq_file *, void *);
66848 #endif
66849 };
66850 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
66851 index bcb9cc3..f4ec722e 100644
66852 --- a/include/net/caif/caif_hsi.h
66853 +++ b/include/net/caif/caif_hsi.h
66854 @@ -98,7 +98,7 @@ struct cfhsi_cb_ops {
66855 void (*rx_done_cb) (struct cfhsi_cb_ops *drv);
66856 void (*wake_up_cb) (struct cfhsi_cb_ops *drv);
66857 void (*wake_down_cb) (struct cfhsi_cb_ops *drv);
66858 -};
66859 +} __no_const;
66860
66861 /* Structure implemented by HSI device. */
66862 struct cfhsi_ops {
66863 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
66864 index 9e5425b..8136ffc 100644
66865 --- a/include/net/caif/cfctrl.h
66866 +++ b/include/net/caif/cfctrl.h
66867 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
66868 void (*radioset_rsp)(void);
66869 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
66870 struct cflayer *client_layer);
66871 -};
66872 +} __no_const;
66873
66874 /* Link Setup Parameters for CAIF-Links. */
66875 struct cfctrl_link_param {
66876 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
66877 struct cfctrl {
66878 struct cfsrvl serv;
66879 struct cfctrl_rsp res;
66880 - atomic_t req_seq_no;
66881 - atomic_t rsp_seq_no;
66882 + atomic_unchecked_t req_seq_no;
66883 + atomic_unchecked_t rsp_seq_no;
66884 struct list_head list;
66885 /* Protects from simultaneous access to first_req list */
66886 spinlock_t info_list_lock;
66887 diff --git a/include/net/flow.h b/include/net/flow.h
66888 index 628e11b..4c475df 100644
66889 --- a/include/net/flow.h
66890 +++ b/include/net/flow.h
66891 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
66892
66893 extern void flow_cache_flush(void);
66894 extern void flow_cache_flush_deferred(void);
66895 -extern atomic_t flow_cache_genid;
66896 +extern atomic_unchecked_t flow_cache_genid;
66897
66898 #endif
66899 diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
66900 index 4fd8a4b..123e31a 100644
66901 --- a/include/net/gro_cells.h
66902 +++ b/include/net/gro_cells.h
66903 @@ -30,7 +30,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
66904 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
66905
66906 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
66907 - atomic_long_inc(&dev->rx_dropped);
66908 + atomic_long_inc_unchecked(&dev->rx_dropped);
66909 kfree_skb(skb);
66910 return;
66911 }
66912 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
66913 index ba1d361..8163a9c 100644
66914 --- a/include/net/inet_connection_sock.h
66915 +++ b/include/net/inet_connection_sock.h
66916 @@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
66917 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
66918 int (*bind_conflict)(const struct sock *sk,
66919 const struct inet_bind_bucket *tb, bool relax);
66920 -};
66921 +} __do_const;
66922
66923 /** inet_connection_sock - INET connection oriented sock
66924 *
66925 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
66926 index 53f464d..ba76aaa 100644
66927 --- a/include/net/inetpeer.h
66928 +++ b/include/net/inetpeer.h
66929 @@ -47,8 +47,8 @@ struct inet_peer {
66930 */
66931 union {
66932 struct {
66933 - atomic_t rid; /* Frag reception counter */
66934 - atomic_t ip_id_count; /* IP ID for the next packet */
66935 + atomic_unchecked_t rid; /* Frag reception counter */
66936 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
66937 };
66938 struct rcu_head rcu;
66939 struct inet_peer *gc_next;
66940 @@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
66941 more++;
66942 inet_peer_refcheck(p);
66943 do {
66944 - old = atomic_read(&p->ip_id_count);
66945 + old = atomic_read_unchecked(&p->ip_id_count);
66946 new = old + more;
66947 if (!new)
66948 new = 1;
66949 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
66950 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
66951 return new;
66952 }
66953
66954 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
66955 index 9497be1..5a4fafe 100644
66956 --- a/include/net/ip_fib.h
66957 +++ b/include/net/ip_fib.h
66958 @@ -169,7 +169,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
66959
66960 #define FIB_RES_SADDR(net, res) \
66961 ((FIB_RES_NH(res).nh_saddr_genid == \
66962 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
66963 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
66964 FIB_RES_NH(res).nh_saddr : \
66965 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
66966 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
66967 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
66968 index ee75ccd..2cc2b95 100644
66969 --- a/include/net/ip_vs.h
66970 +++ b/include/net/ip_vs.h
66971 @@ -510,7 +510,7 @@ struct ip_vs_conn {
66972 struct ip_vs_conn *control; /* Master control connection */
66973 atomic_t n_control; /* Number of controlled ones */
66974 struct ip_vs_dest *dest; /* real server */
66975 - atomic_t in_pkts; /* incoming packet counter */
66976 + atomic_unchecked_t in_pkts; /* incoming packet counter */
66977
66978 /* packet transmitter for different forwarding methods. If it
66979 mangles the packet, it must return NF_DROP or better NF_STOLEN,
66980 @@ -648,7 +648,7 @@ struct ip_vs_dest {
66981 __be16 port; /* port number of the server */
66982 union nf_inet_addr addr; /* IP address of the server */
66983 volatile unsigned int flags; /* dest status flags */
66984 - atomic_t conn_flags; /* flags to copy to conn */
66985 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
66986 atomic_t weight; /* server weight */
66987
66988 atomic_t refcnt; /* reference counter */
66989 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
66990 index 69b610a..fe3962c 100644
66991 --- a/include/net/irda/ircomm_core.h
66992 +++ b/include/net/irda/ircomm_core.h
66993 @@ -51,7 +51,7 @@ typedef struct {
66994 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
66995 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
66996 struct ircomm_info *);
66997 -} call_t;
66998 +} __no_const call_t;
66999
67000 struct ircomm_cb {
67001 irda_queue_t queue;
67002 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
67003 index 80ffde3..968b0f4 100644
67004 --- a/include/net/irda/ircomm_tty.h
67005 +++ b/include/net/irda/ircomm_tty.h
67006 @@ -35,6 +35,7 @@
67007 #include <linux/termios.h>
67008 #include <linux/timer.h>
67009 #include <linux/tty.h> /* struct tty_struct */
67010 +#include <asm/local.h>
67011
67012 #include <net/irda/irias_object.h>
67013 #include <net/irda/ircomm_core.h>
67014 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
67015 index cc7c197..9f2da2a 100644
67016 --- a/include/net/iucv/af_iucv.h
67017 +++ b/include/net/iucv/af_iucv.h
67018 @@ -141,7 +141,7 @@ struct iucv_sock {
67019 struct iucv_sock_list {
67020 struct hlist_head head;
67021 rwlock_t lock;
67022 - atomic_t autobind_name;
67023 + atomic_unchecked_t autobind_name;
67024 };
67025
67026 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
67027 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
67028 index 0dab173..1b76af0 100644
67029 --- a/include/net/neighbour.h
67030 +++ b/include/net/neighbour.h
67031 @@ -123,7 +123,7 @@ struct neigh_ops {
67032 void (*error_report)(struct neighbour *, struct sk_buff *);
67033 int (*output)(struct neighbour *, struct sk_buff *);
67034 int (*connected_output)(struct neighbour *, struct sk_buff *);
67035 -};
67036 +} __do_const;
67037
67038 struct pneigh_entry {
67039 struct pneigh_entry *next;
67040 diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
67041 index 95e6466..251016d 100644
67042 --- a/include/net/net_namespace.h
67043 +++ b/include/net/net_namespace.h
67044 @@ -110,7 +110,7 @@ struct net {
67045 #endif
67046 struct netns_ipvs *ipvs;
67047 struct sock *diag_nlsk;
67048 - atomic_t rt_genid;
67049 + atomic_unchecked_t rt_genid;
67050 };
67051
67052 /*
67053 @@ -320,12 +320,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
67054
67055 static inline int rt_genid(struct net *net)
67056 {
67057 - return atomic_read(&net->rt_genid);
67058 + return atomic_read_unchecked(&net->rt_genid);
67059 }
67060
67061 static inline void rt_genid_bump(struct net *net)
67062 {
67063 - atomic_inc(&net->rt_genid);
67064 + atomic_inc_unchecked(&net->rt_genid);
67065 }
67066
67067 #endif /* __NET_NET_NAMESPACE_H */
67068 diff --git a/include/net/netdma.h b/include/net/netdma.h
67069 index 8ba8ce2..99b7fff 100644
67070 --- a/include/net/netdma.h
67071 +++ b/include/net/netdma.h
67072 @@ -24,7 +24,7 @@
67073 #include <linux/dmaengine.h>
67074 #include <linux/skbuff.h>
67075
67076 -int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
67077 +int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
67078 struct sk_buff *skb, int offset, struct iovec *to,
67079 size_t len, struct dma_pinned_list *pinned_list);
67080
67081 diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
67082 index 252fd10..aa1421f 100644
67083 --- a/include/net/netfilter/nf_queue.h
67084 +++ b/include/net/netfilter/nf_queue.h
67085 @@ -22,7 +22,7 @@ struct nf_queue_handler {
67086 int (*outfn)(struct nf_queue_entry *entry,
67087 unsigned int queuenum);
67088 char *name;
67089 -};
67090 +} __do_const;
67091
67092 extern int nf_register_queue_handler(u_int8_t pf,
67093 const struct nf_queue_handler *qh);
67094 diff --git a/include/net/netlink.h b/include/net/netlink.h
67095 index 9690b0f..87aded7 100644
67096 --- a/include/net/netlink.h
67097 +++ b/include/net/netlink.h
67098 @@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
67099 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
67100 {
67101 if (mark)
67102 - skb_trim(skb, (unsigned char *) mark - skb->data);
67103 + skb_trim(skb, (const unsigned char *) mark - skb->data);
67104 }
67105
67106 /**
67107 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
67108 index 2ae2b83..dbdc85e 100644
67109 --- a/include/net/netns/ipv4.h
67110 +++ b/include/net/netns/ipv4.h
67111 @@ -64,7 +64,7 @@ struct netns_ipv4 {
67112 kgid_t sysctl_ping_group_range[2];
67113 long sysctl_tcp_mem[3];
67114
67115 - atomic_t dev_addr_genid;
67116 + atomic_unchecked_t dev_addr_genid;
67117
67118 #ifdef CONFIG_IP_MROUTE
67119 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
67120 diff --git a/include/net/protocol.h b/include/net/protocol.h
67121 index 929528c..c84d4f6 100644
67122 --- a/include/net/protocol.h
67123 +++ b/include/net/protocol.h
67124 @@ -48,7 +48,7 @@ struct net_protocol {
67125 int (*gro_complete)(struct sk_buff *skb);
67126 unsigned int no_policy:1,
67127 netns_ok:1;
67128 -};
67129 +} __do_const;
67130
67131 #if IS_ENABLED(CONFIG_IPV6)
67132 struct inet6_protocol {
67133 @@ -69,7 +69,7 @@ struct inet6_protocol {
67134 int (*gro_complete)(struct sk_buff *skb);
67135
67136 unsigned int flags; /* INET6_PROTO_xxx */
67137 -};
67138 +} __do_const;
67139
67140 #define INET6_PROTO_NOPOLICY 0x1
67141 #define INET6_PROTO_FINAL 0x2
67142 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
67143 index 9c6414f..fbd0524 100644
67144 --- a/include/net/sctp/sctp.h
67145 +++ b/include/net/sctp/sctp.h
67146 @@ -318,9 +318,9 @@ do { \
67147
67148 #else /* SCTP_DEBUG */
67149
67150 -#define SCTP_DEBUG_PRINTK(whatever...)
67151 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
67152 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
67153 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
67154 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
67155 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
67156 #define SCTP_ENABLE_DEBUG
67157 #define SCTP_DISABLE_DEBUG
67158 #define SCTP_ASSERT(expr, str, func)
67159 diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
67160 index 64158aa..b65533c 100644
67161 --- a/include/net/sctp/structs.h
67162 +++ b/include/net/sctp/structs.h
67163 @@ -496,7 +496,7 @@ struct sctp_af {
67164 int sockaddr_len;
67165 sa_family_t sa_family;
67166 struct list_head list;
67167 -};
67168 +} __do_const;
67169
67170 struct sctp_af *sctp_get_af_specific(sa_family_t);
67171 int sctp_register_af(struct sctp_af *);
67172 @@ -516,7 +516,7 @@ struct sctp_pf {
67173 struct sctp_association *asoc);
67174 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
67175 struct sctp_af *af;
67176 -};
67177 +} __do_const;
67178
67179
67180 /* Structure to track chunk fragments that have been acked, but peer
67181 diff --git a/include/net/sock.h b/include/net/sock.h
67182 index c945fba..e162e56 100644
67183 --- a/include/net/sock.h
67184 +++ b/include/net/sock.h
67185 @@ -304,7 +304,7 @@ struct sock {
67186 #ifdef CONFIG_RPS
67187 __u32 sk_rxhash;
67188 #endif
67189 - atomic_t sk_drops;
67190 + atomic_unchecked_t sk_drops;
67191 int sk_rcvbuf;
67192
67193 struct sk_filter __rcu *sk_filter;
67194 @@ -1763,7 +1763,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
67195 }
67196
67197 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
67198 - char __user *from, char *to,
67199 + char __user *from, unsigned char *to,
67200 int copy, int offset)
67201 {
67202 if (skb->ip_summed == CHECKSUM_NONE) {
67203 @@ -2022,7 +2022,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
67204 }
67205 }
67206
67207 -struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
67208 +struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
67209
67210 /**
67211 * sk_page_frag - return an appropriate page_frag
67212 diff --git a/include/net/tcp.h b/include/net/tcp.h
67213 index 4af45e3..32163a9 100644
67214 --- a/include/net/tcp.h
67215 +++ b/include/net/tcp.h
67216 @@ -531,7 +531,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
67217 extern void tcp_xmit_retransmit_queue(struct sock *);
67218 extern void tcp_simple_retransmit(struct sock *);
67219 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
67220 -extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
67221 +extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
67222
67223 extern void tcp_send_probe0(struct sock *);
67224 extern void tcp_send_partial(struct sock *);
67225 @@ -702,8 +702,8 @@ struct tcp_skb_cb {
67226 struct inet6_skb_parm h6;
67227 #endif
67228 } header; /* For incoming frames */
67229 - __u32 seq; /* Starting sequence number */
67230 - __u32 end_seq; /* SEQ + FIN + SYN + datalen */
67231 + __u32 seq __intentional_overflow(0); /* Starting sequence number */
67232 + __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
67233 __u32 when; /* used to compute rtt's */
67234 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
67235
67236 @@ -717,7 +717,7 @@ struct tcp_skb_cb {
67237
67238 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
67239 /* 1 byte hole */
67240 - __u32 ack_seq; /* Sequence number ACK'd */
67241 + __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
67242 };
67243
67244 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
67245 @@ -1563,7 +1563,7 @@ struct tcp_seq_afinfo {
67246 char *name;
67247 sa_family_t family;
67248 const struct file_operations *seq_fops;
67249 - struct seq_operations seq_ops;
67250 + seq_operations_no_const seq_ops;
67251 };
67252
67253 struct tcp_iter_state {
67254 diff --git a/include/net/udp.h b/include/net/udp.h
67255 index 065f379..b661b40 100644
67256 --- a/include/net/udp.h
67257 +++ b/include/net/udp.h
67258 @@ -244,7 +244,7 @@ struct udp_seq_afinfo {
67259 sa_family_t family;
67260 struct udp_table *udp_table;
67261 const struct file_operations *seq_fops;
67262 - struct seq_operations seq_ops;
67263 + seq_operations_no_const seq_ops;
67264 };
67265
67266 struct udp_iter_state {
67267 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
67268 index 63445ed..74ef61d 100644
67269 --- a/include/net/xfrm.h
67270 +++ b/include/net/xfrm.h
67271 @@ -423,7 +423,7 @@ struct xfrm_mode {
67272 struct module *owner;
67273 unsigned int encap;
67274 int flags;
67275 -};
67276 +} __do_const;
67277
67278 /* Flags for xfrm_mode. */
67279 enum {
67280 @@ -514,7 +514,7 @@ struct xfrm_policy {
67281 struct timer_list timer;
67282
67283 struct flow_cache_object flo;
67284 - atomic_t genid;
67285 + atomic_unchecked_t genid;
67286 u32 priority;
67287 u32 index;
67288 struct xfrm_mark mark;
67289 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
67290 index 1a046b1..ee0bef0 100644
67291 --- a/include/rdma/iw_cm.h
67292 +++ b/include/rdma/iw_cm.h
67293 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
67294 int backlog);
67295
67296 int (*destroy_listen)(struct iw_cm_id *cm_id);
67297 -};
67298 +} __no_const;
67299
67300 /**
67301 * iw_create_cm_id - Create an IW CM identifier.
67302 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
67303 index 399162b..b337f1a 100644
67304 --- a/include/scsi/libfc.h
67305 +++ b/include/scsi/libfc.h
67306 @@ -762,6 +762,7 @@ struct libfc_function_template {
67307 */
67308 void (*disc_stop_final) (struct fc_lport *);
67309 };
67310 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
67311
67312 /**
67313 * struct fc_disc - Discovery context
67314 @@ -866,7 +867,7 @@ struct fc_lport {
67315 struct fc_vport *vport;
67316
67317 /* Operational Information */
67318 - struct libfc_function_template tt;
67319 + libfc_function_template_no_const tt;
67320 u8 link_up;
67321 u8 qfull;
67322 enum fc_lport_state state;
67323 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
67324 index 55367b0..d97bd2a 100644
67325 --- a/include/scsi/scsi_device.h
67326 +++ b/include/scsi/scsi_device.h
67327 @@ -169,9 +169,9 @@ struct scsi_device {
67328 unsigned int max_device_blocked; /* what device_blocked counts down from */
67329 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
67330
67331 - atomic_t iorequest_cnt;
67332 - atomic_t iodone_cnt;
67333 - atomic_t ioerr_cnt;
67334 + atomic_unchecked_t iorequest_cnt;
67335 + atomic_unchecked_t iodone_cnt;
67336 + atomic_unchecked_t ioerr_cnt;
67337
67338 struct device sdev_gendev,
67339 sdev_dev;
67340 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
67341 index b797e8f..8e2c3aa 100644
67342 --- a/include/scsi/scsi_transport_fc.h
67343 +++ b/include/scsi/scsi_transport_fc.h
67344 @@ -751,7 +751,8 @@ struct fc_function_template {
67345 unsigned long show_host_system_hostname:1;
67346
67347 unsigned long disable_target_scan:1;
67348 -};
67349 +} __do_const;
67350 +typedef struct fc_function_template __no_const fc_function_template_no_const;
67351
67352
67353 /**
67354 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
67355 index 030b87c..98a6954 100644
67356 --- a/include/sound/ak4xxx-adda.h
67357 +++ b/include/sound/ak4xxx-adda.h
67358 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
67359 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
67360 unsigned char val);
67361 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
67362 -};
67363 +} __no_const;
67364
67365 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
67366
67367 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
67368 index 8c05e47..2b5df97 100644
67369 --- a/include/sound/hwdep.h
67370 +++ b/include/sound/hwdep.h
67371 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
67372 struct snd_hwdep_dsp_status *status);
67373 int (*dsp_load)(struct snd_hwdep *hw,
67374 struct snd_hwdep_dsp_image *image);
67375 -};
67376 +} __no_const;
67377
67378 struct snd_hwdep {
67379 struct snd_card *card;
67380 diff --git a/include/sound/info.h b/include/sound/info.h
67381 index 9ca1a49..aba1728 100644
67382 --- a/include/sound/info.h
67383 +++ b/include/sound/info.h
67384 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
67385 struct snd_info_buffer *buffer);
67386 void (*write)(struct snd_info_entry *entry,
67387 struct snd_info_buffer *buffer);
67388 -};
67389 +} __no_const;
67390
67391 struct snd_info_entry_ops {
67392 int (*open)(struct snd_info_entry *entry,
67393 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
67394 index 6268a41..de9b9d1 100644
67395 --- a/include/sound/pcm.h
67396 +++ b/include/sound/pcm.h
67397 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
67398 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
67399 int (*ack)(struct snd_pcm_substream *substream);
67400 };
67401 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
67402
67403 /*
67404 *
67405 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
67406 index 7e95056..52df55a 100644
67407 --- a/include/sound/sb16_csp.h
67408 +++ b/include/sound/sb16_csp.h
67409 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
67410 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
67411 int (*csp_stop) (struct snd_sb_csp * p);
67412 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
67413 -};
67414 +} __no_const;
67415
67416 /*
67417 * CSP private data
67418 diff --git a/include/sound/soc.h b/include/sound/soc.h
67419 index 91244a0..11c542e 100644
67420 --- a/include/sound/soc.h
67421 +++ b/include/sound/soc.h
67422 @@ -769,7 +769,7 @@ struct snd_soc_codec_driver {
67423 /* probe ordering - for components with runtime dependencies */
67424 int probe_order;
67425 int remove_order;
67426 -};
67427 +} __do_const;
67428
67429 /* SoC platform interface */
67430 struct snd_soc_platform_driver {
67431 @@ -815,7 +815,7 @@ struct snd_soc_platform_driver {
67432 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
67433 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
67434 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
67435 -};
67436 +} __do_const;
67437
67438 struct snd_soc_platform {
67439 const char *name;
67440 @@ -1031,7 +1031,7 @@ struct snd_soc_pcm_runtime {
67441 struct snd_soc_dai_link *dai_link;
67442 struct mutex pcm_mutex;
67443 enum snd_soc_pcm_subclass pcm_subclass;
67444 - struct snd_pcm_ops ops;
67445 + snd_pcm_ops_no_const ops;
67446
67447 unsigned int dev_registered:1;
67448
67449 diff --git a/include/sound/tea575x-tuner.h b/include/sound/tea575x-tuner.h
67450 index 098c4de..171054b 100644
67451 --- a/include/sound/tea575x-tuner.h
67452 +++ b/include/sound/tea575x-tuner.h
67453 @@ -49,7 +49,7 @@ struct snd_tea575x_ops {
67454
67455 struct snd_tea575x {
67456 struct v4l2_device *v4l2_dev;
67457 - struct v4l2_file_operations fops;
67458 + v4l2_file_operations_no_const fops;
67459 struct video_device vd; /* video device */
67460 int radio_nr; /* radio_nr */
67461 bool tea5759; /* 5759 chip is present */
67462 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
67463 index 5be8937..a8e46e9 100644
67464 --- a/include/target/target_core_base.h
67465 +++ b/include/target/target_core_base.h
67466 @@ -434,7 +434,7 @@ struct t10_reservation_ops {
67467 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
67468 int (*t10_pr_register)(struct se_cmd *);
67469 int (*t10_pr_clear)(struct se_cmd *);
67470 -};
67471 +} __no_const;
67472
67473 struct t10_reservation {
67474 /* Reservation effects all target ports */
67475 @@ -758,7 +758,7 @@ struct se_device {
67476 spinlock_t stats_lock;
67477 /* Active commands on this virtual SE device */
67478 atomic_t simple_cmds;
67479 - atomic_t dev_ordered_id;
67480 + atomic_unchecked_t dev_ordered_id;
67481 atomic_t dev_ordered_sync;
67482 atomic_t dev_qf_count;
67483 struct se_obj dev_obj;
67484 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
67485 new file mode 100644
67486 index 0000000..fb634b7
67487 --- /dev/null
67488 +++ b/include/trace/events/fs.h
67489 @@ -0,0 +1,53 @@
67490 +#undef TRACE_SYSTEM
67491 +#define TRACE_SYSTEM fs
67492 +
67493 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
67494 +#define _TRACE_FS_H
67495 +
67496 +#include <linux/fs.h>
67497 +#include <linux/tracepoint.h>
67498 +
67499 +TRACE_EVENT(do_sys_open,
67500 +
67501 + TP_PROTO(const char *filename, int flags, int mode),
67502 +
67503 + TP_ARGS(filename, flags, mode),
67504 +
67505 + TP_STRUCT__entry(
67506 + __string( filename, filename )
67507 + __field( int, flags )
67508 + __field( int, mode )
67509 + ),
67510 +
67511 + TP_fast_assign(
67512 + __assign_str(filename, filename);
67513 + __entry->flags = flags;
67514 + __entry->mode = mode;
67515 + ),
67516 +
67517 + TP_printk("\"%s\" %x %o",
67518 + __get_str(filename), __entry->flags, __entry->mode)
67519 +);
67520 +
67521 +TRACE_EVENT(open_exec,
67522 +
67523 + TP_PROTO(const char *filename),
67524 +
67525 + TP_ARGS(filename),
67526 +
67527 + TP_STRUCT__entry(
67528 + __string( filename, filename )
67529 + ),
67530 +
67531 + TP_fast_assign(
67532 + __assign_str(filename, filename);
67533 + ),
67534 +
67535 + TP_printk("\"%s\"",
67536 + __get_str(filename))
67537 +);
67538 +
67539 +#endif /* _TRACE_FS_H */
67540 +
67541 +/* This part must be outside protection */
67542 +#include <trace/define_trace.h>
67543 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
67544 index 1c09820..7f5ec79 100644
67545 --- a/include/trace/events/irq.h
67546 +++ b/include/trace/events/irq.h
67547 @@ -36,7 +36,7 @@ struct softirq_action;
67548 */
67549 TRACE_EVENT(irq_handler_entry,
67550
67551 - TP_PROTO(int irq, struct irqaction *action),
67552 + TP_PROTO(int irq, const struct irqaction *action),
67553
67554 TP_ARGS(irq, action),
67555
67556 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
67557 */
67558 TRACE_EVENT(irq_handler_exit,
67559
67560 - TP_PROTO(int irq, struct irqaction *action, int ret),
67561 + TP_PROTO(int irq, const struct irqaction *action, int ret),
67562
67563 TP_ARGS(irq, action, ret),
67564
67565 diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
67566 index 7caf44c..23c6f27 100644
67567 --- a/include/uapi/linux/a.out.h
67568 +++ b/include/uapi/linux/a.out.h
67569 @@ -39,6 +39,14 @@ enum machine_type {
67570 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
67571 };
67572
67573 +/* Constants for the N_FLAGS field */
67574 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
67575 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
67576 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
67577 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
67578 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
67579 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
67580 +
67581 #if !defined (N_MAGIC)
67582 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
67583 #endif
67584 diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
67585 index d876736..b36014e 100644
67586 --- a/include/uapi/linux/byteorder/little_endian.h
67587 +++ b/include/uapi/linux/byteorder/little_endian.h
67588 @@ -42,51 +42,51 @@
67589
67590 static inline __le64 __cpu_to_le64p(const __u64 *p)
67591 {
67592 - return (__force __le64)*p;
67593 + return (__force const __le64)*p;
67594 }
67595 static inline __u64 __le64_to_cpup(const __le64 *p)
67596 {
67597 - return (__force __u64)*p;
67598 + return (__force const __u64)*p;
67599 }
67600 static inline __le32 __cpu_to_le32p(const __u32 *p)
67601 {
67602 - return (__force __le32)*p;
67603 + return (__force const __le32)*p;
67604 }
67605 static inline __u32 __le32_to_cpup(const __le32 *p)
67606 {
67607 - return (__force __u32)*p;
67608 + return (__force const __u32)*p;
67609 }
67610 static inline __le16 __cpu_to_le16p(const __u16 *p)
67611 {
67612 - return (__force __le16)*p;
67613 + return (__force const __le16)*p;
67614 }
67615 static inline __u16 __le16_to_cpup(const __le16 *p)
67616 {
67617 - return (__force __u16)*p;
67618 + return (__force const __u16)*p;
67619 }
67620 static inline __be64 __cpu_to_be64p(const __u64 *p)
67621 {
67622 - return (__force __be64)__swab64p(p);
67623 + return (__force const __be64)__swab64p(p);
67624 }
67625 static inline __u64 __be64_to_cpup(const __be64 *p)
67626 {
67627 - return __swab64p((__u64 *)p);
67628 + return __swab64p((const __u64 *)p);
67629 }
67630 static inline __be32 __cpu_to_be32p(const __u32 *p)
67631 {
67632 - return (__force __be32)__swab32p(p);
67633 + return (__force const __be32)__swab32p(p);
67634 }
67635 static inline __u32 __be32_to_cpup(const __be32 *p)
67636 {
67637 - return __swab32p((__u32 *)p);
67638 + return __swab32p((const __u32 *)p);
67639 }
67640 static inline __be16 __cpu_to_be16p(const __u16 *p)
67641 {
67642 - return (__force __be16)__swab16p(p);
67643 + return (__force const __be16)__swab16p(p);
67644 }
67645 static inline __u16 __be16_to_cpup(const __be16 *p)
67646 {
67647 - return __swab16p((__u16 *)p);
67648 + return __swab16p((const __u16 *)p);
67649 }
67650 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
67651 #define __le64_to_cpus(x) do { (void)(x); } while (0)
67652 diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
67653 index 126a817..d522bd1 100644
67654 --- a/include/uapi/linux/elf.h
67655 +++ b/include/uapi/linux/elf.h
67656 @@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
67657 #define PT_GNU_EH_FRAME 0x6474e550
67658
67659 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
67660 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
67661 +
67662 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
67663 +
67664 +/* Constants for the e_flags field */
67665 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
67666 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
67667 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
67668 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
67669 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
67670 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
67671
67672 /*
67673 * Extended Numbering
67674 @@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
67675 #define DT_DEBUG 21
67676 #define DT_TEXTREL 22
67677 #define DT_JMPREL 23
67678 +#define DT_FLAGS 30
67679 + #define DF_TEXTREL 0x00000004
67680 #define DT_ENCODING 32
67681 #define OLD_DT_LOOS 0x60000000
67682 #define DT_LOOS 0x6000000d
67683 @@ -240,6 +253,19 @@ typedef struct elf64_hdr {
67684 #define PF_W 0x2
67685 #define PF_X 0x1
67686
67687 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
67688 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
67689 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
67690 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
67691 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
67692 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
67693 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
67694 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
67695 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
67696 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
67697 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
67698 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
67699 +
67700 typedef struct elf32_phdr{
67701 Elf32_Word p_type;
67702 Elf32_Off p_offset;
67703 @@ -332,6 +358,8 @@ typedef struct elf64_shdr {
67704 #define EI_OSABI 7
67705 #define EI_PAD 8
67706
67707 +#define EI_PAX 14
67708 +
67709 #define ELFMAG0 0x7f /* EI_MAG */
67710 #define ELFMAG1 'E'
67711 #define ELFMAG2 'L'
67712 diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
67713 index aa169c4..6a2771d 100644
67714 --- a/include/uapi/linux/personality.h
67715 +++ b/include/uapi/linux/personality.h
67716 @@ -30,6 +30,7 @@ enum {
67717 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
67718 ADDR_NO_RANDOMIZE | \
67719 ADDR_COMPAT_LAYOUT | \
67720 + ADDR_LIMIT_3GB | \
67721 MMAP_PAGE_ZERO)
67722
67723 /*
67724 diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
67725 index 7530e74..e714828 100644
67726 --- a/include/uapi/linux/screen_info.h
67727 +++ b/include/uapi/linux/screen_info.h
67728 @@ -43,7 +43,8 @@ struct screen_info {
67729 __u16 pages; /* 0x32 */
67730 __u16 vesa_attributes; /* 0x34 */
67731 __u32 capabilities; /* 0x36 */
67732 - __u8 _reserved[6]; /* 0x3a */
67733 + __u16 vesapm_size; /* 0x3a */
67734 + __u8 _reserved[4]; /* 0x3c */
67735 } __attribute__((packed));
67736
67737 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
67738 diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
67739 index 6d67213..8dab561 100644
67740 --- a/include/uapi/linux/sysctl.h
67741 +++ b/include/uapi/linux/sysctl.h
67742 @@ -155,7 +155,11 @@ enum
67743 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
67744 };
67745
67746 -
67747 +#ifdef CONFIG_PAX_SOFTMODE
67748 +enum {
67749 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
67750 +};
67751 +#endif
67752
67753 /* CTL_VM names: */
67754 enum
67755 diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
67756 index 26607bd..588b65f 100644
67757 --- a/include/uapi/linux/xattr.h
67758 +++ b/include/uapi/linux/xattr.h
67759 @@ -60,5 +60,9 @@
67760 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
67761 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
67762
67763 +/* User namespace */
67764 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
67765 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
67766 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
67767
67768 #endif /* _UAPI_LINUX_XATTR_H */
67769 diff --git a/include/video/omapdss.h b/include/video/omapdss.h
67770 index 3729173..7b2df7e 100644
67771 --- a/include/video/omapdss.h
67772 +++ b/include/video/omapdss.h
67773 @@ -323,7 +323,7 @@ struct omap_dss_board_info {
67774 int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
67775 void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
67776 int (*set_min_bus_tput)(struct device *dev, unsigned long r);
67777 -};
67778 +} __do_const;
67779
67780 /* Init with the board info */
67781 extern int omap_display_init(struct omap_dss_board_info *board_data);
67782 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
67783 index f9466fa..f4e2b81 100644
67784 --- a/include/video/udlfb.h
67785 +++ b/include/video/udlfb.h
67786 @@ -53,10 +53,10 @@ struct dlfb_data {
67787 u32 pseudo_palette[256];
67788 int blank_mode; /*one of FB_BLANK_ */
67789 /* blit-only rendering path metrics, exposed through sysfs */
67790 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
67791 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
67792 - atomic_t bytes_sent; /* to usb, after compression including overhead */
67793 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
67794 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
67795 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
67796 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
67797 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
67798 };
67799
67800 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
67801 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
67802 index 0993a22..32ba2fe 100644
67803 --- a/include/video/uvesafb.h
67804 +++ b/include/video/uvesafb.h
67805 @@ -177,6 +177,7 @@ struct uvesafb_par {
67806 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
67807 u8 pmi_setpal; /* PMI for palette changes */
67808 u16 *pmi_base; /* protected mode interface location */
67809 + u8 *pmi_code; /* protected mode code location */
67810 void *pmi_start;
67811 void *pmi_pal;
67812 u8 *vbe_state_orig; /*
67813 diff --git a/init/Kconfig b/init/Kconfig
67814 index 6fdd6e3..5b01610 100644
67815 --- a/init/Kconfig
67816 +++ b/init/Kconfig
67817 @@ -925,6 +925,7 @@ endif # CGROUPS
67818
67819 config CHECKPOINT_RESTORE
67820 bool "Checkpoint/restore support" if EXPERT
67821 + depends on !GRKERNSEC
67822 default n
67823 help
67824 Enables additional kernel features in a sake of checkpoint/restore.
67825 @@ -1016,6 +1017,8 @@ config UIDGID_CONVERTED
67826 depends on OCFS2_FS = n
67827 depends on XFS_FS = n
67828
67829 + depends on GRKERNSEC = n
67830 +
67831 config UIDGID_STRICT_TYPE_CHECKS
67832 bool "Require conversions between uid/gids and their internal representation"
67833 depends on UIDGID_CONVERTED
67834 @@ -1405,7 +1408,7 @@ config SLUB_DEBUG
67835
67836 config COMPAT_BRK
67837 bool "Disable heap randomization"
67838 - default y
67839 + default n
67840 help
67841 Randomizing heap placement makes heap exploits harder, but it
67842 also breaks ancient binaries (including anything libc5 based).
67843 @@ -1648,7 +1651,7 @@ config INIT_ALL_POSSIBLE
67844 config STOP_MACHINE
67845 bool
67846 default y
67847 - depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
67848 + depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
67849 help
67850 Need stop_machine() primitive.
67851
67852 diff --git a/init/Makefile b/init/Makefile
67853 index 7bc47ee..6da2dc7 100644
67854 --- a/init/Makefile
67855 +++ b/init/Makefile
67856 @@ -2,6 +2,9 @@
67857 # Makefile for the linux kernel.
67858 #
67859
67860 +ccflags-y := $(GCC_PLUGINS_CFLAGS)
67861 +asflags-y := $(GCC_PLUGINS_AFLAGS)
67862 +
67863 obj-y := main.o version.o mounts.o
67864 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
67865 obj-y += noinitramfs.o
67866 diff --git a/init/do_mounts.c b/init/do_mounts.c
67867 index f8a6642..4e5ee1b 100644
67868 --- a/init/do_mounts.c
67869 +++ b/init/do_mounts.c
67870 @@ -336,11 +336,11 @@ static void __init get_fs_names(char *page)
67871 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
67872 {
67873 struct super_block *s;
67874 - int err = sys_mount(name, "/root", fs, flags, data);
67875 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
67876 if (err)
67877 return err;
67878
67879 - sys_chdir("/root");
67880 + sys_chdir((const char __force_user *)"/root");
67881 s = current->fs->pwd.dentry->d_sb;
67882 ROOT_DEV = s->s_dev;
67883 printk(KERN_INFO
67884 @@ -461,18 +461,18 @@ void __init change_floppy(char *fmt, ...)
67885 va_start(args, fmt);
67886 vsprintf(buf, fmt, args);
67887 va_end(args);
67888 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
67889 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
67890 if (fd >= 0) {
67891 sys_ioctl(fd, FDEJECT, 0);
67892 sys_close(fd);
67893 }
67894 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
67895 - fd = sys_open("/dev/console", O_RDWR, 0);
67896 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
67897 if (fd >= 0) {
67898 sys_ioctl(fd, TCGETS, (long)&termios);
67899 termios.c_lflag &= ~ICANON;
67900 sys_ioctl(fd, TCSETSF, (long)&termios);
67901 - sys_read(fd, &c, 1);
67902 + sys_read(fd, (char __user *)&c, 1);
67903 termios.c_lflag |= ICANON;
67904 sys_ioctl(fd, TCSETSF, (long)&termios);
67905 sys_close(fd);
67906 @@ -566,6 +566,6 @@ void __init prepare_namespace(void)
67907 mount_root();
67908 out:
67909 devtmpfs_mount("dev");
67910 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
67911 - sys_chroot(".");
67912 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
67913 + sys_chroot((const char __force_user *)".");
67914 }
67915 diff --git a/init/do_mounts.h b/init/do_mounts.h
67916 index f5b978a..69dbfe8 100644
67917 --- a/init/do_mounts.h
67918 +++ b/init/do_mounts.h
67919 @@ -15,15 +15,15 @@ extern int root_mountflags;
67920
67921 static inline int create_dev(char *name, dev_t dev)
67922 {
67923 - sys_unlink(name);
67924 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
67925 + sys_unlink((char __force_user *)name);
67926 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
67927 }
67928
67929 #if BITS_PER_LONG == 32
67930 static inline u32 bstat(char *name)
67931 {
67932 struct stat64 stat;
67933 - if (sys_stat64(name, &stat) != 0)
67934 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
67935 return 0;
67936 if (!S_ISBLK(stat.st_mode))
67937 return 0;
67938 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
67939 static inline u32 bstat(char *name)
67940 {
67941 struct stat stat;
67942 - if (sys_newstat(name, &stat) != 0)
67943 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
67944 return 0;
67945 if (!S_ISBLK(stat.st_mode))
67946 return 0;
67947 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
67948 index 5e4ded5..aa3cd7e 100644
67949 --- a/init/do_mounts_initrd.c
67950 +++ b/init/do_mounts_initrd.c
67951 @@ -54,8 +54,8 @@ static void __init handle_initrd(void)
67952 create_dev("/dev/root.old", Root_RAM0);
67953 /* mount initrd on rootfs' /root */
67954 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
67955 - sys_mkdir("/old", 0700);
67956 - sys_chdir("/old");
67957 + sys_mkdir((const char __force_user *)"/old", 0700);
67958 + sys_chdir((const char __force_user *)"/old");
67959
67960 /*
67961 * In case that a resume from disk is carried out by linuxrc or one of
67962 @@ -69,31 +69,31 @@ static void __init handle_initrd(void)
67963 current->flags &= ~PF_FREEZER_SKIP;
67964
67965 /* move initrd to rootfs' /old */
67966 - sys_mount("..", ".", NULL, MS_MOVE, NULL);
67967 + sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
67968 /* switch root and cwd back to / of rootfs */
67969 - sys_chroot("..");
67970 + sys_chroot((const char __force_user *)"..");
67971
67972 if (new_decode_dev(real_root_dev) == Root_RAM0) {
67973 - sys_chdir("/old");
67974 + sys_chdir((const char __force_user *)"/old");
67975 return;
67976 }
67977
67978 - sys_chdir("/");
67979 + sys_chdir((const char __force_user *)"/");
67980 ROOT_DEV = new_decode_dev(real_root_dev);
67981 mount_root();
67982
67983 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
67984 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
67985 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
67986 if (!error)
67987 printk("okay\n");
67988 else {
67989 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
67990 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
67991 if (error == -ENOENT)
67992 printk("/initrd does not exist. Ignored.\n");
67993 else
67994 printk("failed\n");
67995 printk(KERN_NOTICE "Unmounting old root\n");
67996 - sys_umount("/old", MNT_DETACH);
67997 + sys_umount((char __force_user *)"/old", MNT_DETACH);
67998 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
67999 if (fd < 0) {
68000 error = fd;
68001 @@ -116,11 +116,11 @@ int __init initrd_load(void)
68002 * mounted in the normal path.
68003 */
68004 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
68005 - sys_unlink("/initrd.image");
68006 + sys_unlink((const char __force_user *)"/initrd.image");
68007 handle_initrd();
68008 return 1;
68009 }
68010 }
68011 - sys_unlink("/initrd.image");
68012 + sys_unlink((const char __force_user *)"/initrd.image");
68013 return 0;
68014 }
68015 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
68016 index 8cb6db5..d729f50 100644
68017 --- a/init/do_mounts_md.c
68018 +++ b/init/do_mounts_md.c
68019 @@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
68020 partitioned ? "_d" : "", minor,
68021 md_setup_args[ent].device_names);
68022
68023 - fd = sys_open(name, 0, 0);
68024 + fd = sys_open((char __force_user *)name, 0, 0);
68025 if (fd < 0) {
68026 printk(KERN_ERR "md: open failed - cannot start "
68027 "array %s\n", name);
68028 @@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
68029 * array without it
68030 */
68031 sys_close(fd);
68032 - fd = sys_open(name, 0, 0);
68033 + fd = sys_open((char __force_user *)name, 0, 0);
68034 sys_ioctl(fd, BLKRRPART, 0);
68035 }
68036 sys_close(fd);
68037 @@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
68038
68039 wait_for_device_probe();
68040
68041 - fd = sys_open("/dev/md0", 0, 0);
68042 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
68043 if (fd >= 0) {
68044 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
68045 sys_close(fd);
68046 diff --git a/init/init_task.c b/init/init_task.c
68047 index 8b2f399..f0797c9 100644
68048 --- a/init/init_task.c
68049 +++ b/init/init_task.c
68050 @@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
68051 * Initial thread structure. Alignment of this is handled by a special
68052 * linker map entry.
68053 */
68054 +#ifdef CONFIG_X86
68055 +union thread_union init_thread_union __init_task_data;
68056 +#else
68057 union thread_union init_thread_union __init_task_data =
68058 { INIT_THREAD_INFO(init_task) };
68059 +#endif
68060 diff --git a/init/initramfs.c b/init/initramfs.c
68061 index 84c6bf1..8899338 100644
68062 --- a/init/initramfs.c
68063 +++ b/init/initramfs.c
68064 @@ -84,7 +84,7 @@ static void __init free_hash(void)
68065 }
68066 }
68067
68068 -static long __init do_utime(char *filename, time_t mtime)
68069 +static long __init do_utime(char __force_user *filename, time_t mtime)
68070 {
68071 struct timespec t[2];
68072
68073 @@ -119,7 +119,7 @@ static void __init dir_utime(void)
68074 struct dir_entry *de, *tmp;
68075 list_for_each_entry_safe(de, tmp, &dir_list, list) {
68076 list_del(&de->list);
68077 - do_utime(de->name, de->mtime);
68078 + do_utime((char __force_user *)de->name, de->mtime);
68079 kfree(de->name);
68080 kfree(de);
68081 }
68082 @@ -281,7 +281,7 @@ static int __init maybe_link(void)
68083 if (nlink >= 2) {
68084 char *old = find_link(major, minor, ino, mode, collected);
68085 if (old)
68086 - return (sys_link(old, collected) < 0) ? -1 : 1;
68087 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
68088 }
68089 return 0;
68090 }
68091 @@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
68092 {
68093 struct stat st;
68094
68095 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
68096 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
68097 if (S_ISDIR(st.st_mode))
68098 - sys_rmdir(path);
68099 + sys_rmdir((char __force_user *)path);
68100 else
68101 - sys_unlink(path);
68102 + sys_unlink((char __force_user *)path);
68103 }
68104 }
68105
68106 @@ -315,7 +315,7 @@ static int __init do_name(void)
68107 int openflags = O_WRONLY|O_CREAT;
68108 if (ml != 1)
68109 openflags |= O_TRUNC;
68110 - wfd = sys_open(collected, openflags, mode);
68111 + wfd = sys_open((char __force_user *)collected, openflags, mode);
68112
68113 if (wfd >= 0) {
68114 sys_fchown(wfd, uid, gid);
68115 @@ -327,17 +327,17 @@ static int __init do_name(void)
68116 }
68117 }
68118 } else if (S_ISDIR(mode)) {
68119 - sys_mkdir(collected, mode);
68120 - sys_chown(collected, uid, gid);
68121 - sys_chmod(collected, mode);
68122 + sys_mkdir((char __force_user *)collected, mode);
68123 + sys_chown((char __force_user *)collected, uid, gid);
68124 + sys_chmod((char __force_user *)collected, mode);
68125 dir_add(collected, mtime);
68126 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
68127 S_ISFIFO(mode) || S_ISSOCK(mode)) {
68128 if (maybe_link() == 0) {
68129 - sys_mknod(collected, mode, rdev);
68130 - sys_chown(collected, uid, gid);
68131 - sys_chmod(collected, mode);
68132 - do_utime(collected, mtime);
68133 + sys_mknod((char __force_user *)collected, mode, rdev);
68134 + sys_chown((char __force_user *)collected, uid, gid);
68135 + sys_chmod((char __force_user *)collected, mode);
68136 + do_utime((char __force_user *)collected, mtime);
68137 }
68138 }
68139 return 0;
68140 @@ -346,15 +346,15 @@ static int __init do_name(void)
68141 static int __init do_copy(void)
68142 {
68143 if (count >= body_len) {
68144 - sys_write(wfd, victim, body_len);
68145 + sys_write(wfd, (char __force_user *)victim, body_len);
68146 sys_close(wfd);
68147 - do_utime(vcollected, mtime);
68148 + do_utime((char __force_user *)vcollected, mtime);
68149 kfree(vcollected);
68150 eat(body_len);
68151 state = SkipIt;
68152 return 0;
68153 } else {
68154 - sys_write(wfd, victim, count);
68155 + sys_write(wfd, (char __force_user *)victim, count);
68156 body_len -= count;
68157 eat(count);
68158 return 1;
68159 @@ -365,9 +365,9 @@ static int __init do_symlink(void)
68160 {
68161 collected[N_ALIGN(name_len) + body_len] = '\0';
68162 clean_path(collected, 0);
68163 - sys_symlink(collected + N_ALIGN(name_len), collected);
68164 - sys_lchown(collected, uid, gid);
68165 - do_utime(collected, mtime);
68166 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
68167 + sys_lchown((char __force_user *)collected, uid, gid);
68168 + do_utime((char __force_user *)collected, mtime);
68169 state = SkipIt;
68170 next_state = Reset;
68171 return 0;
68172 diff --git a/init/main.c b/init/main.c
68173 index e33e09d..b699703 100644
68174 --- a/init/main.c
68175 +++ b/init/main.c
68176 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
68177 extern void tc_init(void);
68178 #endif
68179
68180 +extern void grsecurity_init(void);
68181 +
68182 /*
68183 * Debug helper: via this flag we know that we are in 'early bootup code'
68184 * where only the boot processor is running with IRQ disabled. This means
68185 @@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
68186
68187 __setup("reset_devices", set_reset_devices);
68188
68189 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68190 +int grsec_proc_gid = CONFIG_GRKERNSEC_PROC_GID;
68191 +static int __init setup_grsec_proc_gid(char *str)
68192 +{
68193 + grsec_proc_gid = (int)simple_strtol(str, NULL, 0);
68194 + return 1;
68195 +}
68196 +__setup("grsec_proc_gid=", setup_grsec_proc_gid);
68197 +#endif
68198 +
68199 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
68200 +extern char pax_enter_kernel_user[];
68201 +extern char pax_exit_kernel_user[];
68202 +extern pgdval_t clone_pgd_mask;
68203 +#endif
68204 +
68205 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
68206 +static int __init setup_pax_nouderef(char *str)
68207 +{
68208 +#ifdef CONFIG_X86_32
68209 + unsigned int cpu;
68210 + struct desc_struct *gdt;
68211 +
68212 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
68213 + gdt = get_cpu_gdt_table(cpu);
68214 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
68215 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
68216 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
68217 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
68218 + }
68219 + loadsegment(ds, __KERNEL_DS);
68220 + loadsegment(es, __KERNEL_DS);
68221 + loadsegment(ss, __KERNEL_DS);
68222 +#else
68223 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
68224 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
68225 + clone_pgd_mask = ~(pgdval_t)0UL;
68226 +#endif
68227 +
68228 + return 0;
68229 +}
68230 +early_param("pax_nouderef", setup_pax_nouderef);
68231 +#endif
68232 +
68233 +#ifdef CONFIG_PAX_SOFTMODE
68234 +int pax_softmode;
68235 +
68236 +static int __init setup_pax_softmode(char *str)
68237 +{
68238 + get_option(&str, &pax_softmode);
68239 + return 1;
68240 +}
68241 +__setup("pax_softmode=", setup_pax_softmode);
68242 +#endif
68243 +
68244 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
68245 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
68246 static const char *panic_later, *panic_param;
68247 @@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
68248 {
68249 int count = preempt_count();
68250 int ret;
68251 + const char *msg1 = "", *msg2 = "";
68252
68253 if (initcall_debug)
68254 ret = do_one_initcall_debug(fn);
68255 @@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
68256 sprintf(msgbuf, "error code %d ", ret);
68257
68258 if (preempt_count() != count) {
68259 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
68260 + msg1 = " preemption imbalance";
68261 preempt_count() = count;
68262 }
68263 if (irqs_disabled()) {
68264 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
68265 + msg2 = " disabled interrupts";
68266 local_irq_enable();
68267 }
68268 - if (msgbuf[0]) {
68269 - printk("initcall %pF returned with %s\n", fn, msgbuf);
68270 + if (msgbuf[0] || *msg1 || *msg2) {
68271 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
68272 }
68273
68274 return ret;
68275 @@ -743,6 +801,10 @@ static char *initcall_level_names[] __initdata = {
68276 "late",
68277 };
68278
68279 +#ifdef CONFIG_PAX_LATENT_ENTROPY
68280 +u64 latent_entropy;
68281 +#endif
68282 +
68283 static void __init do_initcall_level(int level)
68284 {
68285 extern const struct kernel_param __start___param[], __stop___param[];
68286 @@ -755,8 +817,14 @@ static void __init do_initcall_level(int level)
68287 level, level,
68288 &repair_env_string);
68289
68290 - for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
68291 + for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
68292 do_one_initcall(*fn);
68293 +
68294 +#ifdef CONFIG_PAX_LATENT_ENTROPY
68295 + add_device_randomness(&latent_entropy, sizeof(latent_entropy));
68296 +#endif
68297 +
68298 + }
68299 }
68300
68301 static void __init do_initcalls(void)
68302 @@ -790,8 +858,14 @@ static void __init do_pre_smp_initcalls(void)
68303 {
68304 initcall_t *fn;
68305
68306 - for (fn = __initcall_start; fn < __initcall0_start; fn++)
68307 + for (fn = __initcall_start; fn < __initcall0_start; fn++) {
68308 do_one_initcall(*fn);
68309 +
68310 +#ifdef CONFIG_PAX_LATENT_ENTROPY
68311 + add_device_randomness(&latent_entropy, sizeof(latent_entropy));
68312 +#endif
68313 +
68314 + }
68315 }
68316
68317 static int run_init_process(const char *init_filename)
68318 @@ -876,7 +950,7 @@ static void __init kernel_init_freeable(void)
68319 do_basic_setup();
68320
68321 /* Open the /dev/console on the rootfs, this should never fail */
68322 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
68323 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
68324 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
68325
68326 (void) sys_dup(0);
68327 @@ -889,11 +963,13 @@ static void __init kernel_init_freeable(void)
68328 if (!ramdisk_execute_command)
68329 ramdisk_execute_command = "/init";
68330
68331 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
68332 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
68333 ramdisk_execute_command = NULL;
68334 prepare_namespace();
68335 }
68336
68337 + grsecurity_init();
68338 +
68339 /*
68340 * Ok, we have completed the initial bootup, and
68341 * we're essentially up and running. Get rid of the
68342 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
68343 index 71a3ca1..cc330ee 100644
68344 --- a/ipc/mqueue.c
68345 +++ b/ipc/mqueue.c
68346 @@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
68347 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
68348 info->attr.mq_msgsize);
68349
68350 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
68351 spin_lock(&mq_lock);
68352 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
68353 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
68354 diff --git a/ipc/msg.c b/ipc/msg.c
68355 index a71af5a..a90a110 100644
68356 --- a/ipc/msg.c
68357 +++ b/ipc/msg.c
68358 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
68359 return security_msg_queue_associate(msq, msgflg);
68360 }
68361
68362 +static struct ipc_ops msg_ops = {
68363 + .getnew = newque,
68364 + .associate = msg_security,
68365 + .more_checks = NULL
68366 +};
68367 +
68368 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
68369 {
68370 struct ipc_namespace *ns;
68371 - struct ipc_ops msg_ops;
68372 struct ipc_params msg_params;
68373
68374 ns = current->nsproxy->ipc_ns;
68375
68376 - msg_ops.getnew = newque;
68377 - msg_ops.associate = msg_security;
68378 - msg_ops.more_checks = NULL;
68379 -
68380 msg_params.key = key;
68381 msg_params.flg = msgflg;
68382
68383 diff --git a/ipc/sem.c b/ipc/sem.c
68384 index 58d31f1..cce7a55 100644
68385 --- a/ipc/sem.c
68386 +++ b/ipc/sem.c
68387 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
68388 return 0;
68389 }
68390
68391 +static struct ipc_ops sem_ops = {
68392 + .getnew = newary,
68393 + .associate = sem_security,
68394 + .more_checks = sem_more_checks
68395 +};
68396 +
68397 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
68398 {
68399 struct ipc_namespace *ns;
68400 - struct ipc_ops sem_ops;
68401 struct ipc_params sem_params;
68402
68403 ns = current->nsproxy->ipc_ns;
68404 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
68405 if (nsems < 0 || nsems > ns->sc_semmsl)
68406 return -EINVAL;
68407
68408 - sem_ops.getnew = newary;
68409 - sem_ops.associate = sem_security;
68410 - sem_ops.more_checks = sem_more_checks;
68411 -
68412 sem_params.key = key;
68413 sem_params.flg = semflg;
68414 sem_params.u.nsems = nsems;
68415 diff --git a/ipc/shm.c b/ipc/shm.c
68416 index dff40c9..9450e27 100644
68417 --- a/ipc/shm.c
68418 +++ b/ipc/shm.c
68419 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
68420 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
68421 #endif
68422
68423 +#ifdef CONFIG_GRKERNSEC
68424 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68425 + const time_t shm_createtime, const uid_t cuid,
68426 + const int shmid);
68427 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68428 + const time_t shm_createtime);
68429 +#endif
68430 +
68431 void shm_init_ns(struct ipc_namespace *ns)
68432 {
68433 ns->shm_ctlmax = SHMMAX;
68434 @@ -520,6 +528,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
68435 shp->shm_lprid = 0;
68436 shp->shm_atim = shp->shm_dtim = 0;
68437 shp->shm_ctim = get_seconds();
68438 +#ifdef CONFIG_GRKERNSEC
68439 + {
68440 + struct timespec timeval;
68441 + do_posix_clock_monotonic_gettime(&timeval);
68442 +
68443 + shp->shm_createtime = timeval.tv_sec;
68444 + }
68445 +#endif
68446 shp->shm_segsz = size;
68447 shp->shm_nattch = 0;
68448 shp->shm_file = file;
68449 @@ -571,18 +587,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
68450 return 0;
68451 }
68452
68453 +static struct ipc_ops shm_ops = {
68454 + .getnew = newseg,
68455 + .associate = shm_security,
68456 + .more_checks = shm_more_checks
68457 +};
68458 +
68459 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
68460 {
68461 struct ipc_namespace *ns;
68462 - struct ipc_ops shm_ops;
68463 struct ipc_params shm_params;
68464
68465 ns = current->nsproxy->ipc_ns;
68466
68467 - shm_ops.getnew = newseg;
68468 - shm_ops.associate = shm_security;
68469 - shm_ops.more_checks = shm_more_checks;
68470 -
68471 shm_params.key = key;
68472 shm_params.flg = shmflg;
68473 shm_params.u.size = size;
68474 @@ -1003,6 +1020,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
68475 f_mode = FMODE_READ | FMODE_WRITE;
68476 }
68477 if (shmflg & SHM_EXEC) {
68478 +
68479 +#ifdef CONFIG_PAX_MPROTECT
68480 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
68481 + goto out;
68482 +#endif
68483 +
68484 prot |= PROT_EXEC;
68485 acc_mode |= S_IXUGO;
68486 }
68487 @@ -1026,9 +1049,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
68488 if (err)
68489 goto out_unlock;
68490
68491 +#ifdef CONFIG_GRKERNSEC
68492 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
68493 + shp->shm_perm.cuid, shmid) ||
68494 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
68495 + err = -EACCES;
68496 + goto out_unlock;
68497 + }
68498 +#endif
68499 +
68500 path = shp->shm_file->f_path;
68501 path_get(&path);
68502 shp->shm_nattch++;
68503 +#ifdef CONFIG_GRKERNSEC
68504 + shp->shm_lapid = current->pid;
68505 +#endif
68506 size = i_size_read(path.dentry->d_inode);
68507 shm_unlock(shp);
68508
68509 diff --git a/kernel/acct.c b/kernel/acct.c
68510 index 051e071..15e0920 100644
68511 --- a/kernel/acct.c
68512 +++ b/kernel/acct.c
68513 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
68514 */
68515 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
68516 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
68517 - file->f_op->write(file, (char *)&ac,
68518 + file->f_op->write(file, (char __force_user *)&ac,
68519 sizeof(acct_t), &file->f_pos);
68520 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
68521 set_fs(fs);
68522 diff --git a/kernel/audit.c b/kernel/audit.c
68523 index 40414e9..c920b72 100644
68524 --- a/kernel/audit.c
68525 +++ b/kernel/audit.c
68526 @@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
68527 3) suppressed due to audit_rate_limit
68528 4) suppressed due to audit_backlog_limit
68529 */
68530 -static atomic_t audit_lost = ATOMIC_INIT(0);
68531 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
68532
68533 /* The netlink socket. */
68534 static struct sock *audit_sock;
68535 @@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
68536 unsigned long now;
68537 int print;
68538
68539 - atomic_inc(&audit_lost);
68540 + atomic_inc_unchecked(&audit_lost);
68541
68542 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
68543
68544 @@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
68545 printk(KERN_WARNING
68546 "audit: audit_lost=%d audit_rate_limit=%d "
68547 "audit_backlog_limit=%d\n",
68548 - atomic_read(&audit_lost),
68549 + atomic_read_unchecked(&audit_lost),
68550 audit_rate_limit,
68551 audit_backlog_limit);
68552 audit_panic(message);
68553 @@ -677,7 +677,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
68554 status_set.pid = audit_pid;
68555 status_set.rate_limit = audit_rate_limit;
68556 status_set.backlog_limit = audit_backlog_limit;
68557 - status_set.lost = atomic_read(&audit_lost);
68558 + status_set.lost = atomic_read_unchecked(&audit_lost);
68559 status_set.backlog = skb_queue_len(&audit_skb_queue);
68560 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
68561 &status_set, sizeof(status_set));
68562 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
68563 index 2f186ed..afb42c2 100644
68564 --- a/kernel/auditsc.c
68565 +++ b/kernel/auditsc.c
68566 @@ -2352,7 +2352,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
68567 }
68568
68569 /* global counter which is incremented every time something logs in */
68570 -static atomic_t session_id = ATOMIC_INIT(0);
68571 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
68572
68573 /**
68574 * audit_set_loginuid - set current task's audit_context loginuid
68575 @@ -2376,7 +2376,7 @@ int audit_set_loginuid(kuid_t loginuid)
68576 return -EPERM;
68577 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
68578
68579 - sessionid = atomic_inc_return(&session_id);
68580 + sessionid = atomic_inc_return_unchecked(&session_id);
68581 if (context && context->in_syscall) {
68582 struct audit_buffer *ab;
68583
68584 diff --git a/kernel/capability.c b/kernel/capability.c
68585 index 493d972..ea17248 100644
68586 --- a/kernel/capability.c
68587 +++ b/kernel/capability.c
68588 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
68589 * before modification is attempted and the application
68590 * fails.
68591 */
68592 + if (tocopy > ARRAY_SIZE(kdata))
68593 + return -EFAULT;
68594 +
68595 if (copy_to_user(dataptr, kdata, tocopy
68596 * sizeof(struct __user_cap_data_struct))) {
68597 return -EFAULT;
68598 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
68599 int ret;
68600
68601 rcu_read_lock();
68602 - ret = security_capable(__task_cred(t), ns, cap);
68603 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
68604 + gr_task_is_capable(t, __task_cred(t), cap);
68605 rcu_read_unlock();
68606
68607 - return (ret == 0);
68608 + return ret;
68609 }
68610
68611 /**
68612 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
68613 int ret;
68614
68615 rcu_read_lock();
68616 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
68617 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
68618 rcu_read_unlock();
68619
68620 - return (ret == 0);
68621 + return ret;
68622 }
68623
68624 /**
68625 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
68626 BUG();
68627 }
68628
68629 - if (security_capable(current_cred(), ns, cap) == 0) {
68630 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
68631 current->flags |= PF_SUPERPRIV;
68632 return true;
68633 }
68634 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
68635 }
68636 EXPORT_SYMBOL(ns_capable);
68637
68638 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
68639 +{
68640 + if (unlikely(!cap_valid(cap))) {
68641 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
68642 + BUG();
68643 + }
68644 +
68645 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
68646 + current->flags |= PF_SUPERPRIV;
68647 + return true;
68648 + }
68649 + return false;
68650 +}
68651 +EXPORT_SYMBOL(ns_capable_nolog);
68652 +
68653 /**
68654 * capable - Determine if the current task has a superior capability in effect
68655 * @cap: The capability to be tested for
68656 @@ -408,6 +427,12 @@ bool capable(int cap)
68657 }
68658 EXPORT_SYMBOL(capable);
68659
68660 +bool capable_nolog(int cap)
68661 +{
68662 + return ns_capable_nolog(&init_user_ns, cap);
68663 +}
68664 +EXPORT_SYMBOL(capable_nolog);
68665 +
68666 /**
68667 * nsown_capable - Check superior capability to one's own user_ns
68668 * @cap: The capability in question
68669 @@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
68670
68671 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
68672 }
68673 +
68674 +bool inode_capable_nolog(const struct inode *inode, int cap)
68675 +{
68676 + struct user_namespace *ns = current_user_ns();
68677 +
68678 + return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
68679 +}
68680 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
68681 index f24f724..0612ec45 100644
68682 --- a/kernel/cgroup.c
68683 +++ b/kernel/cgroup.c
68684 @@ -5518,7 +5518,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
68685 struct css_set *cg = link->cg;
68686 struct task_struct *task;
68687 int count = 0;
68688 - seq_printf(seq, "css_set %p\n", cg);
68689 + seq_printf(seq, "css_set %pK\n", cg);
68690 list_for_each_entry(task, &cg->tasks, cg_list) {
68691 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
68692 seq_puts(seq, " ...\n");
68693 diff --git a/kernel/compat.c b/kernel/compat.c
68694 index c28a306..b4d0cf3 100644
68695 --- a/kernel/compat.c
68696 +++ b/kernel/compat.c
68697 @@ -13,6 +13,7 @@
68698
68699 #include <linux/linkage.h>
68700 #include <linux/compat.h>
68701 +#include <linux/module.h>
68702 #include <linux/errno.h>
68703 #include <linux/time.h>
68704 #include <linux/signal.h>
68705 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
68706 mm_segment_t oldfs;
68707 long ret;
68708
68709 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
68710 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
68711 oldfs = get_fs();
68712 set_fs(KERNEL_DS);
68713 ret = hrtimer_nanosleep_restart(restart);
68714 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
68715 oldfs = get_fs();
68716 set_fs(KERNEL_DS);
68717 ret = hrtimer_nanosleep(&tu,
68718 - rmtp ? (struct timespec __user *)&rmt : NULL,
68719 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
68720 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
68721 set_fs(oldfs);
68722
68723 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
68724 mm_segment_t old_fs = get_fs();
68725
68726 set_fs(KERNEL_DS);
68727 - ret = sys_sigpending((old_sigset_t __user *) &s);
68728 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
68729 set_fs(old_fs);
68730 if (ret == 0)
68731 ret = put_user(s, set);
68732 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
68733 mm_segment_t old_fs = get_fs();
68734
68735 set_fs(KERNEL_DS);
68736 - ret = sys_old_getrlimit(resource, &r);
68737 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
68738 set_fs(old_fs);
68739
68740 if (!ret) {
68741 @@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
68742 mm_segment_t old_fs = get_fs();
68743
68744 set_fs(KERNEL_DS);
68745 - ret = sys_getrusage(who, (struct rusage __user *) &r);
68746 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
68747 set_fs(old_fs);
68748
68749 if (ret)
68750 @@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
68751 set_fs (KERNEL_DS);
68752 ret = sys_wait4(pid,
68753 (stat_addr ?
68754 - (unsigned int __user *) &status : NULL),
68755 - options, (struct rusage __user *) &r);
68756 + (unsigned int __force_user *) &status : NULL),
68757 + options, (struct rusage __force_user *) &r);
68758 set_fs (old_fs);
68759
68760 if (ret > 0) {
68761 @@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
68762 memset(&info, 0, sizeof(info));
68763
68764 set_fs(KERNEL_DS);
68765 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
68766 - uru ? (struct rusage __user *)&ru : NULL);
68767 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
68768 + uru ? (struct rusage __force_user *)&ru : NULL);
68769 set_fs(old_fs);
68770
68771 if ((ret < 0) || (info.si_signo == 0))
68772 @@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
68773 oldfs = get_fs();
68774 set_fs(KERNEL_DS);
68775 err = sys_timer_settime(timer_id, flags,
68776 - (struct itimerspec __user *) &newts,
68777 - (struct itimerspec __user *) &oldts);
68778 + (struct itimerspec __force_user *) &newts,
68779 + (struct itimerspec __force_user *) &oldts);
68780 set_fs(oldfs);
68781 if (!err && old && put_compat_itimerspec(old, &oldts))
68782 return -EFAULT;
68783 @@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
68784 oldfs = get_fs();
68785 set_fs(KERNEL_DS);
68786 err = sys_timer_gettime(timer_id,
68787 - (struct itimerspec __user *) &ts);
68788 + (struct itimerspec __force_user *) &ts);
68789 set_fs(oldfs);
68790 if (!err && put_compat_itimerspec(setting, &ts))
68791 return -EFAULT;
68792 @@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
68793 oldfs = get_fs();
68794 set_fs(KERNEL_DS);
68795 err = sys_clock_settime(which_clock,
68796 - (struct timespec __user *) &ts);
68797 + (struct timespec __force_user *) &ts);
68798 set_fs(oldfs);
68799 return err;
68800 }
68801 @@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
68802 oldfs = get_fs();
68803 set_fs(KERNEL_DS);
68804 err = sys_clock_gettime(which_clock,
68805 - (struct timespec __user *) &ts);
68806 + (struct timespec __force_user *) &ts);
68807 set_fs(oldfs);
68808 if (!err && put_compat_timespec(&ts, tp))
68809 return -EFAULT;
68810 @@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
68811
68812 oldfs = get_fs();
68813 set_fs(KERNEL_DS);
68814 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
68815 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
68816 set_fs(oldfs);
68817
68818 err = compat_put_timex(utp, &txc);
68819 @@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
68820 oldfs = get_fs();
68821 set_fs(KERNEL_DS);
68822 err = sys_clock_getres(which_clock,
68823 - (struct timespec __user *) &ts);
68824 + (struct timespec __force_user *) &ts);
68825 set_fs(oldfs);
68826 if (!err && tp && put_compat_timespec(&ts, tp))
68827 return -EFAULT;
68828 @@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
68829 long err;
68830 mm_segment_t oldfs;
68831 struct timespec tu;
68832 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
68833 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
68834
68835 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
68836 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
68837 oldfs = get_fs();
68838 set_fs(KERNEL_DS);
68839 err = clock_nanosleep_restart(restart);
68840 @@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
68841 oldfs = get_fs();
68842 set_fs(KERNEL_DS);
68843 err = sys_clock_nanosleep(which_clock, flags,
68844 - (struct timespec __user *) &in,
68845 - (struct timespec __user *) &out);
68846 + (struct timespec __force_user *) &in,
68847 + (struct timespec __force_user *) &out);
68848 set_fs(oldfs);
68849
68850 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
68851 diff --git a/kernel/configs.c b/kernel/configs.c
68852 index 42e8fa0..9e7406b 100644
68853 --- a/kernel/configs.c
68854 +++ b/kernel/configs.c
68855 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
68856 struct proc_dir_entry *entry;
68857
68858 /* create the current config file */
68859 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
68860 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
68861 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
68862 + &ikconfig_file_ops);
68863 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68864 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
68865 + &ikconfig_file_ops);
68866 +#endif
68867 +#else
68868 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
68869 &ikconfig_file_ops);
68870 +#endif
68871 +
68872 if (!entry)
68873 return -ENOMEM;
68874
68875 diff --git a/kernel/cred.c b/kernel/cred.c
68876 index 48cea3d..3476734 100644
68877 --- a/kernel/cred.c
68878 +++ b/kernel/cred.c
68879 @@ -207,6 +207,16 @@ void exit_creds(struct task_struct *tsk)
68880 validate_creds(cred);
68881 alter_cred_subscribers(cred, -1);
68882 put_cred(cred);
68883 +
68884 +#ifdef CONFIG_GRKERNSEC_SETXID
68885 + cred = (struct cred *) tsk->delayed_cred;
68886 + if (cred != NULL) {
68887 + tsk->delayed_cred = NULL;
68888 + validate_creds(cred);
68889 + alter_cred_subscribers(cred, -1);
68890 + put_cred(cred);
68891 + }
68892 +#endif
68893 }
68894
68895 /**
68896 @@ -469,7 +479,7 @@ error_put:
68897 * Always returns 0 thus allowing this function to be tail-called at the end
68898 * of, say, sys_setgid().
68899 */
68900 -int commit_creds(struct cred *new)
68901 +static int __commit_creds(struct cred *new)
68902 {
68903 struct task_struct *task = current;
68904 const struct cred *old = task->real_cred;
68905 @@ -488,6 +498,8 @@ int commit_creds(struct cred *new)
68906
68907 get_cred(new); /* we will require a ref for the subj creds too */
68908
68909 + gr_set_role_label(task, new->uid, new->gid);
68910 +
68911 /* dumpability changes */
68912 if (!uid_eq(old->euid, new->euid) ||
68913 !gid_eq(old->egid, new->egid) ||
68914 @@ -537,6 +549,101 @@ int commit_creds(struct cred *new)
68915 put_cred(old);
68916 return 0;
68917 }
68918 +#ifdef CONFIG_GRKERNSEC_SETXID
68919 +extern int set_user(struct cred *new);
68920 +
68921 +void gr_delayed_cred_worker(void)
68922 +{
68923 + const struct cred *new = current->delayed_cred;
68924 + struct cred *ncred;
68925 +
68926 + current->delayed_cred = NULL;
68927 +
68928 + if (current_uid() && new != NULL) {
68929 + // from doing get_cred on it when queueing this
68930 + put_cred(new);
68931 + return;
68932 + } else if (new == NULL)
68933 + return;
68934 +
68935 + ncred = prepare_creds();
68936 + if (!ncred)
68937 + goto die;
68938 + // uids
68939 + ncred->uid = new->uid;
68940 + ncred->euid = new->euid;
68941 + ncred->suid = new->suid;
68942 + ncred->fsuid = new->fsuid;
68943 + // gids
68944 + ncred->gid = new->gid;
68945 + ncred->egid = new->egid;
68946 + ncred->sgid = new->sgid;
68947 + ncred->fsgid = new->fsgid;
68948 + // groups
68949 + if (set_groups(ncred, new->group_info) < 0) {
68950 + abort_creds(ncred);
68951 + goto die;
68952 + }
68953 + // caps
68954 + ncred->securebits = new->securebits;
68955 + ncred->cap_inheritable = new->cap_inheritable;
68956 + ncred->cap_permitted = new->cap_permitted;
68957 + ncred->cap_effective = new->cap_effective;
68958 + ncred->cap_bset = new->cap_bset;
68959 +
68960 + if (set_user(ncred)) {
68961 + abort_creds(ncred);
68962 + goto die;
68963 + }
68964 +
68965 + // from doing get_cred on it when queueing this
68966 + put_cred(new);
68967 +
68968 + __commit_creds(ncred);
68969 + return;
68970 +die:
68971 + // from doing get_cred on it when queueing this
68972 + put_cred(new);
68973 + do_group_exit(SIGKILL);
68974 +}
68975 +#endif
68976 +
68977 +int commit_creds(struct cred *new)
68978 +{
68979 +#ifdef CONFIG_GRKERNSEC_SETXID
68980 + int ret;
68981 + int schedule_it = 0;
68982 + struct task_struct *t;
68983 +
68984 + /* we won't get called with tasklist_lock held for writing
68985 + and interrupts disabled as the cred struct in that case is
68986 + init_cred
68987 + */
68988 + if (grsec_enable_setxid && !current_is_single_threaded() &&
68989 + !current_uid() && new->uid) {
68990 + schedule_it = 1;
68991 + }
68992 + ret = __commit_creds(new);
68993 + if (schedule_it) {
68994 + rcu_read_lock();
68995 + read_lock(&tasklist_lock);
68996 + for (t = next_thread(current); t != current;
68997 + t = next_thread(t)) {
68998 + if (t->delayed_cred == NULL) {
68999 + t->delayed_cred = get_cred(new);
69000 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
69001 + set_tsk_need_resched(t);
69002 + }
69003 + }
69004 + read_unlock(&tasklist_lock);
69005 + rcu_read_unlock();
69006 + }
69007 + return ret;
69008 +#else
69009 + return __commit_creds(new);
69010 +#endif
69011 +}
69012 +
69013 EXPORT_SYMBOL(commit_creds);
69014
69015 /**
69016 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
69017 index 9a61738..c5c8f3a 100644
69018 --- a/kernel/debug/debug_core.c
69019 +++ b/kernel/debug/debug_core.c
69020 @@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
69021 */
69022 static atomic_t masters_in_kgdb;
69023 static atomic_t slaves_in_kgdb;
69024 -static atomic_t kgdb_break_tasklet_var;
69025 +static atomic_unchecked_t kgdb_break_tasklet_var;
69026 atomic_t kgdb_setting_breakpoint;
69027
69028 struct task_struct *kgdb_usethread;
69029 @@ -132,7 +132,7 @@ int kgdb_single_step;
69030 static pid_t kgdb_sstep_pid;
69031
69032 /* to keep track of the CPU which is doing the single stepping*/
69033 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
69034 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
69035
69036 /*
69037 * If you are debugging a problem where roundup (the collection of
69038 @@ -540,7 +540,7 @@ return_normal:
69039 * kernel will only try for the value of sstep_tries before
69040 * giving up and continuing on.
69041 */
69042 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
69043 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
69044 (kgdb_info[cpu].task &&
69045 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
69046 atomic_set(&kgdb_active, -1);
69047 @@ -634,8 +634,8 @@ cpu_master_loop:
69048 }
69049
69050 kgdb_restore:
69051 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
69052 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
69053 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
69054 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
69055 if (kgdb_info[sstep_cpu].task)
69056 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
69057 else
69058 @@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
69059 static void kgdb_tasklet_bpt(unsigned long ing)
69060 {
69061 kgdb_breakpoint();
69062 - atomic_set(&kgdb_break_tasklet_var, 0);
69063 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
69064 }
69065
69066 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
69067
69068 void kgdb_schedule_breakpoint(void)
69069 {
69070 - if (atomic_read(&kgdb_break_tasklet_var) ||
69071 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
69072 atomic_read(&kgdb_active) != -1 ||
69073 atomic_read(&kgdb_setting_breakpoint))
69074 return;
69075 - atomic_inc(&kgdb_break_tasklet_var);
69076 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
69077 tasklet_schedule(&kgdb_tasklet_breakpoint);
69078 }
69079 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
69080 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
69081 index 4d5f8d5..4743f33 100644
69082 --- a/kernel/debug/kdb/kdb_main.c
69083 +++ b/kernel/debug/kdb/kdb_main.c
69084 @@ -1972,7 +1972,7 @@ static int kdb_lsmod(int argc, const char **argv)
69085 list_for_each_entry(mod, kdb_modules, list) {
69086
69087 kdb_printf("%-20s%8u 0x%p ", mod->name,
69088 - mod->core_size, (void *)mod);
69089 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
69090 #ifdef CONFIG_MODULE_UNLOAD
69091 kdb_printf("%4ld ", module_refcount(mod));
69092 #endif
69093 @@ -1982,7 +1982,7 @@ static int kdb_lsmod(int argc, const char **argv)
69094 kdb_printf(" (Loading)");
69095 else
69096 kdb_printf(" (Live)");
69097 - kdb_printf(" 0x%p", mod->module_core);
69098 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
69099
69100 #ifdef CONFIG_MODULE_UNLOAD
69101 {
69102 diff --git a/kernel/events/core.c b/kernel/events/core.c
69103 index dbccf83..8c66482 100644
69104 --- a/kernel/events/core.c
69105 +++ b/kernel/events/core.c
69106 @@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
69107 return 0;
69108 }
69109
69110 -static atomic64_t perf_event_id;
69111 +static atomic64_unchecked_t perf_event_id;
69112
69113 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
69114 enum event_type_t event_type);
69115 @@ -2668,7 +2668,7 @@ static void __perf_event_read(void *info)
69116
69117 static inline u64 perf_event_count(struct perf_event *event)
69118 {
69119 - return local64_read(&event->count) + atomic64_read(&event->child_count);
69120 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
69121 }
69122
69123 static u64 perf_event_read(struct perf_event *event)
69124 @@ -2998,9 +2998,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
69125 mutex_lock(&event->child_mutex);
69126 total += perf_event_read(event);
69127 *enabled += event->total_time_enabled +
69128 - atomic64_read(&event->child_total_time_enabled);
69129 + atomic64_read_unchecked(&event->child_total_time_enabled);
69130 *running += event->total_time_running +
69131 - atomic64_read(&event->child_total_time_running);
69132 + atomic64_read_unchecked(&event->child_total_time_running);
69133
69134 list_for_each_entry(child, &event->child_list, child_list) {
69135 total += perf_event_read(child);
69136 @@ -3403,10 +3403,10 @@ void perf_event_update_userpage(struct perf_event *event)
69137 userpg->offset -= local64_read(&event->hw.prev_count);
69138
69139 userpg->time_enabled = enabled +
69140 - atomic64_read(&event->child_total_time_enabled);
69141 + atomic64_read_unchecked(&event->child_total_time_enabled);
69142
69143 userpg->time_running = running +
69144 - atomic64_read(&event->child_total_time_running);
69145 + atomic64_read_unchecked(&event->child_total_time_running);
69146
69147 arch_perf_update_userpage(userpg, now);
69148
69149 @@ -3965,11 +3965,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
69150 values[n++] = perf_event_count(event);
69151 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
69152 values[n++] = enabled +
69153 - atomic64_read(&event->child_total_time_enabled);
69154 + atomic64_read_unchecked(&event->child_total_time_enabled);
69155 }
69156 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
69157 values[n++] = running +
69158 - atomic64_read(&event->child_total_time_running);
69159 + atomic64_read_unchecked(&event->child_total_time_running);
69160 }
69161 if (read_format & PERF_FORMAT_ID)
69162 values[n++] = primary_event_id(event);
69163 @@ -4712,12 +4712,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
69164 * need to add enough zero bytes after the string to handle
69165 * the 64bit alignment we do later.
69166 */
69167 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
69168 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
69169 if (!buf) {
69170 name = strncpy(tmp, "//enomem", sizeof(tmp));
69171 goto got_name;
69172 }
69173 - name = d_path(&file->f_path, buf, PATH_MAX);
69174 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
69175 if (IS_ERR(name)) {
69176 name = strncpy(tmp, "//toolong", sizeof(tmp));
69177 goto got_name;
69178 @@ -6156,7 +6156,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
69179 event->parent = parent_event;
69180
69181 event->ns = get_pid_ns(current->nsproxy->pid_ns);
69182 - event->id = atomic64_inc_return(&perf_event_id);
69183 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
69184
69185 event->state = PERF_EVENT_STATE_INACTIVE;
69186
69187 @@ -6774,10 +6774,10 @@ static void sync_child_event(struct perf_event *child_event,
69188 /*
69189 * Add back the child's count to the parent's count:
69190 */
69191 - atomic64_add(child_val, &parent_event->child_count);
69192 - atomic64_add(child_event->total_time_enabled,
69193 + atomic64_add_unchecked(child_val, &parent_event->child_count);
69194 + atomic64_add_unchecked(child_event->total_time_enabled,
69195 &parent_event->child_total_time_enabled);
69196 - atomic64_add(child_event->total_time_running,
69197 + atomic64_add_unchecked(child_event->total_time_running,
69198 &parent_event->child_total_time_running);
69199
69200 /*
69201 diff --git a/kernel/exit.c b/kernel/exit.c
69202 index 346616c..a86ec83 100644
69203 --- a/kernel/exit.c
69204 +++ b/kernel/exit.c
69205 @@ -182,6 +182,10 @@ void release_task(struct task_struct * p)
69206 struct task_struct *leader;
69207 int zap_leader;
69208 repeat:
69209 +#ifdef CONFIG_NET
69210 + gr_del_task_from_ip_table(p);
69211 +#endif
69212 +
69213 /* don't need to get the RCU readlock here - the process is dead and
69214 * can't be modifying its own credentials. But shut RCU-lockdep up */
69215 rcu_read_lock();
69216 @@ -394,7 +398,7 @@ int allow_signal(int sig)
69217 * know it'll be handled, so that they don't get converted to
69218 * SIGKILL or just silently dropped.
69219 */
69220 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
69221 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
69222 recalc_sigpending();
69223 spin_unlock_irq(&current->sighand->siglock);
69224 return 0;
69225 @@ -430,6 +434,8 @@ void daemonize(const char *name, ...)
69226 vsnprintf(current->comm, sizeof(current->comm), name, args);
69227 va_end(args);
69228
69229 + gr_set_kernel_label(current);
69230 +
69231 /*
69232 * If we were started as result of loading a module, close all of the
69233 * user space pages. We don't need them, and if we didn't close them
69234 @@ -812,6 +818,8 @@ void do_exit(long code)
69235 struct task_struct *tsk = current;
69236 int group_dead;
69237
69238 + set_fs(USER_DS);
69239 +
69240 profile_task_exit(tsk);
69241
69242 WARN_ON(blk_needs_flush_plug(tsk));
69243 @@ -828,7 +836,6 @@ void do_exit(long code)
69244 * mm_release()->clear_child_tid() from writing to a user-controlled
69245 * kernel address.
69246 */
69247 - set_fs(USER_DS);
69248
69249 ptrace_event(PTRACE_EVENT_EXIT, code);
69250
69251 @@ -887,6 +894,9 @@ void do_exit(long code)
69252 tsk->exit_code = code;
69253 taskstats_exit(tsk, group_dead);
69254
69255 + gr_acl_handle_psacct(tsk, code);
69256 + gr_acl_handle_exit();
69257 +
69258 exit_mm(tsk);
69259
69260 if (group_dead)
69261 @@ -1007,7 +1017,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
69262 * Take down every thread in the group. This is called by fatal signals
69263 * as well as by sys_exit_group (below).
69264 */
69265 -void
69266 +__noreturn void
69267 do_group_exit(int exit_code)
69268 {
69269 struct signal_struct *sig = current->signal;
69270 diff --git a/kernel/fork.c b/kernel/fork.c
69271 index 8b20ab7..58f2e45 100644
69272 --- a/kernel/fork.c
69273 +++ b/kernel/fork.c
69274 @@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
69275 *stackend = STACK_END_MAGIC; /* for overflow detection */
69276
69277 #ifdef CONFIG_CC_STACKPROTECTOR
69278 - tsk->stack_canary = get_random_int();
69279 + tsk->stack_canary = pax_get_random_long();
69280 #endif
69281
69282 /*
69283 @@ -344,13 +344,81 @@ free_tsk:
69284 }
69285
69286 #ifdef CONFIG_MMU
69287 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
69288 +{
69289 + struct vm_area_struct *tmp;
69290 + unsigned long charge;
69291 + struct mempolicy *pol;
69292 + struct file *file;
69293 +
69294 + charge = 0;
69295 + if (mpnt->vm_flags & VM_ACCOUNT) {
69296 + unsigned long len = vma_pages(mpnt);
69297 +
69298 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
69299 + goto fail_nomem;
69300 + charge = len;
69301 + }
69302 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69303 + if (!tmp)
69304 + goto fail_nomem;
69305 + *tmp = *mpnt;
69306 + tmp->vm_mm = mm;
69307 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
69308 + pol = mpol_dup(vma_policy(mpnt));
69309 + if (IS_ERR(pol))
69310 + goto fail_nomem_policy;
69311 + vma_set_policy(tmp, pol);
69312 + if (anon_vma_fork(tmp, mpnt))
69313 + goto fail_nomem_anon_vma_fork;
69314 + tmp->vm_flags &= ~VM_LOCKED;
69315 + tmp->vm_next = tmp->vm_prev = NULL;
69316 + tmp->vm_mirror = NULL;
69317 + file = tmp->vm_file;
69318 + if (file) {
69319 + struct inode *inode = file->f_path.dentry->d_inode;
69320 + struct address_space *mapping = file->f_mapping;
69321 +
69322 + get_file(file);
69323 + if (tmp->vm_flags & VM_DENYWRITE)
69324 + atomic_dec(&inode->i_writecount);
69325 + mutex_lock(&mapping->i_mmap_mutex);
69326 + if (tmp->vm_flags & VM_SHARED)
69327 + mapping->i_mmap_writable++;
69328 + flush_dcache_mmap_lock(mapping);
69329 + /* insert tmp into the share list, just after mpnt */
69330 + if (unlikely(tmp->vm_flags & VM_NONLINEAR))
69331 + vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
69332 + else
69333 + vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
69334 + flush_dcache_mmap_unlock(mapping);
69335 + mutex_unlock(&mapping->i_mmap_mutex);
69336 + }
69337 +
69338 + /*
69339 + * Clear hugetlb-related page reserves for children. This only
69340 + * affects MAP_PRIVATE mappings. Faults generated by the child
69341 + * are not guaranteed to succeed, even if read-only
69342 + */
69343 + if (is_vm_hugetlb_page(tmp))
69344 + reset_vma_resv_huge_pages(tmp);
69345 +
69346 + return tmp;
69347 +
69348 +fail_nomem_anon_vma_fork:
69349 + mpol_put(pol);
69350 +fail_nomem_policy:
69351 + kmem_cache_free(vm_area_cachep, tmp);
69352 +fail_nomem:
69353 + vm_unacct_memory(charge);
69354 + return NULL;
69355 +}
69356 +
69357 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69358 {
69359 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
69360 struct rb_node **rb_link, *rb_parent;
69361 int retval;
69362 - unsigned long charge;
69363 - struct mempolicy *pol;
69364
69365 down_write(&oldmm->mmap_sem);
69366 flush_cache_dup_mm(oldmm);
69367 @@ -363,8 +431,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69368 mm->locked_vm = 0;
69369 mm->mmap = NULL;
69370 mm->mmap_cache = NULL;
69371 - mm->free_area_cache = oldmm->mmap_base;
69372 - mm->cached_hole_size = ~0UL;
69373 + mm->free_area_cache = oldmm->free_area_cache;
69374 + mm->cached_hole_size = oldmm->cached_hole_size;
69375 mm->map_count = 0;
69376 cpumask_clear(mm_cpumask(mm));
69377 mm->mm_rb = RB_ROOT;
69378 @@ -380,57 +448,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69379
69380 prev = NULL;
69381 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
69382 - struct file *file;
69383 -
69384 if (mpnt->vm_flags & VM_DONTCOPY) {
69385 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
69386 -vma_pages(mpnt));
69387 continue;
69388 }
69389 - charge = 0;
69390 - if (mpnt->vm_flags & VM_ACCOUNT) {
69391 - unsigned long len = vma_pages(mpnt);
69392 -
69393 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
69394 - goto fail_nomem;
69395 - charge = len;
69396 - }
69397 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69398 - if (!tmp)
69399 - goto fail_nomem;
69400 - *tmp = *mpnt;
69401 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
69402 - pol = mpol_dup(vma_policy(mpnt));
69403 - retval = PTR_ERR(pol);
69404 - if (IS_ERR(pol))
69405 - goto fail_nomem_policy;
69406 - vma_set_policy(tmp, pol);
69407 - tmp->vm_mm = mm;
69408 - if (anon_vma_fork(tmp, mpnt))
69409 - goto fail_nomem_anon_vma_fork;
69410 - tmp->vm_flags &= ~VM_LOCKED;
69411 - tmp->vm_next = tmp->vm_prev = NULL;
69412 - file = tmp->vm_file;
69413 - if (file) {
69414 - struct inode *inode = file->f_path.dentry->d_inode;
69415 - struct address_space *mapping = file->f_mapping;
69416 -
69417 - get_file(file);
69418 - if (tmp->vm_flags & VM_DENYWRITE)
69419 - atomic_dec(&inode->i_writecount);
69420 - mutex_lock(&mapping->i_mmap_mutex);
69421 - if (tmp->vm_flags & VM_SHARED)
69422 - mapping->i_mmap_writable++;
69423 - flush_dcache_mmap_lock(mapping);
69424 - /* insert tmp into the share list, just after mpnt */
69425 - if (unlikely(tmp->vm_flags & VM_NONLINEAR))
69426 - vma_nonlinear_insert(tmp,
69427 - &mapping->i_mmap_nonlinear);
69428 - else
69429 - vma_interval_tree_insert_after(tmp, mpnt,
69430 - &mapping->i_mmap);
69431 - flush_dcache_mmap_unlock(mapping);
69432 - mutex_unlock(&mapping->i_mmap_mutex);
69433 + tmp = dup_vma(mm, oldmm, mpnt);
69434 + if (!tmp) {
69435 + retval = -ENOMEM;
69436 + goto out;
69437 }
69438
69439 /*
69440 @@ -462,6 +488,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69441 if (retval)
69442 goto out;
69443 }
69444 +
69445 +#ifdef CONFIG_PAX_SEGMEXEC
69446 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
69447 + struct vm_area_struct *mpnt_m;
69448 +
69449 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
69450 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
69451 +
69452 + if (!mpnt->vm_mirror)
69453 + continue;
69454 +
69455 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
69456 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
69457 + mpnt->vm_mirror = mpnt_m;
69458 + } else {
69459 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
69460 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
69461 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
69462 + mpnt->vm_mirror->vm_mirror = mpnt;
69463 + }
69464 + }
69465 + BUG_ON(mpnt_m);
69466 + }
69467 +#endif
69468 +
69469 /* a new mm has just been created */
69470 arch_dup_mmap(oldmm, mm);
69471 retval = 0;
69472 @@ -470,14 +521,6 @@ out:
69473 flush_tlb_mm(oldmm);
69474 up_write(&oldmm->mmap_sem);
69475 return retval;
69476 -fail_nomem_anon_vma_fork:
69477 - mpol_put(pol);
69478 -fail_nomem_policy:
69479 - kmem_cache_free(vm_area_cachep, tmp);
69480 -fail_nomem:
69481 - retval = -ENOMEM;
69482 - vm_unacct_memory(charge);
69483 - goto out;
69484 }
69485
69486 static inline int mm_alloc_pgd(struct mm_struct *mm)
69487 @@ -692,8 +735,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
69488 return ERR_PTR(err);
69489
69490 mm = get_task_mm(task);
69491 - if (mm && mm != current->mm &&
69492 - !ptrace_may_access(task, mode)) {
69493 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
69494 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
69495 mmput(mm);
69496 mm = ERR_PTR(-EACCES);
69497 }
69498 @@ -912,13 +955,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
69499 spin_unlock(&fs->lock);
69500 return -EAGAIN;
69501 }
69502 - fs->users++;
69503 + atomic_inc(&fs->users);
69504 spin_unlock(&fs->lock);
69505 return 0;
69506 }
69507 tsk->fs = copy_fs_struct(fs);
69508 if (!tsk->fs)
69509 return -ENOMEM;
69510 + /* Carry through gr_chroot_dentry and is_chrooted instead
69511 + of recomputing it here. Already copied when the task struct
69512 + is duplicated. This allows pivot_root to not be treated as
69513 + a chroot
69514 + */
69515 + //gr_set_chroot_entries(tsk, &tsk->fs->root);
69516 +
69517 return 0;
69518 }
69519
69520 @@ -1184,6 +1234,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
69521 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
69522 #endif
69523 retval = -EAGAIN;
69524 +
69525 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
69526 +
69527 if (atomic_read(&p->real_cred->user->processes) >=
69528 task_rlimit(p, RLIMIT_NPROC)) {
69529 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
69530 @@ -1402,6 +1455,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
69531 /* Need tasklist lock for parent etc handling! */
69532 write_lock_irq(&tasklist_lock);
69533
69534 + /* synchronizes with gr_set_acls() */
69535 + gr_copy_label(p);
69536 +
69537 /* CLONE_PARENT re-uses the old parent */
69538 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
69539 p->real_parent = current->real_parent;
69540 @@ -1512,6 +1568,8 @@ bad_fork_cleanup_count:
69541 bad_fork_free:
69542 free_task(p);
69543 fork_out:
69544 + gr_log_forkfail(retval);
69545 +
69546 return ERR_PTR(retval);
69547 }
69548
69549 @@ -1612,6 +1670,8 @@ long do_fork(unsigned long clone_flags,
69550 if (clone_flags & CLONE_PARENT_SETTID)
69551 put_user(nr, parent_tidptr);
69552
69553 + gr_handle_brute_check();
69554 +
69555 if (clone_flags & CLONE_VFORK) {
69556 p->vfork_done = &vfork;
69557 init_completion(&vfork);
69558 @@ -1721,7 +1781,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
69559 return 0;
69560
69561 /* don't need lock here; in the worst case we'll do useless copy */
69562 - if (fs->users == 1)
69563 + if (atomic_read(&fs->users) == 1)
69564 return 0;
69565
69566 *new_fsp = copy_fs_struct(fs);
69567 @@ -1810,7 +1870,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
69568 fs = current->fs;
69569 spin_lock(&fs->lock);
69570 current->fs = new_fs;
69571 - if (--fs->users)
69572 + gr_set_chroot_entries(current, &current->fs->root);
69573 + if (atomic_dec_return(&fs->users))
69574 new_fs = NULL;
69575 else
69576 new_fs = fs;
69577 diff --git a/kernel/futex.c b/kernel/futex.c
69578 index 19eb089..b8c65ea 100644
69579 --- a/kernel/futex.c
69580 +++ b/kernel/futex.c
69581 @@ -54,6 +54,7 @@
69582 #include <linux/mount.h>
69583 #include <linux/pagemap.h>
69584 #include <linux/syscalls.h>
69585 +#include <linux/ptrace.h>
69586 #include <linux/signal.h>
69587 #include <linux/export.h>
69588 #include <linux/magic.h>
69589 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
69590 struct page *page, *page_head;
69591 int err, ro = 0;
69592
69593 +#ifdef CONFIG_PAX_SEGMEXEC
69594 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
69595 + return -EFAULT;
69596 +#endif
69597 +
69598 /*
69599 * The futex address must be "naturally" aligned.
69600 */
69601 @@ -2733,6 +2739,7 @@ static int __init futex_init(void)
69602 {
69603 u32 curval;
69604 int i;
69605 + mm_segment_t oldfs;
69606
69607 /*
69608 * This will fail and we want it. Some arch implementations do
69609 @@ -2744,8 +2751,11 @@ static int __init futex_init(void)
69610 * implementation, the non-functional ones will return
69611 * -ENOSYS.
69612 */
69613 + oldfs = get_fs();
69614 + set_fs(USER_DS);
69615 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
69616 futex_cmpxchg_enabled = 1;
69617 + set_fs(oldfs);
69618
69619 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
69620 plist_head_init(&futex_queues[i].chain);
69621 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
69622 index 9b22d03..6295b62 100644
69623 --- a/kernel/gcov/base.c
69624 +++ b/kernel/gcov/base.c
69625 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
69626 }
69627
69628 #ifdef CONFIG_MODULES
69629 -static inline int within(void *addr, void *start, unsigned long size)
69630 -{
69631 - return ((addr >= start) && (addr < start + size));
69632 -}
69633 -
69634 /* Update list and generate events when modules are unloaded. */
69635 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
69636 void *data)
69637 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
69638 prev = NULL;
69639 /* Remove entries located in module from linked list. */
69640 for (info = gcov_info_head; info; info = info->next) {
69641 - if (within(info, mod->module_core, mod->core_size)) {
69642 + if (within_module_core_rw((unsigned long)info, mod)) {
69643 if (prev)
69644 prev->next = info->next;
69645 else
69646 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
69647 index 6db7a5e..25b6648 100644
69648 --- a/kernel/hrtimer.c
69649 +++ b/kernel/hrtimer.c
69650 @@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
69651 local_irq_restore(flags);
69652 }
69653
69654 -static void run_hrtimer_softirq(struct softirq_action *h)
69655 +static void run_hrtimer_softirq(void)
69656 {
69657 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
69658
69659 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
69660 index 60f48fa..7f3a770 100644
69661 --- a/kernel/jump_label.c
69662 +++ b/kernel/jump_label.c
69663 @@ -13,6 +13,7 @@
69664 #include <linux/sort.h>
69665 #include <linux/err.h>
69666 #include <linux/static_key.h>
69667 +#include <linux/mm.h>
69668
69669 #ifdef HAVE_JUMP_LABEL
69670
69671 @@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
69672
69673 size = (((unsigned long)stop - (unsigned long)start)
69674 / sizeof(struct jump_entry));
69675 + pax_open_kernel();
69676 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
69677 + pax_close_kernel();
69678 }
69679
69680 static void jump_label_update(struct static_key *key, int enable);
69681 @@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
69682 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
69683 struct jump_entry *iter;
69684
69685 + pax_open_kernel();
69686 for (iter = iter_start; iter < iter_stop; iter++) {
69687 if (within_module_init(iter->code, mod))
69688 iter->code = 0;
69689 }
69690 + pax_close_kernel();
69691 }
69692
69693 static int
69694 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
69695 index 2169fee..45c017a 100644
69696 --- a/kernel/kallsyms.c
69697 +++ b/kernel/kallsyms.c
69698 @@ -11,6 +11,9 @@
69699 * Changed the compression method from stem compression to "table lookup"
69700 * compression (see scripts/kallsyms.c for a more complete description)
69701 */
69702 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69703 +#define __INCLUDED_BY_HIDESYM 1
69704 +#endif
69705 #include <linux/kallsyms.h>
69706 #include <linux/module.h>
69707 #include <linux/init.h>
69708 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
69709
69710 static inline int is_kernel_inittext(unsigned long addr)
69711 {
69712 + if (system_state != SYSTEM_BOOTING)
69713 + return 0;
69714 +
69715 if (addr >= (unsigned long)_sinittext
69716 && addr <= (unsigned long)_einittext)
69717 return 1;
69718 return 0;
69719 }
69720
69721 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69722 +#ifdef CONFIG_MODULES
69723 +static inline int is_module_text(unsigned long addr)
69724 +{
69725 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
69726 + return 1;
69727 +
69728 + addr = ktla_ktva(addr);
69729 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
69730 +}
69731 +#else
69732 +static inline int is_module_text(unsigned long addr)
69733 +{
69734 + return 0;
69735 +}
69736 +#endif
69737 +#endif
69738 +
69739 static inline int is_kernel_text(unsigned long addr)
69740 {
69741 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
69742 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
69743
69744 static inline int is_kernel(unsigned long addr)
69745 {
69746 +
69747 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69748 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
69749 + return 1;
69750 +
69751 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
69752 +#else
69753 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
69754 +#endif
69755 +
69756 return 1;
69757 return in_gate_area_no_mm(addr);
69758 }
69759
69760 static int is_ksym_addr(unsigned long addr)
69761 {
69762 +
69763 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69764 + if (is_module_text(addr))
69765 + return 0;
69766 +#endif
69767 +
69768 if (all_var)
69769 return is_kernel(addr);
69770
69771 @@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
69772
69773 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
69774 {
69775 - iter->name[0] = '\0';
69776 iter->nameoff = get_symbol_offset(new_pos);
69777 iter->pos = new_pos;
69778 }
69779 @@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
69780 {
69781 struct kallsym_iter *iter = m->private;
69782
69783 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69784 + if (current_uid())
69785 + return 0;
69786 +#endif
69787 +
69788 /* Some debugging symbols have no name. Ignore them. */
69789 if (!iter->name[0])
69790 return 0;
69791 @@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
69792 */
69793 type = iter->exported ? toupper(iter->type) :
69794 tolower(iter->type);
69795 +
69796 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
69797 type, iter->name, iter->module_name);
69798 } else
69799 @@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
69800 struct kallsym_iter *iter;
69801 int ret;
69802
69803 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
69804 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
69805 if (!iter)
69806 return -ENOMEM;
69807 reset_iter(iter, 0);
69808 diff --git a/kernel/kcmp.c b/kernel/kcmp.c
69809 index 30b7b22..c726387 100644
69810 --- a/kernel/kcmp.c
69811 +++ b/kernel/kcmp.c
69812 @@ -98,6 +98,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
69813 struct task_struct *task1, *task2;
69814 int ret;
69815
69816 +#ifdef CONFIG_GRKERNSEC
69817 + return -ENOSYS;
69818 +#endif
69819 +
69820 rcu_read_lock();
69821
69822 /*
69823 diff --git a/kernel/kexec.c b/kernel/kexec.c
69824 index 5e4bd78..00c5b91 100644
69825 --- a/kernel/kexec.c
69826 +++ b/kernel/kexec.c
69827 @@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
69828 unsigned long flags)
69829 {
69830 struct compat_kexec_segment in;
69831 - struct kexec_segment out, __user *ksegments;
69832 + struct kexec_segment out;
69833 + struct kexec_segment __user *ksegments;
69834 unsigned long i, result;
69835
69836 /* Don't allow clients that don't understand the native
69837 diff --git a/kernel/kmod.c b/kernel/kmod.c
69838 index 1c317e3..4a92a55 100644
69839 --- a/kernel/kmod.c
69840 +++ b/kernel/kmod.c
69841 @@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
69842 kfree(info->argv);
69843 }
69844
69845 -static int call_modprobe(char *module_name, int wait)
69846 +static int call_modprobe(char *module_name, char *module_param, int wait)
69847 {
69848 static char *envp[] = {
69849 "HOME=/",
69850 @@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
69851 NULL
69852 };
69853
69854 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
69855 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
69856 if (!argv)
69857 goto out;
69858
69859 @@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
69860 argv[1] = "-q";
69861 argv[2] = "--";
69862 argv[3] = module_name; /* check free_modprobe_argv() */
69863 - argv[4] = NULL;
69864 + argv[4] = module_param;
69865 + argv[5] = NULL;
69866
69867 return call_usermodehelper_fns(modprobe_path, argv, envp,
69868 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
69869 @@ -120,9 +121,8 @@ out:
69870 * If module auto-loading support is disabled then this function
69871 * becomes a no-operation.
69872 */
69873 -int __request_module(bool wait, const char *fmt, ...)
69874 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
69875 {
69876 - va_list args;
69877 char module_name[MODULE_NAME_LEN];
69878 unsigned int max_modprobes;
69879 int ret;
69880 @@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
69881 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
69882 static int kmod_loop_msg;
69883
69884 - va_start(args, fmt);
69885 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
69886 - va_end(args);
69887 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
69888 if (ret >= MODULE_NAME_LEN)
69889 return -ENAMETOOLONG;
69890
69891 @@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
69892 if (ret)
69893 return ret;
69894
69895 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
69896 + if (!current_uid()) {
69897 + /* hack to workaround consolekit/udisks stupidity */
69898 + read_lock(&tasklist_lock);
69899 + if (!strcmp(current->comm, "mount") &&
69900 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
69901 + read_unlock(&tasklist_lock);
69902 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
69903 + return -EPERM;
69904 + }
69905 + read_unlock(&tasklist_lock);
69906 + }
69907 +#endif
69908 +
69909 /* If modprobe needs a service that is in a module, we get a recursive
69910 * loop. Limit the number of running kmod threads to max_threads/2 or
69911 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
69912 @@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
69913
69914 trace_module_request(module_name, wait, _RET_IP_);
69915
69916 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
69917 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
69918
69919 atomic_dec(&kmod_concurrent);
69920 return ret;
69921 }
69922 +
69923 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
69924 +{
69925 + va_list args;
69926 + int ret;
69927 +
69928 + va_start(args, fmt);
69929 + ret = ____request_module(wait, module_param, fmt, args);
69930 + va_end(args);
69931 +
69932 + return ret;
69933 +}
69934 +
69935 +int __request_module(bool wait, const char *fmt, ...)
69936 +{
69937 + va_list args;
69938 + int ret;
69939 +
69940 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
69941 + if (current_uid()) {
69942 + char module_param[MODULE_NAME_LEN];
69943 +
69944 + memset(module_param, 0, sizeof(module_param));
69945 +
69946 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
69947 +
69948 + va_start(args, fmt);
69949 + ret = ____request_module(wait, module_param, fmt, args);
69950 + va_end(args);
69951 +
69952 + return ret;
69953 + }
69954 +#endif
69955 +
69956 + va_start(args, fmt);
69957 + ret = ____request_module(wait, NULL, fmt, args);
69958 + va_end(args);
69959 +
69960 + return ret;
69961 +}
69962 +
69963 EXPORT_SYMBOL(__request_module);
69964 #endif /* CONFIG_MODULES */
69965
69966 @@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
69967 *
69968 * Thus the __user pointer cast is valid here.
69969 */
69970 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
69971 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
69972
69973 /*
69974 * If ret is 0, either ____call_usermodehelper failed and the
69975 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
69976 index 098f396..fe85ff1 100644
69977 --- a/kernel/kprobes.c
69978 +++ b/kernel/kprobes.c
69979 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
69980 * kernel image and loaded module images reside. This is required
69981 * so x86_64 can correctly handle the %rip-relative fixups.
69982 */
69983 - kip->insns = module_alloc(PAGE_SIZE);
69984 + kip->insns = module_alloc_exec(PAGE_SIZE);
69985 if (!kip->insns) {
69986 kfree(kip);
69987 return NULL;
69988 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
69989 */
69990 if (!list_is_singular(&kip->list)) {
69991 list_del(&kip->list);
69992 - module_free(NULL, kip->insns);
69993 + module_free_exec(NULL, kip->insns);
69994 kfree(kip);
69995 }
69996 return 1;
69997 @@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
69998 {
69999 int i, err = 0;
70000 unsigned long offset = 0, size = 0;
70001 - char *modname, namebuf[128];
70002 + char *modname, namebuf[KSYM_NAME_LEN];
70003 const char *symbol_name;
70004 void *addr;
70005 struct kprobe_blackpoint *kb;
70006 @@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
70007 kprobe_type = "k";
70008
70009 if (sym)
70010 - seq_printf(pi, "%p %s %s+0x%x %s ",
70011 + seq_printf(pi, "%pK %s %s+0x%x %s ",
70012 p->addr, kprobe_type, sym, offset,
70013 (modname ? modname : " "));
70014 else
70015 - seq_printf(pi, "%p %s %p ",
70016 + seq_printf(pi, "%pK %s %pK ",
70017 p->addr, kprobe_type, p->addr);
70018
70019 if (!pp)
70020 @@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
70021 const char *sym = NULL;
70022 unsigned int i = *(loff_t *) v;
70023 unsigned long offset = 0;
70024 - char *modname, namebuf[128];
70025 + char *modname, namebuf[KSYM_NAME_LEN];
70026
70027 head = &kprobe_table[i];
70028 preempt_disable();
70029 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
70030 index 4e316e1..5501eef 100644
70031 --- a/kernel/ksysfs.c
70032 +++ b/kernel/ksysfs.c
70033 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
70034 {
70035 if (count+1 > UEVENT_HELPER_PATH_LEN)
70036 return -ENOENT;
70037 + if (!capable(CAP_SYS_ADMIN))
70038 + return -EPERM;
70039 memcpy(uevent_helper, buf, count);
70040 uevent_helper[count] = '\0';
70041 if (count && uevent_helper[count-1] == '\n')
70042 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
70043 index 7981e5b..7f2105c 100644
70044 --- a/kernel/lockdep.c
70045 +++ b/kernel/lockdep.c
70046 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
70047 end = (unsigned long) &_end,
70048 addr = (unsigned long) obj;
70049
70050 +#ifdef CONFIG_PAX_KERNEXEC
70051 + start = ktla_ktva(start);
70052 +#endif
70053 +
70054 /*
70055 * static variable?
70056 */
70057 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
70058 if (!static_obj(lock->key)) {
70059 debug_locks_off();
70060 printk("INFO: trying to register non-static key.\n");
70061 + printk("lock:%pS key:%pS.\n", lock, lock->key);
70062 printk("the code is fine but needs lockdep annotation.\n");
70063 printk("turning off the locking correctness validator.\n");
70064 dump_stack();
70065 @@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
70066 if (!class)
70067 return 0;
70068 }
70069 - atomic_inc((atomic_t *)&class->ops);
70070 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
70071 if (very_verbose(class)) {
70072 printk("\nacquire class [%p] %s", class->key, class->name);
70073 if (class->name_version > 1)
70074 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
70075 index 91c32a0..7b88d63 100644
70076 --- a/kernel/lockdep_proc.c
70077 +++ b/kernel/lockdep_proc.c
70078 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
70079
70080 static void print_name(struct seq_file *m, struct lock_class *class)
70081 {
70082 - char str[128];
70083 + char str[KSYM_NAME_LEN];
70084 const char *name = class->name;
70085
70086 if (!name) {
70087 @@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
70088 return 0;
70089 }
70090
70091 - seq_printf(m, "%p", class->key);
70092 + seq_printf(m, "%pK", class->key);
70093 #ifdef CONFIG_DEBUG_LOCKDEP
70094 seq_printf(m, " OPS:%8ld", class->ops);
70095 #endif
70096 @@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
70097
70098 list_for_each_entry(entry, &class->locks_after, entry) {
70099 if (entry->distance == 1) {
70100 - seq_printf(m, " -> [%p] ", entry->class->key);
70101 + seq_printf(m, " -> [%pK] ", entry->class->key);
70102 print_name(m, entry->class);
70103 seq_puts(m, "\n");
70104 }
70105 @@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
70106 if (!class->key)
70107 continue;
70108
70109 - seq_printf(m, "[%p] ", class->key);
70110 + seq_printf(m, "[%pK] ", class->key);
70111 print_name(m, class);
70112 seq_puts(m, "\n");
70113 }
70114 @@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
70115 if (!i)
70116 seq_line(m, '-', 40-namelen, namelen);
70117
70118 - snprintf(ip, sizeof(ip), "[<%p>]",
70119 + snprintf(ip, sizeof(ip), "[<%pK>]",
70120 (void *)class->contention_point[i]);
70121 seq_printf(m, "%40s %14lu %29s %pS\n",
70122 name, stats->contention_point[i],
70123 @@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
70124 if (!i)
70125 seq_line(m, '-', 40-namelen, namelen);
70126
70127 - snprintf(ip, sizeof(ip), "[<%p>]",
70128 + snprintf(ip, sizeof(ip), "[<%pK>]",
70129 (void *)class->contending_point[i]);
70130 seq_printf(m, "%40s %14lu %29s %pS\n",
70131 name, stats->contending_point[i],
70132 diff --git a/kernel/module.c b/kernel/module.c
70133 index 6e48c3a..ac2ef5b 100644
70134 --- a/kernel/module.c
70135 +++ b/kernel/module.c
70136 @@ -59,6 +59,7 @@
70137 #include <linux/pfn.h>
70138 #include <linux/bsearch.h>
70139 #include <linux/fips.h>
70140 +#include <linux/grsecurity.h>
70141 #include "module-internal.h"
70142
70143 #define CREATE_TRACE_POINTS
70144 @@ -153,7 +154,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
70145
70146 /* Bounds of module allocation, for speeding __module_address.
70147 * Protected by module_mutex. */
70148 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
70149 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
70150 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
70151
70152 int register_module_notifier(struct notifier_block * nb)
70153 {
70154 @@ -318,7 +320,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
70155 return true;
70156
70157 list_for_each_entry_rcu(mod, &modules, list) {
70158 - struct symsearch arr[] = {
70159 + struct symsearch modarr[] = {
70160 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
70161 NOT_GPL_ONLY, false },
70162 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
70163 @@ -340,7 +342,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
70164 #endif
70165 };
70166
70167 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
70168 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
70169 return true;
70170 }
70171 return false;
70172 @@ -472,7 +474,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
70173 static int percpu_modalloc(struct module *mod,
70174 unsigned long size, unsigned long align)
70175 {
70176 - if (align > PAGE_SIZE) {
70177 + if (align-1 >= PAGE_SIZE) {
70178 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
70179 mod->name, align, PAGE_SIZE);
70180 align = PAGE_SIZE;
70181 @@ -1072,7 +1074,7 @@ struct module_attribute module_uevent =
70182 static ssize_t show_coresize(struct module_attribute *mattr,
70183 struct module_kobject *mk, char *buffer)
70184 {
70185 - return sprintf(buffer, "%u\n", mk->mod->core_size);
70186 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
70187 }
70188
70189 static struct module_attribute modinfo_coresize =
70190 @@ -1081,7 +1083,7 @@ static struct module_attribute modinfo_coresize =
70191 static ssize_t show_initsize(struct module_attribute *mattr,
70192 struct module_kobject *mk, char *buffer)
70193 {
70194 - return sprintf(buffer, "%u\n", mk->mod->init_size);
70195 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
70196 }
70197
70198 static struct module_attribute modinfo_initsize =
70199 @@ -1295,7 +1297,7 @@ resolve_symbol_wait(struct module *mod,
70200 */
70201 #ifdef CONFIG_SYSFS
70202
70203 -#ifdef CONFIG_KALLSYMS
70204 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
70205 static inline bool sect_empty(const Elf_Shdr *sect)
70206 {
70207 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
70208 @@ -1761,21 +1763,21 @@ static void set_section_ro_nx(void *base,
70209
70210 static void unset_module_core_ro_nx(struct module *mod)
70211 {
70212 - set_page_attributes(mod->module_core + mod->core_text_size,
70213 - mod->module_core + mod->core_size,
70214 + set_page_attributes(mod->module_core_rw,
70215 + mod->module_core_rw + mod->core_size_rw,
70216 set_memory_x);
70217 - set_page_attributes(mod->module_core,
70218 - mod->module_core + mod->core_ro_size,
70219 + set_page_attributes(mod->module_core_rx,
70220 + mod->module_core_rx + mod->core_size_rx,
70221 set_memory_rw);
70222 }
70223
70224 static void unset_module_init_ro_nx(struct module *mod)
70225 {
70226 - set_page_attributes(mod->module_init + mod->init_text_size,
70227 - mod->module_init + mod->init_size,
70228 + set_page_attributes(mod->module_init_rw,
70229 + mod->module_init_rw + mod->init_size_rw,
70230 set_memory_x);
70231 - set_page_attributes(mod->module_init,
70232 - mod->module_init + mod->init_ro_size,
70233 + set_page_attributes(mod->module_init_rx,
70234 + mod->module_init_rx + mod->init_size_rx,
70235 set_memory_rw);
70236 }
70237
70238 @@ -1786,14 +1788,14 @@ void set_all_modules_text_rw(void)
70239
70240 mutex_lock(&module_mutex);
70241 list_for_each_entry_rcu(mod, &modules, list) {
70242 - if ((mod->module_core) && (mod->core_text_size)) {
70243 - set_page_attributes(mod->module_core,
70244 - mod->module_core + mod->core_text_size,
70245 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
70246 + set_page_attributes(mod->module_core_rx,
70247 + mod->module_core_rx + mod->core_size_rx,
70248 set_memory_rw);
70249 }
70250 - if ((mod->module_init) && (mod->init_text_size)) {
70251 - set_page_attributes(mod->module_init,
70252 - mod->module_init + mod->init_text_size,
70253 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
70254 + set_page_attributes(mod->module_init_rx,
70255 + mod->module_init_rx + mod->init_size_rx,
70256 set_memory_rw);
70257 }
70258 }
70259 @@ -1807,14 +1809,14 @@ void set_all_modules_text_ro(void)
70260
70261 mutex_lock(&module_mutex);
70262 list_for_each_entry_rcu(mod, &modules, list) {
70263 - if ((mod->module_core) && (mod->core_text_size)) {
70264 - set_page_attributes(mod->module_core,
70265 - mod->module_core + mod->core_text_size,
70266 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
70267 + set_page_attributes(mod->module_core_rx,
70268 + mod->module_core_rx + mod->core_size_rx,
70269 set_memory_ro);
70270 }
70271 - if ((mod->module_init) && (mod->init_text_size)) {
70272 - set_page_attributes(mod->module_init,
70273 - mod->module_init + mod->init_text_size,
70274 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
70275 + set_page_attributes(mod->module_init_rx,
70276 + mod->module_init_rx + mod->init_size_rx,
70277 set_memory_ro);
70278 }
70279 }
70280 @@ -1860,16 +1862,19 @@ static void free_module(struct module *mod)
70281
70282 /* This may be NULL, but that's OK */
70283 unset_module_init_ro_nx(mod);
70284 - module_free(mod, mod->module_init);
70285 + module_free(mod, mod->module_init_rw);
70286 + module_free_exec(mod, mod->module_init_rx);
70287 kfree(mod->args);
70288 percpu_modfree(mod);
70289
70290 /* Free lock-classes: */
70291 - lockdep_free_key_range(mod->module_core, mod->core_size);
70292 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
70293 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
70294
70295 /* Finally, free the core (containing the module structure) */
70296 unset_module_core_ro_nx(mod);
70297 - module_free(mod, mod->module_core);
70298 + module_free_exec(mod, mod->module_core_rx);
70299 + module_free(mod, mod->module_core_rw);
70300
70301 #ifdef CONFIG_MPU
70302 update_protections(current->mm);
70303 @@ -1939,9 +1944,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
70304 int ret = 0;
70305 const struct kernel_symbol *ksym;
70306
70307 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
70308 + int is_fs_load = 0;
70309 + int register_filesystem_found = 0;
70310 + char *p;
70311 +
70312 + p = strstr(mod->args, "grsec_modharden_fs");
70313 + if (p) {
70314 + char *endptr = p + sizeof("grsec_modharden_fs") - 1;
70315 + /* copy \0 as well */
70316 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
70317 + is_fs_load = 1;
70318 + }
70319 +#endif
70320 +
70321 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
70322 const char *name = info->strtab + sym[i].st_name;
70323
70324 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
70325 + /* it's a real shame this will never get ripped and copied
70326 + upstream! ;(
70327 + */
70328 + if (is_fs_load && !strcmp(name, "register_filesystem"))
70329 + register_filesystem_found = 1;
70330 +#endif
70331 +
70332 switch (sym[i].st_shndx) {
70333 case SHN_COMMON:
70334 /* We compiled with -fno-common. These are not
70335 @@ -1962,7 +1989,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
70336 ksym = resolve_symbol_wait(mod, info, name);
70337 /* Ok if resolved. */
70338 if (ksym && !IS_ERR(ksym)) {
70339 + pax_open_kernel();
70340 sym[i].st_value = ksym->value;
70341 + pax_close_kernel();
70342 break;
70343 }
70344
70345 @@ -1981,11 +2010,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
70346 secbase = (unsigned long)mod_percpu(mod);
70347 else
70348 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
70349 + pax_open_kernel();
70350 sym[i].st_value += secbase;
70351 + pax_close_kernel();
70352 break;
70353 }
70354 }
70355
70356 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
70357 + if (is_fs_load && !register_filesystem_found) {
70358 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
70359 + ret = -EPERM;
70360 + }
70361 +#endif
70362 +
70363 return ret;
70364 }
70365
70366 @@ -2069,22 +2107,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
70367 || s->sh_entsize != ~0UL
70368 || strstarts(sname, ".init"))
70369 continue;
70370 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
70371 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
70372 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
70373 + else
70374 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
70375 pr_debug("\t%s\n", sname);
70376 }
70377 - switch (m) {
70378 - case 0: /* executable */
70379 - mod->core_size = debug_align(mod->core_size);
70380 - mod->core_text_size = mod->core_size;
70381 - break;
70382 - case 1: /* RO: text and ro-data */
70383 - mod->core_size = debug_align(mod->core_size);
70384 - mod->core_ro_size = mod->core_size;
70385 - break;
70386 - case 3: /* whole core */
70387 - mod->core_size = debug_align(mod->core_size);
70388 - break;
70389 - }
70390 }
70391
70392 pr_debug("Init section allocation order:\n");
70393 @@ -2098,23 +2126,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
70394 || s->sh_entsize != ~0UL
70395 || !strstarts(sname, ".init"))
70396 continue;
70397 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
70398 - | INIT_OFFSET_MASK);
70399 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
70400 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
70401 + else
70402 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
70403 + s->sh_entsize |= INIT_OFFSET_MASK;
70404 pr_debug("\t%s\n", sname);
70405 }
70406 - switch (m) {
70407 - case 0: /* executable */
70408 - mod->init_size = debug_align(mod->init_size);
70409 - mod->init_text_size = mod->init_size;
70410 - break;
70411 - case 1: /* RO: text and ro-data */
70412 - mod->init_size = debug_align(mod->init_size);
70413 - mod->init_ro_size = mod->init_size;
70414 - break;
70415 - case 3: /* whole init */
70416 - mod->init_size = debug_align(mod->init_size);
70417 - break;
70418 - }
70419 }
70420 }
70421
70422 @@ -2286,7 +2304,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
70423
70424 /* Put symbol section at end of init part of module. */
70425 symsect->sh_flags |= SHF_ALLOC;
70426 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
70427 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
70428 info->index.sym) | INIT_OFFSET_MASK;
70429 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
70430
70431 @@ -2306,13 +2324,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
70432 }
70433
70434 /* Append room for core symbols at end of core part. */
70435 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
70436 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
70437 - mod->core_size += strtab_size;
70438 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
70439 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
70440 + mod->core_size_rx += strtab_size;
70441
70442 /* Put string table section at end of init part of module. */
70443 strsect->sh_flags |= SHF_ALLOC;
70444 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
70445 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
70446 info->index.str) | INIT_OFFSET_MASK;
70447 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
70448 }
70449 @@ -2330,12 +2348,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
70450 /* Make sure we get permanent strtab: don't use info->strtab. */
70451 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
70452
70453 + pax_open_kernel();
70454 +
70455 /* Set types up while we still have access to sections. */
70456 for (i = 0; i < mod->num_symtab; i++)
70457 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
70458
70459 - mod->core_symtab = dst = mod->module_core + info->symoffs;
70460 - mod->core_strtab = s = mod->module_core + info->stroffs;
70461 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
70462 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
70463 src = mod->symtab;
70464 *s++ = 0;
70465 for (ndst = i = 0; i < mod->num_symtab; i++) {
70466 @@ -2348,6 +2368,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
70467 }
70468 }
70469 mod->core_num_syms = ndst;
70470 +
70471 + pax_close_kernel();
70472 }
70473 #else
70474 static inline void layout_symtab(struct module *mod, struct load_info *info)
70475 @@ -2381,17 +2403,33 @@ void * __weak module_alloc(unsigned long size)
70476 return size == 0 ? NULL : vmalloc_exec(size);
70477 }
70478
70479 -static void *module_alloc_update_bounds(unsigned long size)
70480 +static void *module_alloc_update_bounds_rw(unsigned long size)
70481 {
70482 void *ret = module_alloc(size);
70483
70484 if (ret) {
70485 mutex_lock(&module_mutex);
70486 /* Update module bounds. */
70487 - if ((unsigned long)ret < module_addr_min)
70488 - module_addr_min = (unsigned long)ret;
70489 - if ((unsigned long)ret + size > module_addr_max)
70490 - module_addr_max = (unsigned long)ret + size;
70491 + if ((unsigned long)ret < module_addr_min_rw)
70492 + module_addr_min_rw = (unsigned long)ret;
70493 + if ((unsigned long)ret + size > module_addr_max_rw)
70494 + module_addr_max_rw = (unsigned long)ret + size;
70495 + mutex_unlock(&module_mutex);
70496 + }
70497 + return ret;
70498 +}
70499 +
70500 +static void *module_alloc_update_bounds_rx(unsigned long size)
70501 +{
70502 + void *ret = module_alloc_exec(size);
70503 +
70504 + if (ret) {
70505 + mutex_lock(&module_mutex);
70506 + /* Update module bounds. */
70507 + if ((unsigned long)ret < module_addr_min_rx)
70508 + module_addr_min_rx = (unsigned long)ret;
70509 + if ((unsigned long)ret + size > module_addr_max_rx)
70510 + module_addr_max_rx = (unsigned long)ret + size;
70511 mutex_unlock(&module_mutex);
70512 }
70513 return ret;
70514 @@ -2610,8 +2648,14 @@ static struct module *setup_load_info(struct load_info *info)
70515 static int check_modinfo(struct module *mod, struct load_info *info)
70516 {
70517 const char *modmagic = get_modinfo(info, "vermagic");
70518 + const char *license = get_modinfo(info, "license");
70519 int err;
70520
70521 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
70522 + if (!license || !license_is_gpl_compatible(license))
70523 + return -ENOEXEC;
70524 +#endif
70525 +
70526 /* This is allowed: modprobe --force will invalidate it. */
70527 if (!modmagic) {
70528 err = try_to_force_load(mod, "bad vermagic");
70529 @@ -2634,7 +2678,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
70530 }
70531
70532 /* Set up license info based on the info section */
70533 - set_license(mod, get_modinfo(info, "license"));
70534 + set_license(mod, license);
70535
70536 return 0;
70537 }
70538 @@ -2728,7 +2772,7 @@ static int move_module(struct module *mod, struct load_info *info)
70539 void *ptr;
70540
70541 /* Do the allocs. */
70542 - ptr = module_alloc_update_bounds(mod->core_size);
70543 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
70544 /*
70545 * The pointer to this block is stored in the module structure
70546 * which is inside the block. Just mark it as not being a
70547 @@ -2738,23 +2782,50 @@ static int move_module(struct module *mod, struct load_info *info)
70548 if (!ptr)
70549 return -ENOMEM;
70550
70551 - memset(ptr, 0, mod->core_size);
70552 - mod->module_core = ptr;
70553 + memset(ptr, 0, mod->core_size_rw);
70554 + mod->module_core_rw = ptr;
70555
70556 - ptr = module_alloc_update_bounds(mod->init_size);
70557 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
70558 /*
70559 * The pointer to this block is stored in the module structure
70560 * which is inside the block. This block doesn't need to be
70561 * scanned as it contains data and code that will be freed
70562 * after the module is initialized.
70563 */
70564 - kmemleak_ignore(ptr);
70565 - if (!ptr && mod->init_size) {
70566 - module_free(mod, mod->module_core);
70567 + kmemleak_not_leak(ptr);
70568 + if (!ptr && mod->init_size_rw) {
70569 + module_free(mod, mod->module_core_rw);
70570 return -ENOMEM;
70571 }
70572 - memset(ptr, 0, mod->init_size);
70573 - mod->module_init = ptr;
70574 + memset(ptr, 0, mod->init_size_rw);
70575 + mod->module_init_rw = ptr;
70576 +
70577 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
70578 + kmemleak_not_leak(ptr);
70579 + if (!ptr) {
70580 + module_free(mod, mod->module_init_rw);
70581 + module_free(mod, mod->module_core_rw);
70582 + return -ENOMEM;
70583 + }
70584 +
70585 + pax_open_kernel();
70586 + memset(ptr, 0, mod->core_size_rx);
70587 + pax_close_kernel();
70588 + mod->module_core_rx = ptr;
70589 +
70590 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
70591 + kmemleak_not_leak(ptr);
70592 + if (!ptr && mod->init_size_rx) {
70593 + module_free_exec(mod, mod->module_core_rx);
70594 + module_free(mod, mod->module_init_rw);
70595 + module_free(mod, mod->module_core_rw);
70596 + return -ENOMEM;
70597 + }
70598 +
70599 + pax_open_kernel();
70600 + memset(ptr, 0, mod->init_size_rx);
70601 + pax_close_kernel();
70602 + mod->module_init_rx = ptr;
70603
70604 /* Transfer each section which specifies SHF_ALLOC */
70605 pr_debug("final section addresses:\n");
70606 @@ -2765,16 +2836,45 @@ static int move_module(struct module *mod, struct load_info *info)
70607 if (!(shdr->sh_flags & SHF_ALLOC))
70608 continue;
70609
70610 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
70611 - dest = mod->module_init
70612 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70613 - else
70614 - dest = mod->module_core + shdr->sh_entsize;
70615 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
70616 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
70617 + dest = mod->module_init_rw
70618 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70619 + else
70620 + dest = mod->module_init_rx
70621 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70622 + } else {
70623 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
70624 + dest = mod->module_core_rw + shdr->sh_entsize;
70625 + else
70626 + dest = mod->module_core_rx + shdr->sh_entsize;
70627 + }
70628 +
70629 + if (shdr->sh_type != SHT_NOBITS) {
70630 +
70631 +#ifdef CONFIG_PAX_KERNEXEC
70632 +#ifdef CONFIG_X86_64
70633 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
70634 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
70635 +#endif
70636 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
70637 + pax_open_kernel();
70638 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
70639 + pax_close_kernel();
70640 + } else
70641 +#endif
70642
70643 - if (shdr->sh_type != SHT_NOBITS)
70644 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
70645 + }
70646 /* Update sh_addr to point to copy in image. */
70647 - shdr->sh_addr = (unsigned long)dest;
70648 +
70649 +#ifdef CONFIG_PAX_KERNEXEC
70650 + if (shdr->sh_flags & SHF_EXECINSTR)
70651 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
70652 + else
70653 +#endif
70654 +
70655 + shdr->sh_addr = (unsigned long)dest;
70656 pr_debug("\t0x%lx %s\n",
70657 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
70658 }
70659 @@ -2829,12 +2929,12 @@ static void flush_module_icache(const struct module *mod)
70660 * Do it before processing of module parameters, so the module
70661 * can provide parameter accessor functions of its own.
70662 */
70663 - if (mod->module_init)
70664 - flush_icache_range((unsigned long)mod->module_init,
70665 - (unsigned long)mod->module_init
70666 - + mod->init_size);
70667 - flush_icache_range((unsigned long)mod->module_core,
70668 - (unsigned long)mod->module_core + mod->core_size);
70669 + if (mod->module_init_rx)
70670 + flush_icache_range((unsigned long)mod->module_init_rx,
70671 + (unsigned long)mod->module_init_rx
70672 + + mod->init_size_rx);
70673 + flush_icache_range((unsigned long)mod->module_core_rx,
70674 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
70675
70676 set_fs(old_fs);
70677 }
70678 @@ -2904,8 +3004,10 @@ out:
70679 static void module_deallocate(struct module *mod, struct load_info *info)
70680 {
70681 percpu_modfree(mod);
70682 - module_free(mod, mod->module_init);
70683 - module_free(mod, mod->module_core);
70684 + module_free_exec(mod, mod->module_init_rx);
70685 + module_free_exec(mod, mod->module_core_rx);
70686 + module_free(mod, mod->module_init_rw);
70687 + module_free(mod, mod->module_core_rw);
70688 }
70689
70690 int __weak module_finalize(const Elf_Ehdr *hdr,
70691 @@ -2918,7 +3020,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
70692 static int post_relocation(struct module *mod, const struct load_info *info)
70693 {
70694 /* Sort exception table now relocations are done. */
70695 + pax_open_kernel();
70696 sort_extable(mod->extable, mod->extable + mod->num_exentries);
70697 + pax_close_kernel();
70698
70699 /* Copy relocated percpu area over. */
70700 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
70701 @@ -2989,9 +3093,38 @@ static struct module *load_module(void __user *umod,
70702 if (err)
70703 goto free_unload;
70704
70705 + /* Now copy in args */
70706 + mod->args = strndup_user(uargs, ~0UL >> 1);
70707 + if (IS_ERR(mod->args)) {
70708 + err = PTR_ERR(mod->args);
70709 + goto free_unload;
70710 + }
70711 +
70712 /* Set up MODINFO_ATTR fields */
70713 setup_modinfo(mod, &info);
70714
70715 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
70716 + {
70717 + char *p, *p2;
70718 +
70719 + if (strstr(mod->args, "grsec_modharden_netdev")) {
70720 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
70721 + err = -EPERM;
70722 + goto free_modinfo;
70723 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
70724 + p += sizeof("grsec_modharden_normal") - 1;
70725 + p2 = strstr(p, "_");
70726 + if (p2) {
70727 + *p2 = '\0';
70728 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
70729 + *p2 = '_';
70730 + }
70731 + err = -EPERM;
70732 + goto free_modinfo;
70733 + }
70734 + }
70735 +#endif
70736 +
70737 /* Fix up syms, so that st_value is a pointer to location. */
70738 err = simplify_symbols(mod, &info);
70739 if (err < 0)
70740 @@ -3007,13 +3140,6 @@ static struct module *load_module(void __user *umod,
70741
70742 flush_module_icache(mod);
70743
70744 - /* Now copy in args */
70745 - mod->args = strndup_user(uargs, ~0UL >> 1);
70746 - if (IS_ERR(mod->args)) {
70747 - err = PTR_ERR(mod->args);
70748 - goto free_arch_cleanup;
70749 - }
70750 -
70751 /* Mark state as coming so strong_try_module_get() ignores us. */
70752 mod->state = MODULE_STATE_COMING;
70753
70754 @@ -3081,11 +3207,11 @@ again:
70755 unlock:
70756 mutex_unlock(&module_mutex);
70757 synchronize_sched();
70758 - kfree(mod->args);
70759 free_arch_cleanup:
70760 module_arch_cleanup(mod);
70761 free_modinfo:
70762 free_modinfo(mod);
70763 + kfree(mod->args);
70764 free_unload:
70765 module_unload_free(mod);
70766 free_module:
70767 @@ -3126,16 +3252,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
70768 MODULE_STATE_COMING, mod);
70769
70770 /* Set RO and NX regions for core */
70771 - set_section_ro_nx(mod->module_core,
70772 - mod->core_text_size,
70773 - mod->core_ro_size,
70774 - mod->core_size);
70775 + set_section_ro_nx(mod->module_core_rx,
70776 + mod->core_size_rx,
70777 + mod->core_size_rx,
70778 + mod->core_size_rx);
70779
70780 /* Set RO and NX regions for init */
70781 - set_section_ro_nx(mod->module_init,
70782 - mod->init_text_size,
70783 - mod->init_ro_size,
70784 - mod->init_size);
70785 + set_section_ro_nx(mod->module_init_rx,
70786 + mod->init_size_rx,
70787 + mod->init_size_rx,
70788 + mod->init_size_rx);
70789
70790 do_mod_ctors(mod);
70791 /* Start the module */
70792 @@ -3180,11 +3306,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
70793 mod->strtab = mod->core_strtab;
70794 #endif
70795 unset_module_init_ro_nx(mod);
70796 - module_free(mod, mod->module_init);
70797 - mod->module_init = NULL;
70798 - mod->init_size = 0;
70799 - mod->init_ro_size = 0;
70800 - mod->init_text_size = 0;
70801 + module_free(mod, mod->module_init_rw);
70802 + module_free_exec(mod, mod->module_init_rx);
70803 + mod->module_init_rw = NULL;
70804 + mod->module_init_rx = NULL;
70805 + mod->init_size_rw = 0;
70806 + mod->init_size_rx = 0;
70807 mutex_unlock(&module_mutex);
70808 wake_up_all(&module_wq);
70809
70810 @@ -3216,10 +3343,16 @@ static const char *get_ksymbol(struct module *mod,
70811 unsigned long nextval;
70812
70813 /* At worse, next value is at end of module */
70814 - if (within_module_init(addr, mod))
70815 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
70816 + if (within_module_init_rx(addr, mod))
70817 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
70818 + else if (within_module_init_rw(addr, mod))
70819 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
70820 + else if (within_module_core_rx(addr, mod))
70821 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
70822 + else if (within_module_core_rw(addr, mod))
70823 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
70824 else
70825 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
70826 + return NULL;
70827
70828 /* Scan for closest preceding symbol, and next symbol. (ELF
70829 starts real symbols at 1). */
70830 @@ -3454,7 +3587,7 @@ static int m_show(struct seq_file *m, void *p)
70831 char buf[8];
70832
70833 seq_printf(m, "%s %u",
70834 - mod->name, mod->init_size + mod->core_size);
70835 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
70836 print_unload_info(m, mod);
70837
70838 /* Informative for users. */
70839 @@ -3463,7 +3596,7 @@ static int m_show(struct seq_file *m, void *p)
70840 mod->state == MODULE_STATE_COMING ? "Loading":
70841 "Live");
70842 /* Used by oprofile and other similar tools. */
70843 - seq_printf(m, " 0x%pK", mod->module_core);
70844 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
70845
70846 /* Taints info */
70847 if (mod->taints)
70848 @@ -3499,7 +3632,17 @@ static const struct file_operations proc_modules_operations = {
70849
70850 static int __init proc_modules_init(void)
70851 {
70852 +#ifndef CONFIG_GRKERNSEC_HIDESYM
70853 +#ifdef CONFIG_GRKERNSEC_PROC_USER
70854 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
70855 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70856 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
70857 +#else
70858 proc_create("modules", 0, NULL, &proc_modules_operations);
70859 +#endif
70860 +#else
70861 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
70862 +#endif
70863 return 0;
70864 }
70865 module_init(proc_modules_init);
70866 @@ -3558,12 +3701,12 @@ struct module *__module_address(unsigned long addr)
70867 {
70868 struct module *mod;
70869
70870 - if (addr < module_addr_min || addr > module_addr_max)
70871 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
70872 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
70873 return NULL;
70874
70875 list_for_each_entry_rcu(mod, &modules, list)
70876 - if (within_module_core(addr, mod)
70877 - || within_module_init(addr, mod))
70878 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
70879 return mod;
70880 return NULL;
70881 }
70882 @@ -3597,11 +3740,20 @@ bool is_module_text_address(unsigned long addr)
70883 */
70884 struct module *__module_text_address(unsigned long addr)
70885 {
70886 - struct module *mod = __module_address(addr);
70887 + struct module *mod;
70888 +
70889 +#ifdef CONFIG_X86_32
70890 + addr = ktla_ktva(addr);
70891 +#endif
70892 +
70893 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
70894 + return NULL;
70895 +
70896 + mod = __module_address(addr);
70897 +
70898 if (mod) {
70899 /* Make sure it's within the text section. */
70900 - if (!within(addr, mod->module_init, mod->init_text_size)
70901 - && !within(addr, mod->module_core, mod->core_text_size))
70902 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
70903 mod = NULL;
70904 }
70905 return mod;
70906 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
70907 index 7e3443f..b2a1e6b 100644
70908 --- a/kernel/mutex-debug.c
70909 +++ b/kernel/mutex-debug.c
70910 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
70911 }
70912
70913 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70914 - struct thread_info *ti)
70915 + struct task_struct *task)
70916 {
70917 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
70918
70919 /* Mark the current thread as blocked on the lock: */
70920 - ti->task->blocked_on = waiter;
70921 + task->blocked_on = waiter;
70922 }
70923
70924 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70925 - struct thread_info *ti)
70926 + struct task_struct *task)
70927 {
70928 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
70929 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
70930 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
70931 - ti->task->blocked_on = NULL;
70932 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
70933 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
70934 + task->blocked_on = NULL;
70935
70936 list_del_init(&waiter->list);
70937 waiter->task = NULL;
70938 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
70939 index 0799fd3..d06ae3b 100644
70940 --- a/kernel/mutex-debug.h
70941 +++ b/kernel/mutex-debug.h
70942 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
70943 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
70944 extern void debug_mutex_add_waiter(struct mutex *lock,
70945 struct mutex_waiter *waiter,
70946 - struct thread_info *ti);
70947 + struct task_struct *task);
70948 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70949 - struct thread_info *ti);
70950 + struct task_struct *task);
70951 extern void debug_mutex_unlock(struct mutex *lock);
70952 extern void debug_mutex_init(struct mutex *lock, const char *name,
70953 struct lock_class_key *key);
70954 diff --git a/kernel/mutex.c b/kernel/mutex.c
70955 index a307cc9..27fd2e9 100644
70956 --- a/kernel/mutex.c
70957 +++ b/kernel/mutex.c
70958 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70959 spin_lock_mutex(&lock->wait_lock, flags);
70960
70961 debug_mutex_lock_common(lock, &waiter);
70962 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
70963 + debug_mutex_add_waiter(lock, &waiter, task);
70964
70965 /* add waiting tasks to the end of the waitqueue (FIFO): */
70966 list_add_tail(&waiter.list, &lock->wait_list);
70967 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70968 * TASK_UNINTERRUPTIBLE case.)
70969 */
70970 if (unlikely(signal_pending_state(state, task))) {
70971 - mutex_remove_waiter(lock, &waiter,
70972 - task_thread_info(task));
70973 + mutex_remove_waiter(lock, &waiter, task);
70974 mutex_release(&lock->dep_map, 1, ip);
70975 spin_unlock_mutex(&lock->wait_lock, flags);
70976
70977 @@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70978 done:
70979 lock_acquired(&lock->dep_map, ip);
70980 /* got the lock - rejoice! */
70981 - mutex_remove_waiter(lock, &waiter, current_thread_info());
70982 + mutex_remove_waiter(lock, &waiter, task);
70983 mutex_set_owner(lock);
70984
70985 /* set it to 0 if there are no waiters left: */
70986 diff --git a/kernel/notifier.c b/kernel/notifier.c
70987 index 2d5cc4c..d9ea600 100644
70988 --- a/kernel/notifier.c
70989 +++ b/kernel/notifier.c
70990 @@ -5,6 +5,7 @@
70991 #include <linux/rcupdate.h>
70992 #include <linux/vmalloc.h>
70993 #include <linux/reboot.h>
70994 +#include <linux/mm.h>
70995
70996 /*
70997 * Notifier list for kernel code which wants to be called
70998 @@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
70999 while ((*nl) != NULL) {
71000 if (n->priority > (*nl)->priority)
71001 break;
71002 - nl = &((*nl)->next);
71003 + nl = (struct notifier_block **)&((*nl)->next);
71004 }
71005 - n->next = *nl;
71006 + pax_open_kernel();
71007 + *(const void **)&n->next = *nl;
71008 rcu_assign_pointer(*nl, n);
71009 + pax_close_kernel();
71010 return 0;
71011 }
71012
71013 @@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
71014 return 0;
71015 if (n->priority > (*nl)->priority)
71016 break;
71017 - nl = &((*nl)->next);
71018 + nl = (struct notifier_block **)&((*nl)->next);
71019 }
71020 - n->next = *nl;
71021 + pax_open_kernel();
71022 + *(const void **)&n->next = *nl;
71023 rcu_assign_pointer(*nl, n);
71024 + pax_close_kernel();
71025 return 0;
71026 }
71027
71028 @@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
71029 {
71030 while ((*nl) != NULL) {
71031 if ((*nl) == n) {
71032 + pax_open_kernel();
71033 rcu_assign_pointer(*nl, n->next);
71034 + pax_close_kernel();
71035 return 0;
71036 }
71037 - nl = &((*nl)->next);
71038 + nl = (struct notifier_block **)&((*nl)->next);
71039 }
71040 return -ENOENT;
71041 }
71042 diff --git a/kernel/panic.c b/kernel/panic.c
71043 index e1b2822..5edc1d9 100644
71044 --- a/kernel/panic.c
71045 +++ b/kernel/panic.c
71046 @@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
71047 const char *board;
71048
71049 printk(KERN_WARNING "------------[ cut here ]------------\n");
71050 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
71051 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
71052 board = dmi_get_system_info(DMI_PRODUCT_NAME);
71053 if (board)
71054 printk(KERN_WARNING "Hardware name: %s\n", board);
71055 @@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
71056 */
71057 void __stack_chk_fail(void)
71058 {
71059 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
71060 + dump_stack();
71061 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
71062 __builtin_return_address(0));
71063 }
71064 EXPORT_SYMBOL(__stack_chk_fail);
71065 diff --git a/kernel/pid.c b/kernel/pid.c
71066 index aebd4f5..1693c13 100644
71067 --- a/kernel/pid.c
71068 +++ b/kernel/pid.c
71069 @@ -33,6 +33,7 @@
71070 #include <linux/rculist.h>
71071 #include <linux/bootmem.h>
71072 #include <linux/hash.h>
71073 +#include <linux/security.h>
71074 #include <linux/pid_namespace.h>
71075 #include <linux/init_task.h>
71076 #include <linux/syscalls.h>
71077 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
71078
71079 int pid_max = PID_MAX_DEFAULT;
71080
71081 -#define RESERVED_PIDS 300
71082 +#define RESERVED_PIDS 500
71083
71084 int pid_max_min = RESERVED_PIDS + 1;
71085 int pid_max_max = PID_MAX_LIMIT;
71086 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
71087 */
71088 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
71089 {
71090 + struct task_struct *task;
71091 +
71092 rcu_lockdep_assert(rcu_read_lock_held(),
71093 "find_task_by_pid_ns() needs rcu_read_lock()"
71094 " protection");
71095 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
71096 +
71097 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
71098 +
71099 + if (gr_pid_is_chrooted(task))
71100 + return NULL;
71101 +
71102 + return task;
71103 }
71104
71105 struct task_struct *find_task_by_vpid(pid_t vnr)
71106 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
71107 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
71108 }
71109
71110 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
71111 +{
71112 + rcu_lockdep_assert(rcu_read_lock_held(),
71113 + "find_task_by_pid_ns() needs rcu_read_lock()"
71114 + " protection");
71115 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
71116 +}
71117 +
71118 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
71119 {
71120 struct pid *pid;
71121 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
71122 index 125cb67..2e5c8ad 100644
71123 --- a/kernel/posix-cpu-timers.c
71124 +++ b/kernel/posix-cpu-timers.c
71125 @@ -6,9 +6,11 @@
71126 #include <linux/posix-timers.h>
71127 #include <linux/errno.h>
71128 #include <linux/math64.h>
71129 +#include <linux/security.h>
71130 #include <asm/uaccess.h>
71131 #include <linux/kernel_stat.h>
71132 #include <trace/events/timer.h>
71133 +#include <linux/random.h>
71134
71135 /*
71136 * Called after updating RLIMIT_CPU to run cpu timer and update
71137 @@ -494,6 +496,8 @@ static void cleanup_timers(struct list_head *head,
71138 */
71139 void posix_cpu_timers_exit(struct task_struct *tsk)
71140 {
71141 + add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
71142 + sizeof(unsigned long long));
71143 cleanup_timers(tsk->cpu_timers,
71144 tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
71145
71146 @@ -1578,14 +1582,14 @@ struct k_clock clock_posix_cpu = {
71147
71148 static __init int init_posix_cpu_timers(void)
71149 {
71150 - struct k_clock process = {
71151 + static struct k_clock process = {
71152 .clock_getres = process_cpu_clock_getres,
71153 .clock_get = process_cpu_clock_get,
71154 .timer_create = process_cpu_timer_create,
71155 .nsleep = process_cpu_nsleep,
71156 .nsleep_restart = process_cpu_nsleep_restart,
71157 };
71158 - struct k_clock thread = {
71159 + static struct k_clock thread = {
71160 .clock_getres = thread_cpu_clock_getres,
71161 .clock_get = thread_cpu_clock_get,
71162 .timer_create = thread_cpu_timer_create,
71163 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
71164 index 69185ae..cc2847a 100644
71165 --- a/kernel/posix-timers.c
71166 +++ b/kernel/posix-timers.c
71167 @@ -43,6 +43,7 @@
71168 #include <linux/idr.h>
71169 #include <linux/posix-clock.h>
71170 #include <linux/posix-timers.h>
71171 +#include <linux/grsecurity.h>
71172 #include <linux/syscalls.h>
71173 #include <linux/wait.h>
71174 #include <linux/workqueue.h>
71175 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
71176 * which we beg off on and pass to do_sys_settimeofday().
71177 */
71178
71179 -static struct k_clock posix_clocks[MAX_CLOCKS];
71180 +static struct k_clock *posix_clocks[MAX_CLOCKS];
71181
71182 /*
71183 * These ones are defined below.
71184 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
71185 */
71186 static __init int init_posix_timers(void)
71187 {
71188 - struct k_clock clock_realtime = {
71189 + static struct k_clock clock_realtime = {
71190 .clock_getres = hrtimer_get_res,
71191 .clock_get = posix_clock_realtime_get,
71192 .clock_set = posix_clock_realtime_set,
71193 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
71194 .timer_get = common_timer_get,
71195 .timer_del = common_timer_del,
71196 };
71197 - struct k_clock clock_monotonic = {
71198 + static struct k_clock clock_monotonic = {
71199 .clock_getres = hrtimer_get_res,
71200 .clock_get = posix_ktime_get_ts,
71201 .nsleep = common_nsleep,
71202 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
71203 .timer_get = common_timer_get,
71204 .timer_del = common_timer_del,
71205 };
71206 - struct k_clock clock_monotonic_raw = {
71207 + static struct k_clock clock_monotonic_raw = {
71208 .clock_getres = hrtimer_get_res,
71209 .clock_get = posix_get_monotonic_raw,
71210 };
71211 - struct k_clock clock_realtime_coarse = {
71212 + static struct k_clock clock_realtime_coarse = {
71213 .clock_getres = posix_get_coarse_res,
71214 .clock_get = posix_get_realtime_coarse,
71215 };
71216 - struct k_clock clock_monotonic_coarse = {
71217 + static struct k_clock clock_monotonic_coarse = {
71218 .clock_getres = posix_get_coarse_res,
71219 .clock_get = posix_get_monotonic_coarse,
71220 };
71221 - struct k_clock clock_boottime = {
71222 + static struct k_clock clock_boottime = {
71223 .clock_getres = hrtimer_get_res,
71224 .clock_get = posix_get_boottime,
71225 .nsleep = common_nsleep,
71226 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
71227 return;
71228 }
71229
71230 - posix_clocks[clock_id] = *new_clock;
71231 + posix_clocks[clock_id] = new_clock;
71232 }
71233 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
71234
71235 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
71236 return (id & CLOCKFD_MASK) == CLOCKFD ?
71237 &clock_posix_dynamic : &clock_posix_cpu;
71238
71239 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
71240 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
71241 return NULL;
71242 - return &posix_clocks[id];
71243 + return posix_clocks[id];
71244 }
71245
71246 static int common_timer_create(struct k_itimer *new_timer)
71247 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
71248 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
71249 return -EFAULT;
71250
71251 + /* only the CLOCK_REALTIME clock can be set, all other clocks
71252 + have their clock_set fptr set to a nosettime dummy function
71253 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
71254 + call common_clock_set, which calls do_sys_settimeofday, which
71255 + we hook
71256 + */
71257 +
71258 return kc->clock_set(which_clock, &new_tp);
71259 }
71260
71261 diff --git a/kernel/power/process.c b/kernel/power/process.c
71262 index 87da817..30ddd13 100644
71263 --- a/kernel/power/process.c
71264 +++ b/kernel/power/process.c
71265 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
71266 u64 elapsed_csecs64;
71267 unsigned int elapsed_csecs;
71268 bool wakeup = false;
71269 + bool timedout = false;
71270
71271 do_gettimeofday(&start);
71272
71273 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
71274
71275 while (true) {
71276 todo = 0;
71277 + if (time_after(jiffies, end_time))
71278 + timedout = true;
71279 read_lock(&tasklist_lock);
71280 do_each_thread(g, p) {
71281 if (p == current || !freeze_task(p))
71282 @@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
71283 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
71284 * transition can't race with task state testing here.
71285 */
71286 - if (!task_is_stopped_or_traced(p) &&
71287 - !freezer_should_skip(p))
71288 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
71289 todo++;
71290 + if (timedout) {
71291 + printk(KERN_ERR "Task refusing to freeze:\n");
71292 + sched_show_task(p);
71293 + }
71294 + }
71295 } while_each_thread(g, p);
71296 read_unlock(&tasklist_lock);
71297
71298 @@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
71299 todo += wq_busy;
71300 }
71301
71302 - if (!todo || time_after(jiffies, end_time))
71303 + if (!todo || timedout)
71304 break;
71305
71306 if (pm_wakeup_pending()) {
71307 diff --git a/kernel/printk.c b/kernel/printk.c
71308 index 2d607f4..7413773 100644
71309 --- a/kernel/printk.c
71310 +++ b/kernel/printk.c
71311 @@ -817,6 +817,11 @@ static int check_syslog_permissions(int type, bool from_file)
71312 if (from_file && type != SYSLOG_ACTION_OPEN)
71313 return 0;
71314
71315 +#ifdef CONFIG_GRKERNSEC_DMESG
71316 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
71317 + return -EPERM;
71318 +#endif
71319 +
71320 if (syslog_action_restricted(type)) {
71321 if (capable(CAP_SYSLOG))
71322 return 0;
71323 diff --git a/kernel/profile.c b/kernel/profile.c
71324 index 76b8e77..a2930e8 100644
71325 --- a/kernel/profile.c
71326 +++ b/kernel/profile.c
71327 @@ -39,7 +39,7 @@ struct profile_hit {
71328 /* Oprofile timer tick hook */
71329 static int (*timer_hook)(struct pt_regs *) __read_mostly;
71330
71331 -static atomic_t *prof_buffer;
71332 +static atomic_unchecked_t *prof_buffer;
71333 static unsigned long prof_len, prof_shift;
71334
71335 int prof_on __read_mostly;
71336 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
71337 hits[i].pc = 0;
71338 continue;
71339 }
71340 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
71341 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
71342 hits[i].hits = hits[i].pc = 0;
71343 }
71344 }
71345 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
71346 * Add the current hit(s) and flush the write-queue out
71347 * to the global buffer:
71348 */
71349 - atomic_add(nr_hits, &prof_buffer[pc]);
71350 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
71351 for (i = 0; i < NR_PROFILE_HIT; ++i) {
71352 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
71353 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
71354 hits[i].pc = hits[i].hits = 0;
71355 }
71356 out:
71357 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
71358 {
71359 unsigned long pc;
71360 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
71361 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
71362 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
71363 }
71364 #endif /* !CONFIG_SMP */
71365
71366 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
71367 return -EFAULT;
71368 buf++; p++; count--; read++;
71369 }
71370 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
71371 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
71372 if (copy_to_user(buf, (void *)pnt, count))
71373 return -EFAULT;
71374 read += count;
71375 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
71376 }
71377 #endif
71378 profile_discard_flip_buffers();
71379 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
71380 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
71381 return count;
71382 }
71383
71384 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
71385 index 1f5e55d..8b8f969 100644
71386 --- a/kernel/ptrace.c
71387 +++ b/kernel/ptrace.c
71388 @@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
71389
71390 if (seize)
71391 flags |= PT_SEIZED;
71392 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
71393 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
71394 flags |= PT_PTRACE_CAP;
71395 task->ptrace = flags;
71396
71397 @@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
71398 break;
71399 return -EIO;
71400 }
71401 - if (copy_to_user(dst, buf, retval))
71402 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
71403 return -EFAULT;
71404 copied += retval;
71405 src += retval;
71406 @@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
71407 bool seized = child->ptrace & PT_SEIZED;
71408 int ret = -EIO;
71409 siginfo_t siginfo, *si;
71410 - void __user *datavp = (void __user *) data;
71411 + void __user *datavp = (__force void __user *) data;
71412 unsigned long __user *datalp = datavp;
71413 unsigned long flags;
71414
71415 @@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
71416 goto out;
71417 }
71418
71419 + if (gr_handle_ptrace(child, request)) {
71420 + ret = -EPERM;
71421 + goto out_put_task_struct;
71422 + }
71423 +
71424 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71425 ret = ptrace_attach(child, request, addr, data);
71426 /*
71427 * Some architectures need to do book-keeping after
71428 * a ptrace attach.
71429 */
71430 - if (!ret)
71431 + if (!ret) {
71432 arch_ptrace_attach(child);
71433 + gr_audit_ptrace(child);
71434 + }
71435 goto out_put_task_struct;
71436 }
71437
71438 @@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
71439 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
71440 if (copied != sizeof(tmp))
71441 return -EIO;
71442 - return put_user(tmp, (unsigned long __user *)data);
71443 + return put_user(tmp, (__force unsigned long __user *)data);
71444 }
71445
71446 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
71447 @@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
71448 goto out;
71449 }
71450
71451 + if (gr_handle_ptrace(child, request)) {
71452 + ret = -EPERM;
71453 + goto out_put_task_struct;
71454 + }
71455 +
71456 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71457 ret = ptrace_attach(child, request, addr, data);
71458 /*
71459 * Some architectures need to do book-keeping after
71460 * a ptrace attach.
71461 */
71462 - if (!ret)
71463 + if (!ret) {
71464 arch_ptrace_attach(child);
71465 + gr_audit_ptrace(child);
71466 + }
71467 goto out_put_task_struct;
71468 }
71469
71470 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
71471 index e4c6a59..c86621a 100644
71472 --- a/kernel/rcutiny.c
71473 +++ b/kernel/rcutiny.c
71474 @@ -46,7 +46,7 @@
71475 struct rcu_ctrlblk;
71476 static void invoke_rcu_callbacks(void);
71477 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
71478 -static void rcu_process_callbacks(struct softirq_action *unused);
71479 +static void rcu_process_callbacks(void);
71480 static void __call_rcu(struct rcu_head *head,
71481 void (*func)(struct rcu_head *rcu),
71482 struct rcu_ctrlblk *rcp);
71483 @@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
71484 rcu_is_callbacks_kthread()));
71485 }
71486
71487 -static void rcu_process_callbacks(struct softirq_action *unused)
71488 +static void rcu_process_callbacks(void)
71489 {
71490 __rcu_process_callbacks(&rcu_sched_ctrlblk);
71491 __rcu_process_callbacks(&rcu_bh_ctrlblk);
71492 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
71493 index 3d01902..afbf46e 100644
71494 --- a/kernel/rcutiny_plugin.h
71495 +++ b/kernel/rcutiny_plugin.h
71496 @@ -893,7 +893,7 @@ static int rcu_kthread(void *arg)
71497 have_rcu_kthread_work = morework;
71498 local_irq_restore(flags);
71499 if (work)
71500 - rcu_process_callbacks(NULL);
71501 + rcu_process_callbacks();
71502 schedule_timeout_interruptible(1); /* Leave CPU for others. */
71503 }
71504
71505 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
71506 index aaa7b9f..055ff1e 100644
71507 --- a/kernel/rcutorture.c
71508 +++ b/kernel/rcutorture.c
71509 @@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
71510 { 0 };
71511 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
71512 { 0 };
71513 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
71514 -static atomic_t n_rcu_torture_alloc;
71515 -static atomic_t n_rcu_torture_alloc_fail;
71516 -static atomic_t n_rcu_torture_free;
71517 -static atomic_t n_rcu_torture_mberror;
71518 -static atomic_t n_rcu_torture_error;
71519 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
71520 +static atomic_unchecked_t n_rcu_torture_alloc;
71521 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
71522 +static atomic_unchecked_t n_rcu_torture_free;
71523 +static atomic_unchecked_t n_rcu_torture_mberror;
71524 +static atomic_unchecked_t n_rcu_torture_error;
71525 static long n_rcu_torture_barrier_error;
71526 static long n_rcu_torture_boost_ktrerror;
71527 static long n_rcu_torture_boost_rterror;
71528 @@ -272,11 +272,11 @@ rcu_torture_alloc(void)
71529
71530 spin_lock_bh(&rcu_torture_lock);
71531 if (list_empty(&rcu_torture_freelist)) {
71532 - atomic_inc(&n_rcu_torture_alloc_fail);
71533 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
71534 spin_unlock_bh(&rcu_torture_lock);
71535 return NULL;
71536 }
71537 - atomic_inc(&n_rcu_torture_alloc);
71538 + atomic_inc_unchecked(&n_rcu_torture_alloc);
71539 p = rcu_torture_freelist.next;
71540 list_del_init(p);
71541 spin_unlock_bh(&rcu_torture_lock);
71542 @@ -289,7 +289,7 @@ rcu_torture_alloc(void)
71543 static void
71544 rcu_torture_free(struct rcu_torture *p)
71545 {
71546 - atomic_inc(&n_rcu_torture_free);
71547 + atomic_inc_unchecked(&n_rcu_torture_free);
71548 spin_lock_bh(&rcu_torture_lock);
71549 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
71550 spin_unlock_bh(&rcu_torture_lock);
71551 @@ -410,7 +410,7 @@ rcu_torture_cb(struct rcu_head *p)
71552 i = rp->rtort_pipe_count;
71553 if (i > RCU_TORTURE_PIPE_LEN)
71554 i = RCU_TORTURE_PIPE_LEN;
71555 - atomic_inc(&rcu_torture_wcount[i]);
71556 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
71557 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
71558 rp->rtort_mbtest = 0;
71559 rcu_torture_free(rp);
71560 @@ -459,7 +459,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
71561 i = rp->rtort_pipe_count;
71562 if (i > RCU_TORTURE_PIPE_LEN)
71563 i = RCU_TORTURE_PIPE_LEN;
71564 - atomic_inc(&rcu_torture_wcount[i]);
71565 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
71566 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
71567 rp->rtort_mbtest = 0;
71568 list_del(&rp->rtort_free);
71569 @@ -1002,7 +1002,7 @@ rcu_torture_writer(void *arg)
71570 i = old_rp->rtort_pipe_count;
71571 if (i > RCU_TORTURE_PIPE_LEN)
71572 i = RCU_TORTURE_PIPE_LEN;
71573 - atomic_inc(&rcu_torture_wcount[i]);
71574 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
71575 old_rp->rtort_pipe_count++;
71576 cur_ops->deferred_free(old_rp);
71577 }
71578 @@ -1087,7 +1087,7 @@ static void rcu_torture_timer(unsigned long unused)
71579 }
71580 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
71581 if (p->rtort_mbtest == 0)
71582 - atomic_inc(&n_rcu_torture_mberror);
71583 + atomic_inc_unchecked(&n_rcu_torture_mberror);
71584 spin_lock(&rand_lock);
71585 cur_ops->read_delay(&rand);
71586 n_rcu_torture_timers++;
71587 @@ -1151,7 +1151,7 @@ rcu_torture_reader(void *arg)
71588 }
71589 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
71590 if (p->rtort_mbtest == 0)
71591 - atomic_inc(&n_rcu_torture_mberror);
71592 + atomic_inc_unchecked(&n_rcu_torture_mberror);
71593 cur_ops->read_delay(&rand);
71594 preempt_disable();
71595 pipe_count = p->rtort_pipe_count;
71596 @@ -1210,11 +1210,11 @@ rcu_torture_printk(char *page)
71597 rcu_torture_current,
71598 rcu_torture_current_version,
71599 list_empty(&rcu_torture_freelist),
71600 - atomic_read(&n_rcu_torture_alloc),
71601 - atomic_read(&n_rcu_torture_alloc_fail),
71602 - atomic_read(&n_rcu_torture_free));
71603 + atomic_read_unchecked(&n_rcu_torture_alloc),
71604 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
71605 + atomic_read_unchecked(&n_rcu_torture_free));
71606 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
71607 - atomic_read(&n_rcu_torture_mberror),
71608 + atomic_read_unchecked(&n_rcu_torture_mberror),
71609 n_rcu_torture_boost_ktrerror,
71610 n_rcu_torture_boost_rterror);
71611 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
71612 @@ -1233,14 +1233,14 @@ rcu_torture_printk(char *page)
71613 n_barrier_attempts,
71614 n_rcu_torture_barrier_error);
71615 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
71616 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
71617 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
71618 n_rcu_torture_barrier_error != 0 ||
71619 n_rcu_torture_boost_ktrerror != 0 ||
71620 n_rcu_torture_boost_rterror != 0 ||
71621 n_rcu_torture_boost_failure != 0 ||
71622 i > 1) {
71623 cnt += sprintf(&page[cnt], "!!! ");
71624 - atomic_inc(&n_rcu_torture_error);
71625 + atomic_inc_unchecked(&n_rcu_torture_error);
71626 WARN_ON_ONCE(1);
71627 }
71628 cnt += sprintf(&page[cnt], "Reader Pipe: ");
71629 @@ -1254,7 +1254,7 @@ rcu_torture_printk(char *page)
71630 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
71631 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
71632 cnt += sprintf(&page[cnt], " %d",
71633 - atomic_read(&rcu_torture_wcount[i]));
71634 + atomic_read_unchecked(&rcu_torture_wcount[i]));
71635 }
71636 cnt += sprintf(&page[cnt], "\n");
71637 if (cur_ops->stats)
71638 @@ -1938,7 +1938,7 @@ rcu_torture_cleanup(void)
71639
71640 if (cur_ops->cleanup)
71641 cur_ops->cleanup();
71642 - if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
71643 + if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
71644 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
71645 else if (n_online_successes != n_online_attempts ||
71646 n_offline_successes != n_offline_attempts)
71647 @@ -2007,18 +2007,18 @@ rcu_torture_init(void)
71648
71649 rcu_torture_current = NULL;
71650 rcu_torture_current_version = 0;
71651 - atomic_set(&n_rcu_torture_alloc, 0);
71652 - atomic_set(&n_rcu_torture_alloc_fail, 0);
71653 - atomic_set(&n_rcu_torture_free, 0);
71654 - atomic_set(&n_rcu_torture_mberror, 0);
71655 - atomic_set(&n_rcu_torture_error, 0);
71656 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
71657 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
71658 + atomic_set_unchecked(&n_rcu_torture_free, 0);
71659 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
71660 + atomic_set_unchecked(&n_rcu_torture_error, 0);
71661 n_rcu_torture_barrier_error = 0;
71662 n_rcu_torture_boost_ktrerror = 0;
71663 n_rcu_torture_boost_rterror = 0;
71664 n_rcu_torture_boost_failure = 0;
71665 n_rcu_torture_boosts = 0;
71666 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
71667 - atomic_set(&rcu_torture_wcount[i], 0);
71668 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
71669 for_each_possible_cpu(cpu) {
71670 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
71671 per_cpu(rcu_torture_count, cpu)[i] = 0;
71672 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
71673 index 2682295..0f2297e 100644
71674 --- a/kernel/rcutree.c
71675 +++ b/kernel/rcutree.c
71676 @@ -348,9 +348,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
71677 rcu_prepare_for_idle(smp_processor_id());
71678 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
71679 smp_mb__before_atomic_inc(); /* See above. */
71680 - atomic_inc(&rdtp->dynticks);
71681 + atomic_inc_unchecked(&rdtp->dynticks);
71682 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
71683 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
71684 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
71685
71686 /*
71687 * It is illegal to enter an extended quiescent state while
71688 @@ -508,10 +508,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
71689 int user)
71690 {
71691 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
71692 - atomic_inc(&rdtp->dynticks);
71693 + atomic_inc_unchecked(&rdtp->dynticks);
71694 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
71695 smp_mb__after_atomic_inc(); /* See above. */
71696 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
71697 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
71698 rcu_cleanup_after_idle(smp_processor_id());
71699 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
71700 if (!user && !is_idle_task(current)) {
71701 @@ -670,14 +670,14 @@ void rcu_nmi_enter(void)
71702 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
71703
71704 if (rdtp->dynticks_nmi_nesting == 0 &&
71705 - (atomic_read(&rdtp->dynticks) & 0x1))
71706 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
71707 return;
71708 rdtp->dynticks_nmi_nesting++;
71709 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
71710 - atomic_inc(&rdtp->dynticks);
71711 + atomic_inc_unchecked(&rdtp->dynticks);
71712 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
71713 smp_mb__after_atomic_inc(); /* See above. */
71714 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
71715 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
71716 }
71717
71718 /**
71719 @@ -696,9 +696,9 @@ void rcu_nmi_exit(void)
71720 return;
71721 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
71722 smp_mb__before_atomic_inc(); /* See above. */
71723 - atomic_inc(&rdtp->dynticks);
71724 + atomic_inc_unchecked(&rdtp->dynticks);
71725 smp_mb__after_atomic_inc(); /* Force delay to next write. */
71726 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
71727 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
71728 }
71729
71730 /**
71731 @@ -712,7 +712,7 @@ int rcu_is_cpu_idle(void)
71732 int ret;
71733
71734 preempt_disable();
71735 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
71736 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
71737 preempt_enable();
71738 return ret;
71739 }
71740 @@ -795,7 +795,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
71741 */
71742 static int dyntick_save_progress_counter(struct rcu_data *rdp)
71743 {
71744 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
71745 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
71746 return (rdp->dynticks_snap & 0x1) == 0;
71747 }
71748
71749 @@ -810,7 +810,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
71750 unsigned int curr;
71751 unsigned int snap;
71752
71753 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
71754 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
71755 snap = (unsigned int)rdp->dynticks_snap;
71756
71757 /*
71758 @@ -858,10 +858,10 @@ static int jiffies_till_stall_check(void)
71759 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
71760 */
71761 if (till_stall_check < 3) {
71762 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
71763 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
71764 till_stall_check = 3;
71765 } else if (till_stall_check > 300) {
71766 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
71767 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
71768 till_stall_check = 300;
71769 }
71770 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
71771 @@ -1589,7 +1589,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
71772 rsp->qlen += rdp->qlen;
71773 rdp->n_cbs_orphaned += rdp->qlen;
71774 rdp->qlen_lazy = 0;
71775 - ACCESS_ONCE(rdp->qlen) = 0;
71776 + ACCESS_ONCE_RW(rdp->qlen) = 0;
71777 }
71778
71779 /*
71780 @@ -1831,7 +1831,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
71781 }
71782 smp_mb(); /* List handling before counting for rcu_barrier(). */
71783 rdp->qlen_lazy -= count_lazy;
71784 - ACCESS_ONCE(rdp->qlen) -= count;
71785 + ACCESS_ONCE_RW(rdp->qlen) -= count;
71786 rdp->n_cbs_invoked += count;
71787
71788 /* Reinstate batch limit if we have worked down the excess. */
71789 @@ -2024,7 +2024,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
71790 /*
71791 * Do RCU core processing for the current CPU.
71792 */
71793 -static void rcu_process_callbacks(struct softirq_action *unused)
71794 +static void rcu_process_callbacks(void)
71795 {
71796 struct rcu_state *rsp;
71797
71798 @@ -2136,7 +2136,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
71799 local_irq_restore(flags);
71800 return;
71801 }
71802 - ACCESS_ONCE(rdp->qlen)++;
71803 + ACCESS_ONCE_RW(rdp->qlen)++;
71804 if (lazy)
71805 rdp->qlen_lazy++;
71806 else
71807 @@ -2250,8 +2250,8 @@ void synchronize_rcu_bh(void)
71808 }
71809 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
71810
71811 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
71812 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
71813 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
71814 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
71815
71816 static int synchronize_sched_expedited_cpu_stop(void *data)
71817 {
71818 @@ -2312,7 +2312,7 @@ void synchronize_sched_expedited(void)
71819 int firstsnap, s, snap, trycount = 0;
71820
71821 /* Note that atomic_inc_return() implies full memory barrier. */
71822 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
71823 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
71824 get_online_cpus();
71825 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
71826
71827 @@ -2334,7 +2334,7 @@ void synchronize_sched_expedited(void)
71828 }
71829
71830 /* Check to see if someone else did our work for us. */
71831 - s = atomic_read(&sync_sched_expedited_done);
71832 + s = atomic_read_unchecked(&sync_sched_expedited_done);
71833 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
71834 smp_mb(); /* ensure test happens before caller kfree */
71835 return;
71836 @@ -2349,7 +2349,7 @@ void synchronize_sched_expedited(void)
71837 * grace period works for us.
71838 */
71839 get_online_cpus();
71840 - snap = atomic_read(&sync_sched_expedited_started);
71841 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
71842 smp_mb(); /* ensure read is before try_stop_cpus(). */
71843 }
71844
71845 @@ -2360,12 +2360,12 @@ void synchronize_sched_expedited(void)
71846 * than we did beat us to the punch.
71847 */
71848 do {
71849 - s = atomic_read(&sync_sched_expedited_done);
71850 + s = atomic_read_unchecked(&sync_sched_expedited_done);
71851 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
71852 smp_mb(); /* ensure test happens before caller kfree */
71853 break;
71854 }
71855 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
71856 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
71857
71858 put_online_cpus();
71859 }
71860 @@ -2539,7 +2539,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
71861 * ACCESS_ONCE() to prevent the compiler from speculating
71862 * the increment to precede the early-exit check.
71863 */
71864 - ACCESS_ONCE(rsp->n_barrier_done)++;
71865 + ACCESS_ONCE_RW(rsp->n_barrier_done)++;
71866 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
71867 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
71868 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
71869 @@ -2581,7 +2581,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
71870
71871 /* Increment ->n_barrier_done to prevent duplicate work. */
71872 smp_mb(); /* Keep increment after above mechanism. */
71873 - ACCESS_ONCE(rsp->n_barrier_done)++;
71874 + ACCESS_ONCE_RW(rsp->n_barrier_done)++;
71875 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
71876 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
71877 smp_mb(); /* Keep increment before caller's subsequent code. */
71878 @@ -2626,10 +2626,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
71879 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
71880 init_callback_list(rdp);
71881 rdp->qlen_lazy = 0;
71882 - ACCESS_ONCE(rdp->qlen) = 0;
71883 + ACCESS_ONCE_RW(rdp->qlen) = 0;
71884 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
71885 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
71886 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
71887 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
71888 #ifdef CONFIG_RCU_USER_QS
71889 WARN_ON_ONCE(rdp->dynticks->in_user);
71890 #endif
71891 @@ -2664,8 +2664,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
71892 rdp->blimit = blimit;
71893 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
71894 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
71895 - atomic_set(&rdp->dynticks->dynticks,
71896 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
71897 + atomic_set_unchecked(&rdp->dynticks->dynticks,
71898 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
71899 rcu_prepare_for_idle_init(cpu);
71900 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
71901
71902 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
71903 index a240f03..d469618 100644
71904 --- a/kernel/rcutree.h
71905 +++ b/kernel/rcutree.h
71906 @@ -86,7 +86,7 @@ struct rcu_dynticks {
71907 long long dynticks_nesting; /* Track irq/process nesting level. */
71908 /* Process level is worth LLONG_MAX/2. */
71909 int dynticks_nmi_nesting; /* Track NMI nesting level. */
71910 - atomic_t dynticks; /* Even value for idle, else odd. */
71911 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
71912 #ifdef CONFIG_RCU_FAST_NO_HZ
71913 int dyntick_drain; /* Prepare-for-idle state variable. */
71914 unsigned long dyntick_holdoff;
71915 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
71916 index f921154..34c4873 100644
71917 --- a/kernel/rcutree_plugin.h
71918 +++ b/kernel/rcutree_plugin.h
71919 @@ -865,7 +865,7 @@ void synchronize_rcu_expedited(void)
71920
71921 /* Clean up and exit. */
71922 smp_mb(); /* ensure expedited GP seen before counter increment. */
71923 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
71924 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
71925 unlock_mb_ret:
71926 mutex_unlock(&sync_rcu_preempt_exp_mutex);
71927 mb_ret:
71928 @@ -2040,7 +2040,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
71929 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
71930 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
71931 cpu, ticks_value, ticks_title,
71932 - atomic_read(&rdtp->dynticks) & 0xfff,
71933 + atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
71934 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
71935 fast_no_hz);
71936 }
71937 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
71938 index 693513b..b9f1d63 100644
71939 --- a/kernel/rcutree_trace.c
71940 +++ b/kernel/rcutree_trace.c
71941 @@ -92,7 +92,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
71942 rdp->completed, rdp->gpnum,
71943 rdp->passed_quiesce, rdp->qs_pending);
71944 seq_printf(m, " dt=%d/%llx/%d df=%lu",
71945 - atomic_read(&rdp->dynticks->dynticks),
71946 + atomic_read_unchecked(&rdp->dynticks->dynticks),
71947 rdp->dynticks->dynticks_nesting,
71948 rdp->dynticks->dynticks_nmi_nesting,
71949 rdp->dynticks_fqs);
71950 @@ -154,7 +154,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
71951 rdp->completed, rdp->gpnum,
71952 rdp->passed_quiesce, rdp->qs_pending);
71953 seq_printf(m, ",%d,%llx,%d,%lu",
71954 - atomic_read(&rdp->dynticks->dynticks),
71955 + atomic_read_unchecked(&rdp->dynticks->dynticks),
71956 rdp->dynticks->dynticks_nesting,
71957 rdp->dynticks->dynticks_nmi_nesting,
71958 rdp->dynticks_fqs);
71959 diff --git a/kernel/resource.c b/kernel/resource.c
71960 index 73f35d4..4684fc4 100644
71961 --- a/kernel/resource.c
71962 +++ b/kernel/resource.c
71963 @@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
71964
71965 static int __init ioresources_init(void)
71966 {
71967 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
71968 +#ifdef CONFIG_GRKERNSEC_PROC_USER
71969 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
71970 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
71971 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71972 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
71973 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
71974 +#endif
71975 +#else
71976 proc_create("ioports", 0, NULL, &proc_ioports_operations);
71977 proc_create("iomem", 0, NULL, &proc_iomem_operations);
71978 +#endif
71979 return 0;
71980 }
71981 __initcall(ioresources_init);
71982 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
71983 index 98ec494..4241d6d 100644
71984 --- a/kernel/rtmutex-tester.c
71985 +++ b/kernel/rtmutex-tester.c
71986 @@ -20,7 +20,7 @@
71987 #define MAX_RT_TEST_MUTEXES 8
71988
71989 static spinlock_t rttest_lock;
71990 -static atomic_t rttest_event;
71991 +static atomic_unchecked_t rttest_event;
71992
71993 struct test_thread_data {
71994 int opcode;
71995 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71996
71997 case RTTEST_LOCKCONT:
71998 td->mutexes[td->opdata] = 1;
71999 - td->event = atomic_add_return(1, &rttest_event);
72000 + td->event = atomic_add_return_unchecked(1, &rttest_event);
72001 return 0;
72002
72003 case RTTEST_RESET:
72004 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
72005 return 0;
72006
72007 case RTTEST_RESETEVENT:
72008 - atomic_set(&rttest_event, 0);
72009 + atomic_set_unchecked(&rttest_event, 0);
72010 return 0;
72011
72012 default:
72013 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
72014 return ret;
72015
72016 td->mutexes[id] = 1;
72017 - td->event = atomic_add_return(1, &rttest_event);
72018 + td->event = atomic_add_return_unchecked(1, &rttest_event);
72019 rt_mutex_lock(&mutexes[id]);
72020 - td->event = atomic_add_return(1, &rttest_event);
72021 + td->event = atomic_add_return_unchecked(1, &rttest_event);
72022 td->mutexes[id] = 4;
72023 return 0;
72024
72025 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
72026 return ret;
72027
72028 td->mutexes[id] = 1;
72029 - td->event = atomic_add_return(1, &rttest_event);
72030 + td->event = atomic_add_return_unchecked(1, &rttest_event);
72031 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
72032 - td->event = atomic_add_return(1, &rttest_event);
72033 + td->event = atomic_add_return_unchecked(1, &rttest_event);
72034 td->mutexes[id] = ret ? 0 : 4;
72035 return ret ? -EINTR : 0;
72036
72037 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
72038 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
72039 return ret;
72040
72041 - td->event = atomic_add_return(1, &rttest_event);
72042 + td->event = atomic_add_return_unchecked(1, &rttest_event);
72043 rt_mutex_unlock(&mutexes[id]);
72044 - td->event = atomic_add_return(1, &rttest_event);
72045 + td->event = atomic_add_return_unchecked(1, &rttest_event);
72046 td->mutexes[id] = 0;
72047 return 0;
72048
72049 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
72050 break;
72051
72052 td->mutexes[dat] = 2;
72053 - td->event = atomic_add_return(1, &rttest_event);
72054 + td->event = atomic_add_return_unchecked(1, &rttest_event);
72055 break;
72056
72057 default:
72058 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
72059 return;
72060
72061 td->mutexes[dat] = 3;
72062 - td->event = atomic_add_return(1, &rttest_event);
72063 + td->event = atomic_add_return_unchecked(1, &rttest_event);
72064 break;
72065
72066 case RTTEST_LOCKNOWAIT:
72067 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
72068 return;
72069
72070 td->mutexes[dat] = 1;
72071 - td->event = atomic_add_return(1, &rttest_event);
72072 + td->event = atomic_add_return_unchecked(1, &rttest_event);
72073 return;
72074
72075 default:
72076 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
72077 index 15f60d0..7e50319 100644
72078 --- a/kernel/sched/auto_group.c
72079 +++ b/kernel/sched/auto_group.c
72080 @@ -11,7 +11,7 @@
72081
72082 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
72083 static struct autogroup autogroup_default;
72084 -static atomic_t autogroup_seq_nr;
72085 +static atomic_unchecked_t autogroup_seq_nr;
72086
72087 void __init autogroup_init(struct task_struct *init_task)
72088 {
72089 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
72090
72091 kref_init(&ag->kref);
72092 init_rwsem(&ag->lock);
72093 - ag->id = atomic_inc_return(&autogroup_seq_nr);
72094 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
72095 ag->tg = tg;
72096 #ifdef CONFIG_RT_GROUP_SCHED
72097 /*
72098 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
72099 index 2d8927f..f617765 100644
72100 --- a/kernel/sched/core.c
72101 +++ b/kernel/sched/core.c
72102 @@ -3562,6 +3562,8 @@ int can_nice(const struct task_struct *p, const int nice)
72103 /* convert nice value [19,-20] to rlimit style value [1,40] */
72104 int nice_rlim = 20 - nice;
72105
72106 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
72107 +
72108 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
72109 capable(CAP_SYS_NICE));
72110 }
72111 @@ -3595,7 +3597,8 @@ SYSCALL_DEFINE1(nice, int, increment)
72112 if (nice > 19)
72113 nice = 19;
72114
72115 - if (increment < 0 && !can_nice(current, nice))
72116 + if (increment < 0 && (!can_nice(current, nice) ||
72117 + gr_handle_chroot_nice()))
72118 return -EPERM;
72119
72120 retval = security_task_setnice(current, nice);
72121 @@ -3749,6 +3752,7 @@ recheck:
72122 unsigned long rlim_rtprio =
72123 task_rlimit(p, RLIMIT_RTPRIO);
72124
72125 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
72126 /* can't set/change the rt policy */
72127 if (policy != p->policy && !rlim_rtprio)
72128 return -EPERM;
72129 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
72130 index 6b800a1..0c36227 100644
72131 --- a/kernel/sched/fair.c
72132 +++ b/kernel/sched/fair.c
72133 @@ -4890,7 +4890,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
72134 * run_rebalance_domains is triggered when needed from the scheduler tick.
72135 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
72136 */
72137 -static void run_rebalance_domains(struct softirq_action *h)
72138 +static void run_rebalance_domains(void)
72139 {
72140 int this_cpu = smp_processor_id();
72141 struct rq *this_rq = cpu_rq(this_cpu);
72142 diff --git a/kernel/signal.c b/kernel/signal.c
72143 index 0af8868..a00119d 100644
72144 --- a/kernel/signal.c
72145 +++ b/kernel/signal.c
72146 @@ -49,12 +49,12 @@ static struct kmem_cache *sigqueue_cachep;
72147
72148 int print_fatal_signals __read_mostly;
72149
72150 -static void __user *sig_handler(struct task_struct *t, int sig)
72151 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
72152 {
72153 return t->sighand->action[sig - 1].sa.sa_handler;
72154 }
72155
72156 -static int sig_handler_ignored(void __user *handler, int sig)
72157 +static int sig_handler_ignored(__sighandler_t handler, int sig)
72158 {
72159 /* Is it explicitly or implicitly ignored? */
72160 return handler == SIG_IGN ||
72161 @@ -63,7 +63,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
72162
72163 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
72164 {
72165 - void __user *handler;
72166 + __sighandler_t handler;
72167
72168 handler = sig_handler(t, sig);
72169
72170 @@ -367,6 +367,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
72171 atomic_inc(&user->sigpending);
72172 rcu_read_unlock();
72173
72174 + if (!override_rlimit)
72175 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
72176 +
72177 if (override_rlimit ||
72178 atomic_read(&user->sigpending) <=
72179 task_rlimit(t, RLIMIT_SIGPENDING)) {
72180 @@ -491,7 +494,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
72181
72182 int unhandled_signal(struct task_struct *tsk, int sig)
72183 {
72184 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
72185 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
72186 if (is_global_init(tsk))
72187 return 1;
72188 if (handler != SIG_IGN && handler != SIG_DFL)
72189 @@ -817,6 +820,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
72190 }
72191 }
72192
72193 + /* allow glibc communication via tgkill to other threads in our
72194 + thread group */
72195 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
72196 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
72197 + && gr_handle_signal(t, sig))
72198 + return -EPERM;
72199 +
72200 return security_task_kill(t, info, sig, 0);
72201 }
72202
72203 @@ -1198,7 +1208,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
72204 return send_signal(sig, info, p, 1);
72205 }
72206
72207 -static int
72208 +int
72209 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
72210 {
72211 return send_signal(sig, info, t, 0);
72212 @@ -1235,6 +1245,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
72213 unsigned long int flags;
72214 int ret, blocked, ignored;
72215 struct k_sigaction *action;
72216 + int is_unhandled = 0;
72217
72218 spin_lock_irqsave(&t->sighand->siglock, flags);
72219 action = &t->sighand->action[sig-1];
72220 @@ -1249,9 +1260,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
72221 }
72222 if (action->sa.sa_handler == SIG_DFL)
72223 t->signal->flags &= ~SIGNAL_UNKILLABLE;
72224 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
72225 + is_unhandled = 1;
72226 ret = specific_send_sig_info(sig, info, t);
72227 spin_unlock_irqrestore(&t->sighand->siglock, flags);
72228
72229 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
72230 + normal operation */
72231 + if (is_unhandled) {
72232 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
72233 + gr_handle_crash(t, sig);
72234 + }
72235 +
72236 return ret;
72237 }
72238
72239 @@ -1318,8 +1338,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
72240 ret = check_kill_permission(sig, info, p);
72241 rcu_read_unlock();
72242
72243 - if (!ret && sig)
72244 + if (!ret && sig) {
72245 ret = do_send_sig_info(sig, info, p, true);
72246 + if (!ret)
72247 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
72248 + }
72249
72250 return ret;
72251 }
72252 @@ -2864,7 +2887,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
72253 int error = -ESRCH;
72254
72255 rcu_read_lock();
72256 - p = find_task_by_vpid(pid);
72257 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72258 + /* allow glibc communication via tgkill to other threads in our
72259 + thread group */
72260 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
72261 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
72262 + p = find_task_by_vpid_unrestricted(pid);
72263 + else
72264 +#endif
72265 + p = find_task_by_vpid(pid);
72266 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
72267 error = check_kill_permission(sig, info, p);
72268 /*
72269 diff --git a/kernel/softirq.c b/kernel/softirq.c
72270 index cc96bdc..8bb9750 100644
72271 --- a/kernel/softirq.c
72272 +++ b/kernel/softirq.c
72273 @@ -57,7 +57,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
72274
72275 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
72276
72277 -char *softirq_to_name[NR_SOFTIRQS] = {
72278 +const char * const softirq_to_name[NR_SOFTIRQS] = {
72279 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
72280 "TASKLET", "SCHED", "HRTIMER", "RCU"
72281 };
72282 @@ -244,7 +244,7 @@ restart:
72283 kstat_incr_softirqs_this_cpu(vec_nr);
72284
72285 trace_softirq_entry(vec_nr);
72286 - h->action(h);
72287 + h->action();
72288 trace_softirq_exit(vec_nr);
72289 if (unlikely(prev_count != preempt_count())) {
72290 printk(KERN_ERR "huh, entered softirq %u %s %p"
72291 @@ -391,9 +391,11 @@ void __raise_softirq_irqoff(unsigned int nr)
72292 or_softirq_pending(1UL << nr);
72293 }
72294
72295 -void open_softirq(int nr, void (*action)(struct softirq_action *))
72296 +void open_softirq(int nr, void (*action)(void))
72297 {
72298 - softirq_vec[nr].action = action;
72299 + pax_open_kernel();
72300 + *(void **)&softirq_vec[nr].action = action;
72301 + pax_close_kernel();
72302 }
72303
72304 /*
72305 @@ -447,7 +449,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
72306
72307 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
72308
72309 -static void tasklet_action(struct softirq_action *a)
72310 +static void tasklet_action(void)
72311 {
72312 struct tasklet_struct *list;
72313
72314 @@ -482,7 +484,7 @@ static void tasklet_action(struct softirq_action *a)
72315 }
72316 }
72317
72318 -static void tasklet_hi_action(struct softirq_action *a)
72319 +static void tasklet_hi_action(void)
72320 {
72321 struct tasklet_struct *list;
72322
72323 diff --git a/kernel/srcu.c b/kernel/srcu.c
72324 index 97c465e..d83f3bb 100644
72325 --- a/kernel/srcu.c
72326 +++ b/kernel/srcu.c
72327 @@ -302,9 +302,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
72328 preempt_disable();
72329 idx = rcu_dereference_index_check(sp->completed,
72330 rcu_read_lock_sched_held()) & 0x1;
72331 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
72332 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
72333 smp_mb(); /* B */ /* Avoid leaking the critical section. */
72334 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
72335 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
72336 preempt_enable();
72337 return idx;
72338 }
72339 @@ -320,7 +320,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
72340 {
72341 preempt_disable();
72342 smp_mb(); /* C */ /* Avoid leaking the critical section. */
72343 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
72344 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
72345 preempt_enable();
72346 }
72347 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
72348 diff --git a/kernel/sys.c b/kernel/sys.c
72349 index e6e0ece..1f2e413 100644
72350 --- a/kernel/sys.c
72351 +++ b/kernel/sys.c
72352 @@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
72353 error = -EACCES;
72354 goto out;
72355 }
72356 +
72357 + if (gr_handle_chroot_setpriority(p, niceval)) {
72358 + error = -EACCES;
72359 + goto out;
72360 + }
72361 +
72362 no_nice = security_task_setnice(p, niceval);
72363 if (no_nice) {
72364 error = no_nice;
72365 @@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
72366 goto error;
72367 }
72368
72369 + if (gr_check_group_change(new->gid, new->egid, -1))
72370 + goto error;
72371 +
72372 if (rgid != (gid_t) -1 ||
72373 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
72374 new->sgid = new->egid;
72375 @@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
72376 old = current_cred();
72377
72378 retval = -EPERM;
72379 +
72380 + if (gr_check_group_change(kgid, kgid, kgid))
72381 + goto error;
72382 +
72383 if (nsown_capable(CAP_SETGID))
72384 new->gid = new->egid = new->sgid = new->fsgid = kgid;
72385 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
72386 @@ -647,7 +660,7 @@ error:
72387 /*
72388 * change the user struct in a credentials set to match the new UID
72389 */
72390 -static int set_user(struct cred *new)
72391 +int set_user(struct cred *new)
72392 {
72393 struct user_struct *new_user;
72394
72395 @@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
72396 goto error;
72397 }
72398
72399 + if (gr_check_user_change(new->uid, new->euid, -1))
72400 + goto error;
72401 +
72402 if (!uid_eq(new->uid, old->uid)) {
72403 retval = set_user(new);
72404 if (retval < 0)
72405 @@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
72406 old = current_cred();
72407
72408 retval = -EPERM;
72409 +
72410 + if (gr_check_crash_uid(kuid))
72411 + goto error;
72412 + if (gr_check_user_change(kuid, kuid, kuid))
72413 + goto error;
72414 +
72415 if (nsown_capable(CAP_SETUID)) {
72416 new->suid = new->uid = kuid;
72417 if (!uid_eq(kuid, old->uid)) {
72418 @@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
72419 goto error;
72420 }
72421
72422 + if (gr_check_user_change(kruid, keuid, -1))
72423 + goto error;
72424 +
72425 if (ruid != (uid_t) -1) {
72426 new->uid = kruid;
72427 if (!uid_eq(kruid, old->uid)) {
72428 @@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
72429 goto error;
72430 }
72431
72432 + if (gr_check_group_change(krgid, kegid, -1))
72433 + goto error;
72434 +
72435 if (rgid != (gid_t) -1)
72436 new->gid = krgid;
72437 if (egid != (gid_t) -1)
72438 @@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
72439 if (!uid_valid(kuid))
72440 return old_fsuid;
72441
72442 + if (gr_check_user_change(-1, -1, kuid))
72443 + goto error;
72444 +
72445 new = prepare_creds();
72446 if (!new)
72447 return old_fsuid;
72448 @@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
72449 }
72450 }
72451
72452 +error:
72453 abort_creds(new);
72454 return old_fsuid;
72455
72456 @@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
72457 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
72458 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
72459 nsown_capable(CAP_SETGID)) {
72460 + if (gr_check_group_change(-1, -1, kgid))
72461 + goto error;
72462 +
72463 if (!gid_eq(kgid, old->fsgid)) {
72464 new->fsgid = kgid;
72465 goto change_okay;
72466 }
72467 }
72468
72469 +error:
72470 abort_creds(new);
72471 return old_fsgid;
72472
72473 @@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
72474 return -EFAULT;
72475
72476 down_read(&uts_sem);
72477 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
72478 + error = __copy_to_user(name->sysname, &utsname()->sysname,
72479 __OLD_UTS_LEN);
72480 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
72481 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
72482 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
72483 __OLD_UTS_LEN);
72484 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
72485 - error |= __copy_to_user(&name->release, &utsname()->release,
72486 + error |= __copy_to_user(name->release, &utsname()->release,
72487 __OLD_UTS_LEN);
72488 error |= __put_user(0, name->release + __OLD_UTS_LEN);
72489 - error |= __copy_to_user(&name->version, &utsname()->version,
72490 + error |= __copy_to_user(name->version, &utsname()->version,
72491 __OLD_UTS_LEN);
72492 error |= __put_user(0, name->version + __OLD_UTS_LEN);
72493 - error |= __copy_to_user(&name->machine, &utsname()->machine,
72494 + error |= __copy_to_user(name->machine, &utsname()->machine,
72495 __OLD_UTS_LEN);
72496 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
72497 up_read(&uts_sem);
72498 @@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
72499 error = get_dumpable(me->mm);
72500 break;
72501 case PR_SET_DUMPABLE:
72502 - if (arg2 < 0 || arg2 > 1) {
72503 + if (arg2 > 1) {
72504 error = -EINVAL;
72505 break;
72506 }
72507 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
72508 index 26f65ea..df8e5ad 100644
72509 --- a/kernel/sysctl.c
72510 +++ b/kernel/sysctl.c
72511 @@ -92,7 +92,6 @@
72512
72513
72514 #if defined(CONFIG_SYSCTL)
72515 -
72516 /* External variables not in a header file. */
72517 extern int sysctl_overcommit_memory;
72518 extern int sysctl_overcommit_ratio;
72519 @@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
72520 void __user *buffer, size_t *lenp, loff_t *ppos);
72521 #endif
72522
72523 -#ifdef CONFIG_PRINTK
72524 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72525 void __user *buffer, size_t *lenp, loff_t *ppos);
72526 -#endif
72527
72528 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
72529 void __user *buffer, size_t *lenp, loff_t *ppos);
72530 @@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
72531
72532 #endif
72533
72534 +extern struct ctl_table grsecurity_table[];
72535 +
72536 static struct ctl_table kern_table[];
72537 static struct ctl_table vm_table[];
72538 static struct ctl_table fs_table[];
72539 @@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
72540 int sysctl_legacy_va_layout;
72541 #endif
72542
72543 +#ifdef CONFIG_PAX_SOFTMODE
72544 +static ctl_table pax_table[] = {
72545 + {
72546 + .procname = "softmode",
72547 + .data = &pax_softmode,
72548 + .maxlen = sizeof(unsigned int),
72549 + .mode = 0600,
72550 + .proc_handler = &proc_dointvec,
72551 + },
72552 +
72553 + { }
72554 +};
72555 +#endif
72556 +
72557 /* The default sysctl tables: */
72558
72559 static struct ctl_table sysctl_base_table[] = {
72560 @@ -266,6 +279,22 @@ static int max_extfrag_threshold = 1000;
72561 #endif
72562
72563 static struct ctl_table kern_table[] = {
72564 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
72565 + {
72566 + .procname = "grsecurity",
72567 + .mode = 0500,
72568 + .child = grsecurity_table,
72569 + },
72570 +#endif
72571 +
72572 +#ifdef CONFIG_PAX_SOFTMODE
72573 + {
72574 + .procname = "pax",
72575 + .mode = 0500,
72576 + .child = pax_table,
72577 + },
72578 +#endif
72579 +
72580 {
72581 .procname = "sched_child_runs_first",
72582 .data = &sysctl_sched_child_runs_first,
72583 @@ -552,7 +581,7 @@ static struct ctl_table kern_table[] = {
72584 .data = &modprobe_path,
72585 .maxlen = KMOD_PATH_LEN,
72586 .mode = 0644,
72587 - .proc_handler = proc_dostring,
72588 + .proc_handler = proc_dostring_modpriv,
72589 },
72590 {
72591 .procname = "modules_disabled",
72592 @@ -719,16 +748,20 @@ static struct ctl_table kern_table[] = {
72593 .extra1 = &zero,
72594 .extra2 = &one,
72595 },
72596 +#endif
72597 {
72598 .procname = "kptr_restrict",
72599 .data = &kptr_restrict,
72600 .maxlen = sizeof(int),
72601 .mode = 0644,
72602 .proc_handler = proc_dointvec_minmax_sysadmin,
72603 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72604 + .extra1 = &two,
72605 +#else
72606 .extra1 = &zero,
72607 +#endif
72608 .extra2 = &two,
72609 },
72610 -#endif
72611 {
72612 .procname = "ngroups_max",
72613 .data = &ngroups_max,
72614 @@ -1225,6 +1258,13 @@ static struct ctl_table vm_table[] = {
72615 .proc_handler = proc_dointvec_minmax,
72616 .extra1 = &zero,
72617 },
72618 + {
72619 + .procname = "heap_stack_gap",
72620 + .data = &sysctl_heap_stack_gap,
72621 + .maxlen = sizeof(sysctl_heap_stack_gap),
72622 + .mode = 0644,
72623 + .proc_handler = proc_doulongvec_minmax,
72624 + },
72625 #else
72626 {
72627 .procname = "nr_trim_pages",
72628 @@ -1675,6 +1715,16 @@ int proc_dostring(struct ctl_table *table, int write,
72629 buffer, lenp, ppos);
72630 }
72631
72632 +int proc_dostring_modpriv(struct ctl_table *table, int write,
72633 + void __user *buffer, size_t *lenp, loff_t *ppos)
72634 +{
72635 + if (write && !capable(CAP_SYS_MODULE))
72636 + return -EPERM;
72637 +
72638 + return _proc_do_string(table->data, table->maxlen, write,
72639 + buffer, lenp, ppos);
72640 +}
72641 +
72642 static size_t proc_skip_spaces(char **buf)
72643 {
72644 size_t ret;
72645 @@ -1780,6 +1830,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
72646 len = strlen(tmp);
72647 if (len > *size)
72648 len = *size;
72649 + if (len > sizeof(tmp))
72650 + len = sizeof(tmp);
72651 if (copy_to_user(*buf, tmp, len))
72652 return -EFAULT;
72653 *size -= len;
72654 @@ -1972,7 +2024,6 @@ static int proc_taint(struct ctl_table *table, int write,
72655 return err;
72656 }
72657
72658 -#ifdef CONFIG_PRINTK
72659 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72660 void __user *buffer, size_t *lenp, loff_t *ppos)
72661 {
72662 @@ -1981,7 +2032,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72663
72664 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
72665 }
72666 -#endif
72667
72668 struct do_proc_dointvec_minmax_conv_param {
72669 int *min;
72670 @@ -2128,8 +2178,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
72671 *i = val;
72672 } else {
72673 val = convdiv * (*i) / convmul;
72674 - if (!first)
72675 + if (!first) {
72676 err = proc_put_char(&buffer, &left, '\t');
72677 + if (err)
72678 + break;
72679 + }
72680 err = proc_put_long(&buffer, &left, val, false);
72681 if (err)
72682 break;
72683 @@ -2521,6 +2574,12 @@ int proc_dostring(struct ctl_table *table, int write,
72684 return -ENOSYS;
72685 }
72686
72687 +int proc_dostring_modpriv(struct ctl_table *table, int write,
72688 + void __user *buffer, size_t *lenp, loff_t *ppos)
72689 +{
72690 + return -ENOSYS;
72691 +}
72692 +
72693 int proc_dointvec(struct ctl_table *table, int write,
72694 void __user *buffer, size_t *lenp, loff_t *ppos)
72695 {
72696 @@ -2577,5 +2636,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
72697 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
72698 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
72699 EXPORT_SYMBOL(proc_dostring);
72700 +EXPORT_SYMBOL(proc_dostring_modpriv);
72701 EXPORT_SYMBOL(proc_doulongvec_minmax);
72702 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
72703 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
72704 index 65bdcf1..21eb831 100644
72705 --- a/kernel/sysctl_binary.c
72706 +++ b/kernel/sysctl_binary.c
72707 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
72708 int i;
72709
72710 set_fs(KERNEL_DS);
72711 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
72712 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
72713 set_fs(old_fs);
72714 if (result < 0)
72715 goto out_kfree;
72716 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
72717 }
72718
72719 set_fs(KERNEL_DS);
72720 - result = vfs_write(file, buffer, str - buffer, &pos);
72721 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
72722 set_fs(old_fs);
72723 if (result < 0)
72724 goto out_kfree;
72725 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
72726 int i;
72727
72728 set_fs(KERNEL_DS);
72729 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
72730 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
72731 set_fs(old_fs);
72732 if (result < 0)
72733 goto out_kfree;
72734 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
72735 }
72736
72737 set_fs(KERNEL_DS);
72738 - result = vfs_write(file, buffer, str - buffer, &pos);
72739 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
72740 set_fs(old_fs);
72741 if (result < 0)
72742 goto out_kfree;
72743 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
72744 int i;
72745
72746 set_fs(KERNEL_DS);
72747 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
72748 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
72749 set_fs(old_fs);
72750 if (result < 0)
72751 goto out;
72752 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
72753 __le16 dnaddr;
72754
72755 set_fs(KERNEL_DS);
72756 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
72757 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
72758 set_fs(old_fs);
72759 if (result < 0)
72760 goto out;
72761 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
72762 le16_to_cpu(dnaddr) & 0x3ff);
72763
72764 set_fs(KERNEL_DS);
72765 - result = vfs_write(file, buf, len, &pos);
72766 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
72767 set_fs(old_fs);
72768 if (result < 0)
72769 goto out;
72770 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
72771 index 145bb4d..b2aa969 100644
72772 --- a/kernel/taskstats.c
72773 +++ b/kernel/taskstats.c
72774 @@ -28,9 +28,12 @@
72775 #include <linux/fs.h>
72776 #include <linux/file.h>
72777 #include <linux/pid_namespace.h>
72778 +#include <linux/grsecurity.h>
72779 #include <net/genetlink.h>
72780 #include <linux/atomic.h>
72781
72782 +extern int gr_is_taskstats_denied(int pid);
72783 +
72784 /*
72785 * Maximum length of a cpumask that can be specified in
72786 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
72787 @@ -570,6 +573,9 @@ err:
72788
72789 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
72790 {
72791 + if (gr_is_taskstats_denied(current->pid))
72792 + return -EACCES;
72793 +
72794 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
72795 return cmd_attr_register_cpumask(info);
72796 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
72797 diff --git a/kernel/time.c b/kernel/time.c
72798 index d226c6a..c7c0960 100644
72799 --- a/kernel/time.c
72800 +++ b/kernel/time.c
72801 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
72802 return error;
72803
72804 if (tz) {
72805 + /* we log in do_settimeofday called below, so don't log twice
72806 + */
72807 + if (!tv)
72808 + gr_log_timechange();
72809 +
72810 sys_tz = *tz;
72811 update_vsyscall_tz();
72812 if (firsttime) {
72813 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
72814 index f11d83b..d016d91 100644
72815 --- a/kernel/time/alarmtimer.c
72816 +++ b/kernel/time/alarmtimer.c
72817 @@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
72818 struct platform_device *pdev;
72819 int error = 0;
72820 int i;
72821 - struct k_clock alarm_clock = {
72822 + static struct k_clock alarm_clock = {
72823 .clock_getres = alarm_clock_getres,
72824 .clock_get = alarm_clock_get,
72825 .timer_create = alarm_timer_create,
72826 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
72827 index f113755..ec24223 100644
72828 --- a/kernel/time/tick-broadcast.c
72829 +++ b/kernel/time/tick-broadcast.c
72830 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
72831 * then clear the broadcast bit.
72832 */
72833 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
72834 - int cpu = smp_processor_id();
72835 + cpu = smp_processor_id();
72836
72837 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
72838 tick_broadcast_clear_oneshot(cpu);
72839 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
72840 index e424970..4c7962b 100644
72841 --- a/kernel/time/timekeeping.c
72842 +++ b/kernel/time/timekeeping.c
72843 @@ -15,6 +15,7 @@
72844 #include <linux/init.h>
72845 #include <linux/mm.h>
72846 #include <linux/sched.h>
72847 +#include <linux/grsecurity.h>
72848 #include <linux/syscore_ops.h>
72849 #include <linux/clocksource.h>
72850 #include <linux/jiffies.h>
72851 @@ -368,6 +369,8 @@ int do_settimeofday(const struct timespec *tv)
72852 if (!timespec_valid_strict(tv))
72853 return -EINVAL;
72854
72855 + gr_log_timechange();
72856 +
72857 write_seqlock_irqsave(&tk->lock, flags);
72858
72859 timekeeping_forward_now(tk);
72860 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
72861 index af5a7e9..715611a 100644
72862 --- a/kernel/time/timer_list.c
72863 +++ b/kernel/time/timer_list.c
72864 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
72865
72866 static void print_name_offset(struct seq_file *m, void *sym)
72867 {
72868 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72869 + SEQ_printf(m, "<%p>", NULL);
72870 +#else
72871 char symname[KSYM_NAME_LEN];
72872
72873 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
72874 SEQ_printf(m, "<%pK>", sym);
72875 else
72876 SEQ_printf(m, "%s", symname);
72877 +#endif
72878 }
72879
72880 static void
72881 @@ -112,7 +116,11 @@ next_one:
72882 static void
72883 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
72884 {
72885 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72886 + SEQ_printf(m, " .base: %p\n", NULL);
72887 +#else
72888 SEQ_printf(m, " .base: %pK\n", base);
72889 +#endif
72890 SEQ_printf(m, " .index: %d\n",
72891 base->index);
72892 SEQ_printf(m, " .resolution: %Lu nsecs\n",
72893 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
72894 {
72895 struct proc_dir_entry *pe;
72896
72897 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
72898 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
72899 +#else
72900 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
72901 +#endif
72902 if (!pe)
72903 return -ENOMEM;
72904 return 0;
72905 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
72906 index 0b537f2..40d6c20 100644
72907 --- a/kernel/time/timer_stats.c
72908 +++ b/kernel/time/timer_stats.c
72909 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
72910 static unsigned long nr_entries;
72911 static struct entry entries[MAX_ENTRIES];
72912
72913 -static atomic_t overflow_count;
72914 +static atomic_unchecked_t overflow_count;
72915
72916 /*
72917 * The entries are in a hash-table, for fast lookup:
72918 @@ -140,7 +140,7 @@ static void reset_entries(void)
72919 nr_entries = 0;
72920 memset(entries, 0, sizeof(entries));
72921 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
72922 - atomic_set(&overflow_count, 0);
72923 + atomic_set_unchecked(&overflow_count, 0);
72924 }
72925
72926 static struct entry *alloc_entry(void)
72927 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
72928 if (likely(entry))
72929 entry->count++;
72930 else
72931 - atomic_inc(&overflow_count);
72932 + atomic_inc_unchecked(&overflow_count);
72933
72934 out_unlock:
72935 raw_spin_unlock_irqrestore(lock, flags);
72936 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
72937
72938 static void print_name_offset(struct seq_file *m, unsigned long addr)
72939 {
72940 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72941 + seq_printf(m, "<%p>", NULL);
72942 +#else
72943 char symname[KSYM_NAME_LEN];
72944
72945 if (lookup_symbol_name(addr, symname) < 0)
72946 - seq_printf(m, "<%p>", (void *)addr);
72947 + seq_printf(m, "<%pK>", (void *)addr);
72948 else
72949 seq_printf(m, "%s", symname);
72950 +#endif
72951 }
72952
72953 static int tstats_show(struct seq_file *m, void *v)
72954 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
72955
72956 seq_puts(m, "Timer Stats Version: v0.2\n");
72957 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
72958 - if (atomic_read(&overflow_count))
72959 + if (atomic_read_unchecked(&overflow_count))
72960 seq_printf(m, "Overflow: %d entries\n",
72961 - atomic_read(&overflow_count));
72962 + atomic_read_unchecked(&overflow_count));
72963
72964 for (i = 0; i < nr_entries; i++) {
72965 entry = entries + i;
72966 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
72967 {
72968 struct proc_dir_entry *pe;
72969
72970 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
72971 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
72972 +#else
72973 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
72974 +#endif
72975 if (!pe)
72976 return -ENOMEM;
72977 return 0;
72978 diff --git a/kernel/timer.c b/kernel/timer.c
72979 index 367d008..46857a0 100644
72980 --- a/kernel/timer.c
72981 +++ b/kernel/timer.c
72982 @@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
72983 /*
72984 * This function runs timers and the timer-tq in bottom half context.
72985 */
72986 -static void run_timer_softirq(struct softirq_action *h)
72987 +static void run_timer_softirq(void)
72988 {
72989 struct tvec_base *base = __this_cpu_read(tvec_bases);
72990
72991 @@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
72992 return NOTIFY_OK;
72993 }
72994
72995 -static struct notifier_block __cpuinitdata timers_nb = {
72996 +static struct notifier_block __cpuinitconst timers_nb = {
72997 .notifier_call = timer_cpu_notify,
72998 };
72999
73000 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
73001 index c0bd030..62a1927 100644
73002 --- a/kernel/trace/blktrace.c
73003 +++ b/kernel/trace/blktrace.c
73004 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
73005 struct blk_trace *bt = filp->private_data;
73006 char buf[16];
73007
73008 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
73009 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
73010
73011 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
73012 }
73013 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
73014 return 1;
73015
73016 bt = buf->chan->private_data;
73017 - atomic_inc(&bt->dropped);
73018 + atomic_inc_unchecked(&bt->dropped);
73019 return 0;
73020 }
73021
73022 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
73023
73024 bt->dir = dir;
73025 bt->dev = dev;
73026 - atomic_set(&bt->dropped, 0);
73027 + atomic_set_unchecked(&bt->dropped, 0);
73028
73029 ret = -EIO;
73030 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
73031 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
73032 index 51b7159..18137d6 100644
73033 --- a/kernel/trace/ftrace.c
73034 +++ b/kernel/trace/ftrace.c
73035 @@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
73036 if (unlikely(ftrace_disabled))
73037 return 0;
73038
73039 + ret = ftrace_arch_code_modify_prepare();
73040 + FTRACE_WARN_ON(ret);
73041 + if (ret)
73042 + return 0;
73043 +
73044 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
73045 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
73046 if (ret) {
73047 ftrace_bug(ret, ip);
73048 - return 0;
73049 }
73050 - return 1;
73051 + return ret ? 0 : 1;
73052 }
73053
73054 /*
73055 @@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
73056
73057 int
73058 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
73059 - void *data)
73060 + void *data)
73061 {
73062 struct ftrace_func_probe *entry;
73063 struct ftrace_page *pg;
73064 @@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
73065 if (!count)
73066 return 0;
73067
73068 + pax_open_kernel();
73069 sort(start, count, sizeof(*start),
73070 ftrace_cmp_ips, ftrace_swap_ips);
73071 + pax_close_kernel();
73072
73073 start_pg = ftrace_allocate_pages(count);
73074 if (!start_pg)
73075 @@ -4541,8 +4548,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
73076 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
73077
73078 static int ftrace_graph_active;
73079 -static struct notifier_block ftrace_suspend_notifier;
73080 -
73081 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
73082 {
73083 return 0;
73084 @@ -4686,6 +4691,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
73085 return NOTIFY_DONE;
73086 }
73087
73088 +static struct notifier_block ftrace_suspend_notifier = {
73089 + .notifier_call = ftrace_suspend_notifier_call
73090 +};
73091 +
73092 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
73093 trace_func_graph_ent_t entryfunc)
73094 {
73095 @@ -4699,7 +4708,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
73096 goto out;
73097 }
73098
73099 - ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
73100 register_pm_notifier(&ftrace_suspend_notifier);
73101
73102 ftrace_graph_active++;
73103 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
73104 index 4cb5e51..6010f39 100644
73105 --- a/kernel/trace/ring_buffer.c
73106 +++ b/kernel/trace/ring_buffer.c
73107 @@ -346,9 +346,9 @@ struct buffer_data_page {
73108 */
73109 struct buffer_page {
73110 struct list_head list; /* list of buffer pages */
73111 - local_t write; /* index for next write */
73112 + local_unchecked_t write; /* index for next write */
73113 unsigned read; /* index for next read */
73114 - local_t entries; /* entries on this page */
73115 + local_unchecked_t entries; /* entries on this page */
73116 unsigned long real_end; /* real end of data */
73117 struct buffer_data_page *page; /* Actual data page */
73118 };
73119 @@ -460,8 +460,8 @@ struct ring_buffer_per_cpu {
73120 unsigned long lost_events;
73121 unsigned long last_overrun;
73122 local_t entries_bytes;
73123 - local_t commit_overrun;
73124 - local_t overrun;
73125 + local_unchecked_t commit_overrun;
73126 + local_unchecked_t overrun;
73127 local_t entries;
73128 local_t committing;
73129 local_t commits;
73130 @@ -490,7 +490,7 @@ struct ring_buffer {
73131 struct ring_buffer_per_cpu **buffers;
73132
73133 #ifdef CONFIG_HOTPLUG_CPU
73134 - struct notifier_block cpu_notify;
73135 + notifier_block_no_const cpu_notify;
73136 #endif
73137 u64 (*clock)(void);
73138 };
73139 @@ -860,8 +860,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
73140 *
73141 * We add a counter to the write field to denote this.
73142 */
73143 - old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
73144 - old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
73145 + old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
73146 + old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
73147
73148 /*
73149 * Just make sure we have seen our old_write and synchronize
73150 @@ -889,8 +889,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
73151 * cmpxchg to only update if an interrupt did not already
73152 * do it for us. If the cmpxchg fails, we don't care.
73153 */
73154 - (void)local_cmpxchg(&next_page->write, old_write, val);
73155 - (void)local_cmpxchg(&next_page->entries, old_entries, eval);
73156 + (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
73157 + (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
73158
73159 /*
73160 * No need to worry about races with clearing out the commit.
73161 @@ -1249,12 +1249,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
73162
73163 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
73164 {
73165 - return local_read(&bpage->entries) & RB_WRITE_MASK;
73166 + return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
73167 }
73168
73169 static inline unsigned long rb_page_write(struct buffer_page *bpage)
73170 {
73171 - return local_read(&bpage->write) & RB_WRITE_MASK;
73172 + return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
73173 }
73174
73175 static int
73176 @@ -1349,7 +1349,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
73177 * bytes consumed in ring buffer from here.
73178 * Increment overrun to account for the lost events.
73179 */
73180 - local_add(page_entries, &cpu_buffer->overrun);
73181 + local_add_unchecked(page_entries, &cpu_buffer->overrun);
73182 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
73183 }
73184
73185 @@ -1905,7 +1905,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
73186 * it is our responsibility to update
73187 * the counters.
73188 */
73189 - local_add(entries, &cpu_buffer->overrun);
73190 + local_add_unchecked(entries, &cpu_buffer->overrun);
73191 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
73192
73193 /*
73194 @@ -2055,7 +2055,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
73195 if (tail == BUF_PAGE_SIZE)
73196 tail_page->real_end = 0;
73197
73198 - local_sub(length, &tail_page->write);
73199 + local_sub_unchecked(length, &tail_page->write);
73200 return;
73201 }
73202
73203 @@ -2090,7 +2090,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
73204 rb_event_set_padding(event);
73205
73206 /* Set the write back to the previous setting */
73207 - local_sub(length, &tail_page->write);
73208 + local_sub_unchecked(length, &tail_page->write);
73209 return;
73210 }
73211
73212 @@ -2102,7 +2102,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
73213
73214 /* Set write to end of buffer */
73215 length = (tail + length) - BUF_PAGE_SIZE;
73216 - local_sub(length, &tail_page->write);
73217 + local_sub_unchecked(length, &tail_page->write);
73218 }
73219
73220 /*
73221 @@ -2128,7 +2128,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
73222 * about it.
73223 */
73224 if (unlikely(next_page == commit_page)) {
73225 - local_inc(&cpu_buffer->commit_overrun);
73226 + local_inc_unchecked(&cpu_buffer->commit_overrun);
73227 goto out_reset;
73228 }
73229
73230 @@ -2182,7 +2182,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
73231 cpu_buffer->tail_page) &&
73232 (cpu_buffer->commit_page ==
73233 cpu_buffer->reader_page))) {
73234 - local_inc(&cpu_buffer->commit_overrun);
73235 + local_inc_unchecked(&cpu_buffer->commit_overrun);
73236 goto out_reset;
73237 }
73238 }
73239 @@ -2230,7 +2230,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
73240 length += RB_LEN_TIME_EXTEND;
73241
73242 tail_page = cpu_buffer->tail_page;
73243 - write = local_add_return(length, &tail_page->write);
73244 + write = local_add_return_unchecked(length, &tail_page->write);
73245
73246 /* set write to only the index of the write */
73247 write &= RB_WRITE_MASK;
73248 @@ -2247,7 +2247,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
73249 kmemcheck_annotate_bitfield(event, bitfield);
73250 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
73251
73252 - local_inc(&tail_page->entries);
73253 + local_inc_unchecked(&tail_page->entries);
73254
73255 /*
73256 * If this is the first commit on the page, then update
73257 @@ -2280,7 +2280,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
73258
73259 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
73260 unsigned long write_mask =
73261 - local_read(&bpage->write) & ~RB_WRITE_MASK;
73262 + local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
73263 unsigned long event_length = rb_event_length(event);
73264 /*
73265 * This is on the tail page. It is possible that
73266 @@ -2290,7 +2290,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
73267 */
73268 old_index += write_mask;
73269 new_index += write_mask;
73270 - index = local_cmpxchg(&bpage->write, old_index, new_index);
73271 + index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
73272 if (index == old_index) {
73273 /* update counters */
73274 local_sub(event_length, &cpu_buffer->entries_bytes);
73275 @@ -2629,7 +2629,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
73276
73277 /* Do the likely case first */
73278 if (likely(bpage->page == (void *)addr)) {
73279 - local_dec(&bpage->entries);
73280 + local_dec_unchecked(&bpage->entries);
73281 return;
73282 }
73283
73284 @@ -2641,7 +2641,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
73285 start = bpage;
73286 do {
73287 if (bpage->page == (void *)addr) {
73288 - local_dec(&bpage->entries);
73289 + local_dec_unchecked(&bpage->entries);
73290 return;
73291 }
73292 rb_inc_page(cpu_buffer, &bpage);
73293 @@ -2923,7 +2923,7 @@ static inline unsigned long
73294 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
73295 {
73296 return local_read(&cpu_buffer->entries) -
73297 - (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
73298 + (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
73299 }
73300
73301 /**
73302 @@ -3011,7 +3011,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
73303 return 0;
73304
73305 cpu_buffer = buffer->buffers[cpu];
73306 - ret = local_read(&cpu_buffer->overrun);
73307 + ret = local_read_unchecked(&cpu_buffer->overrun);
73308
73309 return ret;
73310 }
73311 @@ -3032,7 +3032,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
73312 return 0;
73313
73314 cpu_buffer = buffer->buffers[cpu];
73315 - ret = local_read(&cpu_buffer->commit_overrun);
73316 + ret = local_read_unchecked(&cpu_buffer->commit_overrun);
73317
73318 return ret;
73319 }
73320 @@ -3077,7 +3077,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
73321 /* if you care about this being correct, lock the buffer */
73322 for_each_buffer_cpu(buffer, cpu) {
73323 cpu_buffer = buffer->buffers[cpu];
73324 - overruns += local_read(&cpu_buffer->overrun);
73325 + overruns += local_read_unchecked(&cpu_buffer->overrun);
73326 }
73327
73328 return overruns;
73329 @@ -3253,8 +3253,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
73330 /*
73331 * Reset the reader page to size zero.
73332 */
73333 - local_set(&cpu_buffer->reader_page->write, 0);
73334 - local_set(&cpu_buffer->reader_page->entries, 0);
73335 + local_set_unchecked(&cpu_buffer->reader_page->write, 0);
73336 + local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
73337 local_set(&cpu_buffer->reader_page->page->commit, 0);
73338 cpu_buffer->reader_page->real_end = 0;
73339
73340 @@ -3288,7 +3288,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
73341 * want to compare with the last_overrun.
73342 */
73343 smp_mb();
73344 - overwrite = local_read(&(cpu_buffer->overrun));
73345 + overwrite = local_read_unchecked(&(cpu_buffer->overrun));
73346
73347 /*
73348 * Here's the tricky part.
73349 @@ -3858,8 +3858,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
73350
73351 cpu_buffer->head_page
73352 = list_entry(cpu_buffer->pages, struct buffer_page, list);
73353 - local_set(&cpu_buffer->head_page->write, 0);
73354 - local_set(&cpu_buffer->head_page->entries, 0);
73355 + local_set_unchecked(&cpu_buffer->head_page->write, 0);
73356 + local_set_unchecked(&cpu_buffer->head_page->entries, 0);
73357 local_set(&cpu_buffer->head_page->page->commit, 0);
73358
73359 cpu_buffer->head_page->read = 0;
73360 @@ -3869,14 +3869,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
73361
73362 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
73363 INIT_LIST_HEAD(&cpu_buffer->new_pages);
73364 - local_set(&cpu_buffer->reader_page->write, 0);
73365 - local_set(&cpu_buffer->reader_page->entries, 0);
73366 + local_set_unchecked(&cpu_buffer->reader_page->write, 0);
73367 + local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
73368 local_set(&cpu_buffer->reader_page->page->commit, 0);
73369 cpu_buffer->reader_page->read = 0;
73370
73371 - local_set(&cpu_buffer->commit_overrun, 0);
73372 + local_set_unchecked(&cpu_buffer->commit_overrun, 0);
73373 local_set(&cpu_buffer->entries_bytes, 0);
73374 - local_set(&cpu_buffer->overrun, 0);
73375 + local_set_unchecked(&cpu_buffer->overrun, 0);
73376 local_set(&cpu_buffer->entries, 0);
73377 local_set(&cpu_buffer->committing, 0);
73378 local_set(&cpu_buffer->commits, 0);
73379 @@ -4279,8 +4279,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
73380 rb_init_page(bpage);
73381 bpage = reader->page;
73382 reader->page = *data_page;
73383 - local_set(&reader->write, 0);
73384 - local_set(&reader->entries, 0);
73385 + local_set_unchecked(&reader->write, 0);
73386 + local_set_unchecked(&reader->entries, 0);
73387 reader->read = 0;
73388 *data_page = bpage;
73389
73390 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
73391 index 31e4f55..62da00f 100644
73392 --- a/kernel/trace/trace.c
73393 +++ b/kernel/trace/trace.c
73394 @@ -4436,10 +4436,9 @@ static const struct file_operations tracing_dyn_info_fops = {
73395 };
73396 #endif
73397
73398 -static struct dentry *d_tracer;
73399 -
73400 struct dentry *tracing_init_dentry(void)
73401 {
73402 + static struct dentry *d_tracer;
73403 static int once;
73404
73405 if (d_tracer)
73406 @@ -4459,10 +4458,9 @@ struct dentry *tracing_init_dentry(void)
73407 return d_tracer;
73408 }
73409
73410 -static struct dentry *d_percpu;
73411 -
73412 struct dentry *tracing_dentry_percpu(void)
73413 {
73414 + static struct dentry *d_percpu;
73415 static int once;
73416 struct dentry *d_tracer;
73417
73418 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
73419 index d608d09..7eddcb1 100644
73420 --- a/kernel/trace/trace_events.c
73421 +++ b/kernel/trace/trace_events.c
73422 @@ -1320,10 +1320,6 @@ static LIST_HEAD(ftrace_module_file_list);
73423 struct ftrace_module_file_ops {
73424 struct list_head list;
73425 struct module *mod;
73426 - struct file_operations id;
73427 - struct file_operations enable;
73428 - struct file_operations format;
73429 - struct file_operations filter;
73430 };
73431
73432 static struct ftrace_module_file_ops *
73433 @@ -1344,17 +1340,12 @@ trace_create_file_ops(struct module *mod)
73434
73435 file_ops->mod = mod;
73436
73437 - file_ops->id = ftrace_event_id_fops;
73438 - file_ops->id.owner = mod;
73439 -
73440 - file_ops->enable = ftrace_enable_fops;
73441 - file_ops->enable.owner = mod;
73442 -
73443 - file_ops->filter = ftrace_event_filter_fops;
73444 - file_ops->filter.owner = mod;
73445 -
73446 - file_ops->format = ftrace_event_format_fops;
73447 - file_ops->format.owner = mod;
73448 + pax_open_kernel();
73449 + *(void **)&mod->trace_id.owner = mod;
73450 + *(void **)&mod->trace_enable.owner = mod;
73451 + *(void **)&mod->trace_filter.owner = mod;
73452 + *(void **)&mod->trace_format.owner = mod;
73453 + pax_close_kernel();
73454
73455 list_add(&file_ops->list, &ftrace_module_file_list);
73456
73457 @@ -1378,8 +1369,8 @@ static void trace_module_add_events(struct module *mod)
73458
73459 for_each_event(call, start, end) {
73460 __trace_add_event_call(*call, mod,
73461 - &file_ops->id, &file_ops->enable,
73462 - &file_ops->filter, &file_ops->format);
73463 + &mod->trace_id, &mod->trace_enable,
73464 + &mod->trace_filter, &mod->trace_format);
73465 }
73466 }
73467
73468 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
73469 index fd3c8aa..5f324a6 100644
73470 --- a/kernel/trace/trace_mmiotrace.c
73471 +++ b/kernel/trace/trace_mmiotrace.c
73472 @@ -24,7 +24,7 @@ struct header_iter {
73473 static struct trace_array *mmio_trace_array;
73474 static bool overrun_detected;
73475 static unsigned long prev_overruns;
73476 -static atomic_t dropped_count;
73477 +static atomic_unchecked_t dropped_count;
73478
73479 static void mmio_reset_data(struct trace_array *tr)
73480 {
73481 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
73482
73483 static unsigned long count_overruns(struct trace_iterator *iter)
73484 {
73485 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
73486 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
73487 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
73488
73489 if (over > prev_overruns)
73490 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
73491 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
73492 sizeof(*entry), 0, pc);
73493 if (!event) {
73494 - atomic_inc(&dropped_count);
73495 + atomic_inc_unchecked(&dropped_count);
73496 return;
73497 }
73498 entry = ring_buffer_event_data(event);
73499 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
73500 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
73501 sizeof(*entry), 0, pc);
73502 if (!event) {
73503 - atomic_inc(&dropped_count);
73504 + atomic_inc_unchecked(&dropped_count);
73505 return;
73506 }
73507 entry = ring_buffer_event_data(event);
73508 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
73509 index 123b189..97b81f5 100644
73510 --- a/kernel/trace/trace_output.c
73511 +++ b/kernel/trace/trace_output.c
73512 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
73513
73514 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
73515 if (!IS_ERR(p)) {
73516 - p = mangle_path(s->buffer + s->len, p, "\n");
73517 + p = mangle_path(s->buffer + s->len, p, "\n\\");
73518 if (p) {
73519 s->len = p - s->buffer;
73520 return 1;
73521 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
73522 index 0c1b1657..95337e9 100644
73523 --- a/kernel/trace/trace_stack.c
73524 +++ b/kernel/trace/trace_stack.c
73525 @@ -53,7 +53,7 @@ static inline void check_stack(void)
73526 return;
73527
73528 /* we do not handle interrupt stacks yet */
73529 - if (!object_is_on_stack(&this_size))
73530 + if (!object_starts_on_stack(&this_size))
73531 return;
73532
73533 local_irq_save(flags);
73534 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
73535 index 28e9d6c9..50381bd 100644
73536 --- a/lib/Kconfig.debug
73537 +++ b/lib/Kconfig.debug
73538 @@ -1278,6 +1278,7 @@ config LATENCYTOP
73539 depends on DEBUG_KERNEL
73540 depends on STACKTRACE_SUPPORT
73541 depends on PROC_FS
73542 + depends on !GRKERNSEC_HIDESYM
73543 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
73544 select KALLSYMS
73545 select KALLSYMS_ALL
73546 @@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
73547
73548 config PROVIDE_OHCI1394_DMA_INIT
73549 bool "Remote debugging over FireWire early on boot"
73550 - depends on PCI && X86
73551 + depends on PCI && X86 && !GRKERNSEC
73552 help
73553 If you want to debug problems which hang or crash the kernel early
73554 on boot and the crashing machine has a FireWire port, you can use
73555 @@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
73556
73557 config FIREWIRE_OHCI_REMOTE_DMA
73558 bool "Remote debugging over FireWire with firewire-ohci"
73559 - depends on FIREWIRE_OHCI
73560 + depends on FIREWIRE_OHCI && !GRKERNSEC
73561 help
73562 This option lets you use the FireWire bus for remote debugging
73563 with help of the firewire-ohci driver. It enables unfiltered
73564 diff --git a/lib/bitmap.c b/lib/bitmap.c
73565 index 06fdfa1..97c5c7d 100644
73566 --- a/lib/bitmap.c
73567 +++ b/lib/bitmap.c
73568 @@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
73569 {
73570 int c, old_c, totaldigits, ndigits, nchunks, nbits;
73571 u32 chunk;
73572 - const char __user __force *ubuf = (const char __user __force *)buf;
73573 + const char __user *ubuf = (const char __force_user *)buf;
73574
73575 bitmap_zero(maskp, nmaskbits);
73576
73577 @@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
73578 {
73579 if (!access_ok(VERIFY_READ, ubuf, ulen))
73580 return -EFAULT;
73581 - return __bitmap_parse((const char __force *)ubuf,
73582 + return __bitmap_parse((const char __force_kernel *)ubuf,
73583 ulen, 1, maskp, nmaskbits);
73584
73585 }
73586 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
73587 {
73588 unsigned a, b;
73589 int c, old_c, totaldigits;
73590 - const char __user __force *ubuf = (const char __user __force *)buf;
73591 + const char __user *ubuf = (const char __force_user *)buf;
73592 int exp_digit, in_range;
73593
73594 totaldigits = c = 0;
73595 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
73596 {
73597 if (!access_ok(VERIFY_READ, ubuf, ulen))
73598 return -EFAULT;
73599 - return __bitmap_parselist((const char __force *)ubuf,
73600 + return __bitmap_parselist((const char __force_kernel *)ubuf,
73601 ulen, 1, maskp, nmaskbits);
73602 }
73603 EXPORT_SYMBOL(bitmap_parselist_user);
73604 diff --git a/lib/bug.c b/lib/bug.c
73605 index a28c141..2bd3d95 100644
73606 --- a/lib/bug.c
73607 +++ b/lib/bug.c
73608 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
73609 return BUG_TRAP_TYPE_NONE;
73610
73611 bug = find_bug(bugaddr);
73612 + if (!bug)
73613 + return BUG_TRAP_TYPE_NONE;
73614
73615 file = NULL;
73616 line = 0;
73617 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
73618 index d11808c..dc2d6f8 100644
73619 --- a/lib/debugobjects.c
73620 +++ b/lib/debugobjects.c
73621 @@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
73622 if (limit > 4)
73623 return;
73624
73625 - is_on_stack = object_is_on_stack(addr);
73626 + is_on_stack = object_starts_on_stack(addr);
73627 if (is_on_stack == onstack)
73628 return;
73629
73630 diff --git a/lib/devres.c b/lib/devres.c
73631 index 80b9c76..9e32279 100644
73632 --- a/lib/devres.c
73633 +++ b/lib/devres.c
73634 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
73635 void devm_iounmap(struct device *dev, void __iomem *addr)
73636 {
73637 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
73638 - (void *)addr));
73639 + (void __force *)addr));
73640 iounmap(addr);
73641 }
73642 EXPORT_SYMBOL(devm_iounmap);
73643 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
73644 {
73645 ioport_unmap(addr);
73646 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
73647 - devm_ioport_map_match, (void *)addr));
73648 + devm_ioport_map_match, (void __force *)addr));
73649 }
73650 EXPORT_SYMBOL(devm_ioport_unmap);
73651
73652 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
73653 index d84beb9..da44791 100644
73654 --- a/lib/dma-debug.c
73655 +++ b/lib/dma-debug.c
73656 @@ -754,7 +754,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
73657
73658 void dma_debug_add_bus(struct bus_type *bus)
73659 {
73660 - struct notifier_block *nb;
73661 + notifier_block_no_const *nb;
73662
73663 if (global_disable)
73664 return;
73665 @@ -919,7 +919,7 @@ out:
73666
73667 static void check_for_stack(struct device *dev, void *addr)
73668 {
73669 - if (object_is_on_stack(addr))
73670 + if (object_starts_on_stack(addr))
73671 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
73672 "stack [addr=%p]\n", addr);
73673 }
73674 diff --git a/lib/inflate.c b/lib/inflate.c
73675 index 013a761..c28f3fc 100644
73676 --- a/lib/inflate.c
73677 +++ b/lib/inflate.c
73678 @@ -269,7 +269,7 @@ static void free(void *where)
73679 malloc_ptr = free_mem_ptr;
73680 }
73681 #else
73682 -#define malloc(a) kmalloc(a, GFP_KERNEL)
73683 +#define malloc(a) kmalloc((a), GFP_KERNEL)
73684 #define free(a) kfree(a)
73685 #endif
73686
73687 diff --git a/lib/ioremap.c b/lib/ioremap.c
73688 index 0c9216c..863bd89 100644
73689 --- a/lib/ioremap.c
73690 +++ b/lib/ioremap.c
73691 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
73692 unsigned long next;
73693
73694 phys_addr -= addr;
73695 - pmd = pmd_alloc(&init_mm, pud, addr);
73696 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
73697 if (!pmd)
73698 return -ENOMEM;
73699 do {
73700 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
73701 unsigned long next;
73702
73703 phys_addr -= addr;
73704 - pud = pud_alloc(&init_mm, pgd, addr);
73705 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
73706 if (!pud)
73707 return -ENOMEM;
73708 do {
73709 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
73710 index bd2bea9..6b3c95e 100644
73711 --- a/lib/is_single_threaded.c
73712 +++ b/lib/is_single_threaded.c
73713 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
73714 struct task_struct *p, *t;
73715 bool ret;
73716
73717 + if (!mm)
73718 + return true;
73719 +
73720 if (atomic_read(&task->signal->live) != 1)
73721 return false;
73722
73723 diff --git a/lib/list_debug.c b/lib/list_debug.c
73724 index c24c2f7..bef49ee 100644
73725 --- a/lib/list_debug.c
73726 +++ b/lib/list_debug.c
73727 @@ -23,17 +23,19 @@ void __list_add(struct list_head *new,
73728 struct list_head *prev,
73729 struct list_head *next)
73730 {
73731 - WARN(next->prev != prev,
73732 + if (WARN(next->prev != prev,
73733 "list_add corruption. next->prev should be "
73734 "prev (%p), but was %p. (next=%p).\n",
73735 - prev, next->prev, next);
73736 - WARN(prev->next != next,
73737 + prev, next->prev, next) ||
73738 + WARN(prev->next != next,
73739 "list_add corruption. prev->next should be "
73740 "next (%p), but was %p. (prev=%p).\n",
73741 - next, prev->next, prev);
73742 - WARN(new == prev || new == next,
73743 + next, prev->next, prev) ||
73744 + WARN(new == prev || new == next,
73745 "list_add double add: new=%p, prev=%p, next=%p.\n",
73746 - new, prev, next);
73747 + new, prev, next))
73748 + return;
73749 +
73750 next->prev = new;
73751 new->next = next;
73752 new->prev = prev;
73753 @@ -86,12 +88,14 @@ EXPORT_SYMBOL(list_del);
73754 void __list_add_rcu(struct list_head *new,
73755 struct list_head *prev, struct list_head *next)
73756 {
73757 - WARN(next->prev != prev,
73758 + if (WARN(next->prev != prev,
73759 "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
73760 - prev, next->prev, next);
73761 - WARN(prev->next != next,
73762 + prev, next->prev, next) ||
73763 + WARN(prev->next != next,
73764 "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
73765 - next, prev->next, prev);
73766 + next, prev->next, prev))
73767 + return;
73768 +
73769 new->next = next;
73770 new->prev = prev;
73771 rcu_assign_pointer(list_next_rcu(prev), new);
73772 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
73773 index e796429..6e38f9f 100644
73774 --- a/lib/radix-tree.c
73775 +++ b/lib/radix-tree.c
73776 @@ -92,7 +92,7 @@ struct radix_tree_preload {
73777 int nr;
73778 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
73779 };
73780 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
73781 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
73782
73783 static inline void *ptr_to_indirect(void *ptr)
73784 {
73785 diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
73786 index bb2b201..46abaf9 100644
73787 --- a/lib/strncpy_from_user.c
73788 +++ b/lib/strncpy_from_user.c
73789 @@ -21,7 +21,7 @@
73790 */
73791 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
73792 {
73793 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73794 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73795 long res = 0;
73796
73797 /*
73798 diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
73799 index a28df52..3d55877 100644
73800 --- a/lib/strnlen_user.c
73801 +++ b/lib/strnlen_user.c
73802 @@ -26,7 +26,7 @@
73803 */
73804 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
73805 {
73806 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73807 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73808 long align, res = 0;
73809 unsigned long c;
73810
73811 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
73812 index 39c99fe..18f060b 100644
73813 --- a/lib/vsprintf.c
73814 +++ b/lib/vsprintf.c
73815 @@ -16,6 +16,9 @@
73816 * - scnprintf and vscnprintf
73817 */
73818
73819 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73820 +#define __INCLUDED_BY_HIDESYM 1
73821 +#endif
73822 #include <stdarg.h>
73823 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
73824 #include <linux/types.h>
73825 @@ -533,7 +536,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
73826 char sym[KSYM_SYMBOL_LEN];
73827 if (ext == 'B')
73828 sprint_backtrace(sym, value);
73829 - else if (ext != 'f' && ext != 's')
73830 + else if (ext != 'f' && ext != 's' && ext != 'a')
73831 sprint_symbol(sym, value);
73832 else
73833 sprint_symbol_no_offset(sym, value);
73834 @@ -966,7 +969,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
73835 return number(buf, end, *(const netdev_features_t *)addr, spec);
73836 }
73837
73838 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73839 +int kptr_restrict __read_mostly = 2;
73840 +#else
73841 int kptr_restrict __read_mostly;
73842 +#endif
73843
73844 /*
73845 * Show a '%p' thing. A kernel extension is that the '%p' is followed
73846 @@ -980,6 +987,8 @@ int kptr_restrict __read_mostly;
73847 * - 'S' For symbolic direct pointers with offset
73848 * - 's' For symbolic direct pointers without offset
73849 * - 'B' For backtraced symbolic direct pointers with offset
73850 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
73851 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
73852 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
73853 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
73854 * - 'M' For a 6-byte MAC address, it prints the address in the
73855 @@ -1035,12 +1044,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73856
73857 if (!ptr && *fmt != 'K') {
73858 /*
73859 - * Print (null) with the same width as a pointer so it makes
73860 + * Print (nil) with the same width as a pointer so it makes
73861 * tabular output look nice.
73862 */
73863 if (spec.field_width == -1)
73864 spec.field_width = default_width;
73865 - return string(buf, end, "(null)", spec);
73866 + return string(buf, end, "(nil)", spec);
73867 }
73868
73869 switch (*fmt) {
73870 @@ -1050,6 +1059,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73871 /* Fallthrough */
73872 case 'S':
73873 case 's':
73874 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73875 + break;
73876 +#else
73877 + return symbol_string(buf, end, ptr, spec, *fmt);
73878 +#endif
73879 + case 'A':
73880 + case 'a':
73881 case 'B':
73882 return symbol_string(buf, end, ptr, spec, *fmt);
73883 case 'R':
73884 @@ -1090,6 +1106,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73885 va_end(va);
73886 return buf;
73887 }
73888 + case 'P':
73889 + break;
73890 case 'K':
73891 /*
73892 * %pK cannot be used in IRQ context because its test
73893 @@ -1113,6 +1131,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73894 }
73895 break;
73896 }
73897 +
73898 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73899 + /* 'P' = approved pointers to copy to userland,
73900 + as in the /proc/kallsyms case, as we make it display nothing
73901 + for non-root users, and the real contents for root users
73902 + Also ignore 'K' pointers, since we force their NULLing for non-root users
73903 + above
73904 + */
73905 + if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
73906 + printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
73907 + dump_stack();
73908 + ptr = NULL;
73909 + }
73910 +#endif
73911 +
73912 spec.flags |= SMALL;
73913 if (spec.field_width == -1) {
73914 spec.field_width = default_width;
73915 @@ -1831,11 +1864,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
73916 typeof(type) value; \
73917 if (sizeof(type) == 8) { \
73918 args = PTR_ALIGN(args, sizeof(u32)); \
73919 - *(u32 *)&value = *(u32 *)args; \
73920 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
73921 + *(u32 *)&value = *(const u32 *)args; \
73922 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
73923 } else { \
73924 args = PTR_ALIGN(args, sizeof(type)); \
73925 - value = *(typeof(type) *)args; \
73926 + value = *(const typeof(type) *)args; \
73927 } \
73928 args += sizeof(type); \
73929 value; \
73930 @@ -1898,7 +1931,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
73931 case FORMAT_TYPE_STR: {
73932 const char *str_arg = args;
73933 args += strlen(str_arg) + 1;
73934 - str = string(str, end, (char *)str_arg, spec);
73935 + str = string(str, end, str_arg, spec);
73936 break;
73937 }
73938
73939 diff --git a/localversion-grsec b/localversion-grsec
73940 new file mode 100644
73941 index 0000000..7cd6065
73942 --- /dev/null
73943 +++ b/localversion-grsec
73944 @@ -0,0 +1 @@
73945 +-grsec
73946 diff --git a/mm/Kconfig b/mm/Kconfig
73947 index a3f8ddd..f31e92e 100644
73948 --- a/mm/Kconfig
73949 +++ b/mm/Kconfig
73950 @@ -252,10 +252,10 @@ config KSM
73951 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
73952
73953 config DEFAULT_MMAP_MIN_ADDR
73954 - int "Low address space to protect from user allocation"
73955 + int "Low address space to protect from user allocation"
73956 depends on MMU
73957 - default 4096
73958 - help
73959 + default 65536
73960 + help
73961 This is the portion of low virtual memory which should be protected
73962 from userspace allocation. Keeping a user from writing to low pages
73963 can help reduce the impact of kernel NULL pointer bugs.
73964 @@ -286,7 +286,7 @@ config MEMORY_FAILURE
73965
73966 config HWPOISON_INJECT
73967 tristate "HWPoison pages injector"
73968 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
73969 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
73970 select PROC_PAGE_MONITOR
73971
73972 config NOMMU_INITIAL_TRIM_EXCESS
73973 diff --git a/mm/filemap.c b/mm/filemap.c
73974 index 83efee7..3f99381 100644
73975 --- a/mm/filemap.c
73976 +++ b/mm/filemap.c
73977 @@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
73978 struct address_space *mapping = file->f_mapping;
73979
73980 if (!mapping->a_ops->readpage)
73981 - return -ENOEXEC;
73982 + return -ENODEV;
73983 file_accessed(file);
73984 vma->vm_ops = &generic_file_vm_ops;
73985 return 0;
73986 @@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
73987 *pos = i_size_read(inode);
73988
73989 if (limit != RLIM_INFINITY) {
73990 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
73991 if (*pos >= limit) {
73992 send_sig(SIGXFSZ, current, 0);
73993 return -EFBIG;
73994 diff --git a/mm/fremap.c b/mm/fremap.c
73995 index a0aaf0e..20325c3 100644
73996 --- a/mm/fremap.c
73997 +++ b/mm/fremap.c
73998 @@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
73999 retry:
74000 vma = find_vma(mm, start);
74001
74002 +#ifdef CONFIG_PAX_SEGMEXEC
74003 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
74004 + goto out;
74005 +#endif
74006 +
74007 /*
74008 * Make sure the vma is shared, that it supports prefaulting,
74009 * and that the remapped range is valid and fully within
74010 diff --git a/mm/highmem.c b/mm/highmem.c
74011 index 2da13a5..9568fec 100644
74012 --- a/mm/highmem.c
74013 +++ b/mm/highmem.c
74014 @@ -137,9 +137,10 @@ static void flush_all_zero_pkmaps(void)
74015 * So no dangers, even with speculative execution.
74016 */
74017 page = pte_page(pkmap_page_table[i]);
74018 + pax_open_kernel();
74019 pte_clear(&init_mm, (unsigned long)page_address(page),
74020 &pkmap_page_table[i]);
74021 -
74022 + pax_close_kernel();
74023 set_page_address(page, NULL);
74024 need_flush = 1;
74025 }
74026 @@ -198,9 +199,11 @@ start:
74027 }
74028 }
74029 vaddr = PKMAP_ADDR(last_pkmap_nr);
74030 +
74031 + pax_open_kernel();
74032 set_pte_at(&init_mm, vaddr,
74033 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
74034 -
74035 + pax_close_kernel();
74036 pkmap_count[last_pkmap_nr] = 1;
74037 set_page_address(page, (void *)vaddr);
74038
74039 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
74040 index 40f17c3..c1cc011 100644
74041 --- a/mm/huge_memory.c
74042 +++ b/mm/huge_memory.c
74043 @@ -710,7 +710,7 @@ out:
74044 * run pte_offset_map on the pmd, if an huge pmd could
74045 * materialize from under us from a different thread.
74046 */
74047 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
74048 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
74049 return VM_FAULT_OOM;
74050 /* if an huge pmd materialized from under us just retry later */
74051 if (unlikely(pmd_trans_huge(*pmd)))
74052 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
74053 index 59a0059..b3f3d86 100644
74054 --- a/mm/hugetlb.c
74055 +++ b/mm/hugetlb.c
74056 @@ -2518,6 +2518,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
74057 return 1;
74058 }
74059
74060 +#ifdef CONFIG_PAX_SEGMEXEC
74061 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
74062 +{
74063 + struct mm_struct *mm = vma->vm_mm;
74064 + struct vm_area_struct *vma_m;
74065 + unsigned long address_m;
74066 + pte_t *ptep_m;
74067 +
74068 + vma_m = pax_find_mirror_vma(vma);
74069 + if (!vma_m)
74070 + return;
74071 +
74072 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74073 + address_m = address + SEGMEXEC_TASK_SIZE;
74074 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
74075 + get_page(page_m);
74076 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
74077 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
74078 +}
74079 +#endif
74080 +
74081 /*
74082 * Hugetlb_cow() should be called with page lock of the original hugepage held.
74083 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
74084 @@ -2636,6 +2657,11 @@ retry_avoidcopy:
74085 make_huge_pte(vma, new_page, 1));
74086 page_remove_rmap(old_page);
74087 hugepage_add_new_anon_rmap(new_page, vma, address);
74088 +
74089 +#ifdef CONFIG_PAX_SEGMEXEC
74090 + pax_mirror_huge_pte(vma, address, new_page);
74091 +#endif
74092 +
74093 /* Make the old page be freed below */
74094 new_page = old_page;
74095 }
74096 @@ -2795,6 +2821,10 @@ retry:
74097 && (vma->vm_flags & VM_SHARED)));
74098 set_huge_pte_at(mm, address, ptep, new_pte);
74099
74100 +#ifdef CONFIG_PAX_SEGMEXEC
74101 + pax_mirror_huge_pte(vma, address, page);
74102 +#endif
74103 +
74104 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
74105 /* Optimization, do the COW without a second fault */
74106 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
74107 @@ -2824,6 +2854,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74108 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
74109 struct hstate *h = hstate_vma(vma);
74110
74111 +#ifdef CONFIG_PAX_SEGMEXEC
74112 + struct vm_area_struct *vma_m;
74113 +#endif
74114 +
74115 address &= huge_page_mask(h);
74116
74117 ptep = huge_pte_offset(mm, address);
74118 @@ -2837,6 +2871,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74119 VM_FAULT_SET_HINDEX(hstate_index(h));
74120 }
74121
74122 +#ifdef CONFIG_PAX_SEGMEXEC
74123 + vma_m = pax_find_mirror_vma(vma);
74124 + if (vma_m) {
74125 + unsigned long address_m;
74126 +
74127 + if (vma->vm_start > vma_m->vm_start) {
74128 + address_m = address;
74129 + address -= SEGMEXEC_TASK_SIZE;
74130 + vma = vma_m;
74131 + h = hstate_vma(vma);
74132 + } else
74133 + address_m = address + SEGMEXEC_TASK_SIZE;
74134 +
74135 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
74136 + return VM_FAULT_OOM;
74137 + address_m &= HPAGE_MASK;
74138 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
74139 + }
74140 +#endif
74141 +
74142 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
74143 if (!ptep)
74144 return VM_FAULT_OOM;
74145 diff --git a/mm/internal.h b/mm/internal.h
74146 index a4fa284..9a02499 100644
74147 --- a/mm/internal.h
74148 +++ b/mm/internal.h
74149 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
74150 * in mm/page_alloc.c
74151 */
74152 extern void __free_pages_bootmem(struct page *page, unsigned int order);
74153 +extern void free_compound_page(struct page *page);
74154 extern void prep_compound_page(struct page *page, unsigned long order);
74155 #ifdef CONFIG_MEMORY_FAILURE
74156 extern bool is_free_buddy_page(struct page *page);
74157 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
74158 index a217cc5..74c9ec0 100644
74159 --- a/mm/kmemleak.c
74160 +++ b/mm/kmemleak.c
74161 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
74162
74163 for (i = 0; i < object->trace_len; i++) {
74164 void *ptr = (void *)object->trace[i];
74165 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
74166 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
74167 }
74168 }
74169
74170 diff --git a/mm/maccess.c b/mm/maccess.c
74171 index d53adf9..03a24bf 100644
74172 --- a/mm/maccess.c
74173 +++ b/mm/maccess.c
74174 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
74175 set_fs(KERNEL_DS);
74176 pagefault_disable();
74177 ret = __copy_from_user_inatomic(dst,
74178 - (__force const void __user *)src, size);
74179 + (const void __force_user *)src, size);
74180 pagefault_enable();
74181 set_fs(old_fs);
74182
74183 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
74184
74185 set_fs(KERNEL_DS);
74186 pagefault_disable();
74187 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
74188 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
74189 pagefault_enable();
74190 set_fs(old_fs);
74191
74192 diff --git a/mm/madvise.c b/mm/madvise.c
74193 index 03dfa5c..b032917 100644
74194 --- a/mm/madvise.c
74195 +++ b/mm/madvise.c
74196 @@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
74197 pgoff_t pgoff;
74198 unsigned long new_flags = vma->vm_flags;
74199
74200 +#ifdef CONFIG_PAX_SEGMEXEC
74201 + struct vm_area_struct *vma_m;
74202 +#endif
74203 +
74204 switch (behavior) {
74205 case MADV_NORMAL:
74206 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
74207 @@ -123,6 +127,13 @@ success:
74208 /*
74209 * vm_flags is protected by the mmap_sem held in write mode.
74210 */
74211 +
74212 +#ifdef CONFIG_PAX_SEGMEXEC
74213 + vma_m = pax_find_mirror_vma(vma);
74214 + if (vma_m)
74215 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
74216 +#endif
74217 +
74218 vma->vm_flags = new_flags;
74219
74220 out:
74221 @@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
74222 struct vm_area_struct ** prev,
74223 unsigned long start, unsigned long end)
74224 {
74225 +
74226 +#ifdef CONFIG_PAX_SEGMEXEC
74227 + struct vm_area_struct *vma_m;
74228 +#endif
74229 +
74230 *prev = vma;
74231 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
74232 return -EINVAL;
74233 @@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
74234 zap_page_range(vma, start, end - start, &details);
74235 } else
74236 zap_page_range(vma, start, end - start, NULL);
74237 +
74238 +#ifdef CONFIG_PAX_SEGMEXEC
74239 + vma_m = pax_find_mirror_vma(vma);
74240 + if (vma_m) {
74241 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
74242 + struct zap_details details = {
74243 + .nonlinear_vma = vma_m,
74244 + .last_index = ULONG_MAX,
74245 + };
74246 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
74247 + } else
74248 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
74249 + }
74250 +#endif
74251 +
74252 return 0;
74253 }
74254
74255 @@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
74256 if (end < start)
74257 goto out;
74258
74259 +#ifdef CONFIG_PAX_SEGMEXEC
74260 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
74261 + if (end > SEGMEXEC_TASK_SIZE)
74262 + goto out;
74263 + } else
74264 +#endif
74265 +
74266 + if (end > TASK_SIZE)
74267 + goto out;
74268 +
74269 error = 0;
74270 if (end == start)
74271 goto out;
74272 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
74273 index 8b20278..05dac18 100644
74274 --- a/mm/memory-failure.c
74275 +++ b/mm/memory-failure.c
74276 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
74277
74278 int sysctl_memory_failure_recovery __read_mostly = 1;
74279
74280 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
74281 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
74282
74283 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
74284
74285 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
74286 pfn, t->comm, t->pid);
74287 si.si_signo = SIGBUS;
74288 si.si_errno = 0;
74289 - si.si_addr = (void *)addr;
74290 + si.si_addr = (void __user *)addr;
74291 #ifdef __ARCH_SI_TRAPNO
74292 si.si_trapno = trapno;
74293 #endif
74294 @@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
74295 }
74296
74297 nr_pages = 1 << compound_trans_order(hpage);
74298 - atomic_long_add(nr_pages, &mce_bad_pages);
74299 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
74300
74301 /*
74302 * We need/can do nothing about count=0 pages.
74303 @@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
74304 if (!PageHWPoison(hpage)
74305 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
74306 || (p != hpage && TestSetPageHWPoison(hpage))) {
74307 - atomic_long_sub(nr_pages, &mce_bad_pages);
74308 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74309 return 0;
74310 }
74311 set_page_hwpoison_huge_page(hpage);
74312 @@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
74313 }
74314 if (hwpoison_filter(p)) {
74315 if (TestClearPageHWPoison(p))
74316 - atomic_long_sub(nr_pages, &mce_bad_pages);
74317 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74318 unlock_page(hpage);
74319 put_page(hpage);
74320 return 0;
74321 @@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
74322 return 0;
74323 }
74324 if (TestClearPageHWPoison(p))
74325 - atomic_long_sub(nr_pages, &mce_bad_pages);
74326 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74327 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
74328 return 0;
74329 }
74330 @@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
74331 */
74332 if (TestClearPageHWPoison(page)) {
74333 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
74334 - atomic_long_sub(nr_pages, &mce_bad_pages);
74335 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74336 freeit = 1;
74337 if (PageHuge(page))
74338 clear_page_hwpoison_huge_page(page);
74339 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
74340 }
74341 done:
74342 if (!PageHWPoison(hpage))
74343 - atomic_long_add(1 << compound_trans_order(hpage),
74344 + atomic_long_add_unchecked(1 << compound_trans_order(hpage),
74345 &mce_bad_pages);
74346 set_page_hwpoison_huge_page(hpage);
74347 dequeue_hwpoisoned_huge_page(hpage);
74348 @@ -1582,7 +1582,7 @@ int soft_offline_page(struct page *page, int flags)
74349 return ret;
74350
74351 done:
74352 - atomic_long_add(1, &mce_bad_pages);
74353 + atomic_long_add_unchecked(1, &mce_bad_pages);
74354 SetPageHWPoison(page);
74355 /* keep elevated page count for bad page */
74356 return ret;
74357 diff --git a/mm/memory.c b/mm/memory.c
74358 index 221fc9f..d1d4db1 100644
74359 --- a/mm/memory.c
74360 +++ b/mm/memory.c
74361 @@ -426,6 +426,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
74362 free_pte_range(tlb, pmd, addr);
74363 } while (pmd++, addr = next, addr != end);
74364
74365 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
74366 start &= PUD_MASK;
74367 if (start < floor)
74368 return;
74369 @@ -440,6 +441,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
74370 pmd = pmd_offset(pud, start);
74371 pud_clear(pud);
74372 pmd_free_tlb(tlb, pmd, start);
74373 +#endif
74374 +
74375 }
74376
74377 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74378 @@ -459,6 +462,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74379 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
74380 } while (pud++, addr = next, addr != end);
74381
74382 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
74383 start &= PGDIR_MASK;
74384 if (start < floor)
74385 return;
74386 @@ -473,6 +477,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74387 pud = pud_offset(pgd, start);
74388 pgd_clear(pgd);
74389 pud_free_tlb(tlb, pud, start);
74390 +#endif
74391 +
74392 }
74393
74394 /*
74395 @@ -1621,12 +1627,6 @@ no_page_table:
74396 return page;
74397 }
74398
74399 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
74400 -{
74401 - return stack_guard_page_start(vma, addr) ||
74402 - stack_guard_page_end(vma, addr+PAGE_SIZE);
74403 -}
74404 -
74405 /**
74406 * __get_user_pages() - pin user pages in memory
74407 * @tsk: task_struct of target task
74408 @@ -1699,10 +1699,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74409 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
74410 i = 0;
74411
74412 - do {
74413 + while (nr_pages) {
74414 struct vm_area_struct *vma;
74415
74416 - vma = find_extend_vma(mm, start);
74417 + vma = find_vma(mm, start);
74418 if (!vma && in_gate_area(mm, start)) {
74419 unsigned long pg = start & PAGE_MASK;
74420 pgd_t *pgd;
74421 @@ -1750,7 +1750,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74422 goto next_page;
74423 }
74424
74425 - if (!vma ||
74426 + if (!vma || start < vma->vm_start ||
74427 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
74428 !(vm_flags & vma->vm_flags))
74429 return i ? : -EFAULT;
74430 @@ -1777,11 +1777,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74431 int ret;
74432 unsigned int fault_flags = 0;
74433
74434 - /* For mlock, just skip the stack guard page. */
74435 - if (foll_flags & FOLL_MLOCK) {
74436 - if (stack_guard_page(vma, start))
74437 - goto next_page;
74438 - }
74439 if (foll_flags & FOLL_WRITE)
74440 fault_flags |= FAULT_FLAG_WRITE;
74441 if (nonblocking)
74442 @@ -1855,7 +1850,7 @@ next_page:
74443 start += PAGE_SIZE;
74444 nr_pages--;
74445 } while (nr_pages && start < vma->vm_end);
74446 - } while (nr_pages);
74447 + }
74448 return i;
74449 }
74450 EXPORT_SYMBOL(__get_user_pages);
74451 @@ -2062,6 +2057,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
74452 page_add_file_rmap(page);
74453 set_pte_at(mm, addr, pte, mk_pte(page, prot));
74454
74455 +#ifdef CONFIG_PAX_SEGMEXEC
74456 + pax_mirror_file_pte(vma, addr, page, ptl);
74457 +#endif
74458 +
74459 retval = 0;
74460 pte_unmap_unlock(pte, ptl);
74461 return retval;
74462 @@ -2106,9 +2105,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
74463 if (!page_count(page))
74464 return -EINVAL;
74465 if (!(vma->vm_flags & VM_MIXEDMAP)) {
74466 +
74467 +#ifdef CONFIG_PAX_SEGMEXEC
74468 + struct vm_area_struct *vma_m;
74469 +#endif
74470 +
74471 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
74472 BUG_ON(vma->vm_flags & VM_PFNMAP);
74473 vma->vm_flags |= VM_MIXEDMAP;
74474 +
74475 +#ifdef CONFIG_PAX_SEGMEXEC
74476 + vma_m = pax_find_mirror_vma(vma);
74477 + if (vma_m)
74478 + vma_m->vm_flags |= VM_MIXEDMAP;
74479 +#endif
74480 +
74481 }
74482 return insert_page(vma, addr, page, vma->vm_page_prot);
74483 }
74484 @@ -2191,6 +2202,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
74485 unsigned long pfn)
74486 {
74487 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
74488 + BUG_ON(vma->vm_mirror);
74489
74490 if (addr < vma->vm_start || addr >= vma->vm_end)
74491 return -EFAULT;
74492 @@ -2391,7 +2403,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
74493
74494 BUG_ON(pud_huge(*pud));
74495
74496 - pmd = pmd_alloc(mm, pud, addr);
74497 + pmd = (mm == &init_mm) ?
74498 + pmd_alloc_kernel(mm, pud, addr) :
74499 + pmd_alloc(mm, pud, addr);
74500 if (!pmd)
74501 return -ENOMEM;
74502 do {
74503 @@ -2411,7 +2425,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
74504 unsigned long next;
74505 int err;
74506
74507 - pud = pud_alloc(mm, pgd, addr);
74508 + pud = (mm == &init_mm) ?
74509 + pud_alloc_kernel(mm, pgd, addr) :
74510 + pud_alloc(mm, pgd, addr);
74511 if (!pud)
74512 return -ENOMEM;
74513 do {
74514 @@ -2499,6 +2515,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
74515 copy_user_highpage(dst, src, va, vma);
74516 }
74517
74518 +#ifdef CONFIG_PAX_SEGMEXEC
74519 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
74520 +{
74521 + struct mm_struct *mm = vma->vm_mm;
74522 + spinlock_t *ptl;
74523 + pte_t *pte, entry;
74524 +
74525 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
74526 + entry = *pte;
74527 + if (!pte_present(entry)) {
74528 + if (!pte_none(entry)) {
74529 + BUG_ON(pte_file(entry));
74530 + free_swap_and_cache(pte_to_swp_entry(entry));
74531 + pte_clear_not_present_full(mm, address, pte, 0);
74532 + }
74533 + } else {
74534 + struct page *page;
74535 +
74536 + flush_cache_page(vma, address, pte_pfn(entry));
74537 + entry = ptep_clear_flush(vma, address, pte);
74538 + BUG_ON(pte_dirty(entry));
74539 + page = vm_normal_page(vma, address, entry);
74540 + if (page) {
74541 + update_hiwater_rss(mm);
74542 + if (PageAnon(page))
74543 + dec_mm_counter_fast(mm, MM_ANONPAGES);
74544 + else
74545 + dec_mm_counter_fast(mm, MM_FILEPAGES);
74546 + page_remove_rmap(page);
74547 + page_cache_release(page);
74548 + }
74549 + }
74550 + pte_unmap_unlock(pte, ptl);
74551 +}
74552 +
74553 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
74554 + *
74555 + * the ptl of the lower mapped page is held on entry and is not released on exit
74556 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
74557 + */
74558 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
74559 +{
74560 + struct mm_struct *mm = vma->vm_mm;
74561 + unsigned long address_m;
74562 + spinlock_t *ptl_m;
74563 + struct vm_area_struct *vma_m;
74564 + pmd_t *pmd_m;
74565 + pte_t *pte_m, entry_m;
74566 +
74567 + BUG_ON(!page_m || !PageAnon(page_m));
74568 +
74569 + vma_m = pax_find_mirror_vma(vma);
74570 + if (!vma_m)
74571 + return;
74572 +
74573 + BUG_ON(!PageLocked(page_m));
74574 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74575 + address_m = address + SEGMEXEC_TASK_SIZE;
74576 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74577 + pte_m = pte_offset_map(pmd_m, address_m);
74578 + ptl_m = pte_lockptr(mm, pmd_m);
74579 + if (ptl != ptl_m) {
74580 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74581 + if (!pte_none(*pte_m))
74582 + goto out;
74583 + }
74584 +
74585 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
74586 + page_cache_get(page_m);
74587 + page_add_anon_rmap(page_m, vma_m, address_m);
74588 + inc_mm_counter_fast(mm, MM_ANONPAGES);
74589 + set_pte_at(mm, address_m, pte_m, entry_m);
74590 + update_mmu_cache(vma_m, address_m, entry_m);
74591 +out:
74592 + if (ptl != ptl_m)
74593 + spin_unlock(ptl_m);
74594 + pte_unmap(pte_m);
74595 + unlock_page(page_m);
74596 +}
74597 +
74598 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
74599 +{
74600 + struct mm_struct *mm = vma->vm_mm;
74601 + unsigned long address_m;
74602 + spinlock_t *ptl_m;
74603 + struct vm_area_struct *vma_m;
74604 + pmd_t *pmd_m;
74605 + pte_t *pte_m, entry_m;
74606 +
74607 + BUG_ON(!page_m || PageAnon(page_m));
74608 +
74609 + vma_m = pax_find_mirror_vma(vma);
74610 + if (!vma_m)
74611 + return;
74612 +
74613 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74614 + address_m = address + SEGMEXEC_TASK_SIZE;
74615 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74616 + pte_m = pte_offset_map(pmd_m, address_m);
74617 + ptl_m = pte_lockptr(mm, pmd_m);
74618 + if (ptl != ptl_m) {
74619 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74620 + if (!pte_none(*pte_m))
74621 + goto out;
74622 + }
74623 +
74624 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
74625 + page_cache_get(page_m);
74626 + page_add_file_rmap(page_m);
74627 + inc_mm_counter_fast(mm, MM_FILEPAGES);
74628 + set_pte_at(mm, address_m, pte_m, entry_m);
74629 + update_mmu_cache(vma_m, address_m, entry_m);
74630 +out:
74631 + if (ptl != ptl_m)
74632 + spin_unlock(ptl_m);
74633 + pte_unmap(pte_m);
74634 +}
74635 +
74636 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
74637 +{
74638 + struct mm_struct *mm = vma->vm_mm;
74639 + unsigned long address_m;
74640 + spinlock_t *ptl_m;
74641 + struct vm_area_struct *vma_m;
74642 + pmd_t *pmd_m;
74643 + pte_t *pte_m, entry_m;
74644 +
74645 + vma_m = pax_find_mirror_vma(vma);
74646 + if (!vma_m)
74647 + return;
74648 +
74649 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74650 + address_m = address + SEGMEXEC_TASK_SIZE;
74651 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74652 + pte_m = pte_offset_map(pmd_m, address_m);
74653 + ptl_m = pte_lockptr(mm, pmd_m);
74654 + if (ptl != ptl_m) {
74655 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74656 + if (!pte_none(*pte_m))
74657 + goto out;
74658 + }
74659 +
74660 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
74661 + set_pte_at(mm, address_m, pte_m, entry_m);
74662 +out:
74663 + if (ptl != ptl_m)
74664 + spin_unlock(ptl_m);
74665 + pte_unmap(pte_m);
74666 +}
74667 +
74668 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
74669 +{
74670 + struct page *page_m;
74671 + pte_t entry;
74672 +
74673 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
74674 + goto out;
74675 +
74676 + entry = *pte;
74677 + page_m = vm_normal_page(vma, address, entry);
74678 + if (!page_m)
74679 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
74680 + else if (PageAnon(page_m)) {
74681 + if (pax_find_mirror_vma(vma)) {
74682 + pte_unmap_unlock(pte, ptl);
74683 + lock_page(page_m);
74684 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
74685 + if (pte_same(entry, *pte))
74686 + pax_mirror_anon_pte(vma, address, page_m, ptl);
74687 + else
74688 + unlock_page(page_m);
74689 + }
74690 + } else
74691 + pax_mirror_file_pte(vma, address, page_m, ptl);
74692 +
74693 +out:
74694 + pte_unmap_unlock(pte, ptl);
74695 +}
74696 +#endif
74697 +
74698 /*
74699 * This routine handles present pages, when users try to write
74700 * to a shared page. It is done by copying the page to a new address
74701 @@ -2715,6 +2911,12 @@ gotten:
74702 */
74703 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
74704 if (likely(pte_same(*page_table, orig_pte))) {
74705 +
74706 +#ifdef CONFIG_PAX_SEGMEXEC
74707 + if (pax_find_mirror_vma(vma))
74708 + BUG_ON(!trylock_page(new_page));
74709 +#endif
74710 +
74711 if (old_page) {
74712 if (!PageAnon(old_page)) {
74713 dec_mm_counter_fast(mm, MM_FILEPAGES);
74714 @@ -2766,6 +2968,10 @@ gotten:
74715 page_remove_rmap(old_page);
74716 }
74717
74718 +#ifdef CONFIG_PAX_SEGMEXEC
74719 + pax_mirror_anon_pte(vma, address, new_page, ptl);
74720 +#endif
74721 +
74722 /* Free the old page.. */
74723 new_page = old_page;
74724 ret |= VM_FAULT_WRITE;
74725 @@ -3046,6 +3252,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
74726 swap_free(entry);
74727 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
74728 try_to_free_swap(page);
74729 +
74730 +#ifdef CONFIG_PAX_SEGMEXEC
74731 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
74732 +#endif
74733 +
74734 unlock_page(page);
74735 if (swapcache) {
74736 /*
74737 @@ -3069,6 +3280,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
74738
74739 /* No need to invalidate - it was non-present before */
74740 update_mmu_cache(vma, address, page_table);
74741 +
74742 +#ifdef CONFIG_PAX_SEGMEXEC
74743 + pax_mirror_anon_pte(vma, address, page, ptl);
74744 +#endif
74745 +
74746 unlock:
74747 pte_unmap_unlock(page_table, ptl);
74748 out:
74749 @@ -3088,40 +3304,6 @@ out_release:
74750 }
74751
74752 /*
74753 - * This is like a special single-page "expand_{down|up}wards()",
74754 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
74755 - * doesn't hit another vma.
74756 - */
74757 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
74758 -{
74759 - address &= PAGE_MASK;
74760 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
74761 - struct vm_area_struct *prev = vma->vm_prev;
74762 -
74763 - /*
74764 - * Is there a mapping abutting this one below?
74765 - *
74766 - * That's only ok if it's the same stack mapping
74767 - * that has gotten split..
74768 - */
74769 - if (prev && prev->vm_end == address)
74770 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
74771 -
74772 - expand_downwards(vma, address - PAGE_SIZE);
74773 - }
74774 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
74775 - struct vm_area_struct *next = vma->vm_next;
74776 -
74777 - /* As VM_GROWSDOWN but s/below/above/ */
74778 - if (next && next->vm_start == address + PAGE_SIZE)
74779 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
74780 -
74781 - expand_upwards(vma, address + PAGE_SIZE);
74782 - }
74783 - return 0;
74784 -}
74785 -
74786 -/*
74787 * We enter with non-exclusive mmap_sem (to exclude vma changes,
74788 * but allow concurrent faults), and pte mapped but not yet locked.
74789 * We return with mmap_sem still held, but pte unmapped and unlocked.
74790 @@ -3130,27 +3312,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
74791 unsigned long address, pte_t *page_table, pmd_t *pmd,
74792 unsigned int flags)
74793 {
74794 - struct page *page;
74795 + struct page *page = NULL;
74796 spinlock_t *ptl;
74797 pte_t entry;
74798
74799 - pte_unmap(page_table);
74800 -
74801 - /* Check if we need to add a guard page to the stack */
74802 - if (check_stack_guard_page(vma, address) < 0)
74803 - return VM_FAULT_SIGBUS;
74804 -
74805 - /* Use the zero-page for reads */
74806 if (!(flags & FAULT_FLAG_WRITE)) {
74807 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
74808 vma->vm_page_prot));
74809 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
74810 + ptl = pte_lockptr(mm, pmd);
74811 + spin_lock(ptl);
74812 if (!pte_none(*page_table))
74813 goto unlock;
74814 goto setpte;
74815 }
74816
74817 /* Allocate our own private page. */
74818 + pte_unmap(page_table);
74819 +
74820 if (unlikely(anon_vma_prepare(vma)))
74821 goto oom;
74822 page = alloc_zeroed_user_highpage_movable(vma, address);
74823 @@ -3169,6 +3347,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
74824 if (!pte_none(*page_table))
74825 goto release;
74826
74827 +#ifdef CONFIG_PAX_SEGMEXEC
74828 + if (pax_find_mirror_vma(vma))
74829 + BUG_ON(!trylock_page(page));
74830 +#endif
74831 +
74832 inc_mm_counter_fast(mm, MM_ANONPAGES);
74833 page_add_new_anon_rmap(page, vma, address);
74834 setpte:
74835 @@ -3176,6 +3359,12 @@ setpte:
74836
74837 /* No need to invalidate - it was non-present before */
74838 update_mmu_cache(vma, address, page_table);
74839 +
74840 +#ifdef CONFIG_PAX_SEGMEXEC
74841 + if (page)
74842 + pax_mirror_anon_pte(vma, address, page, ptl);
74843 +#endif
74844 +
74845 unlock:
74846 pte_unmap_unlock(page_table, ptl);
74847 return 0;
74848 @@ -3319,6 +3508,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74849 */
74850 /* Only go through if we didn't race with anybody else... */
74851 if (likely(pte_same(*page_table, orig_pte))) {
74852 +
74853 +#ifdef CONFIG_PAX_SEGMEXEC
74854 + if (anon && pax_find_mirror_vma(vma))
74855 + BUG_ON(!trylock_page(page));
74856 +#endif
74857 +
74858 flush_icache_page(vma, page);
74859 entry = mk_pte(page, vma->vm_page_prot);
74860 if (flags & FAULT_FLAG_WRITE)
74861 @@ -3338,6 +3533,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74862
74863 /* no need to invalidate: a not-present page won't be cached */
74864 update_mmu_cache(vma, address, page_table);
74865 +
74866 +#ifdef CONFIG_PAX_SEGMEXEC
74867 + if (anon)
74868 + pax_mirror_anon_pte(vma, address, page, ptl);
74869 + else
74870 + pax_mirror_file_pte(vma, address, page, ptl);
74871 +#endif
74872 +
74873 } else {
74874 if (cow_page)
74875 mem_cgroup_uncharge_page(cow_page);
74876 @@ -3492,6 +3695,12 @@ int handle_pte_fault(struct mm_struct *mm,
74877 if (flags & FAULT_FLAG_WRITE)
74878 flush_tlb_fix_spurious_fault(vma, address);
74879 }
74880 +
74881 +#ifdef CONFIG_PAX_SEGMEXEC
74882 + pax_mirror_pte(vma, address, pte, pmd, ptl);
74883 + return 0;
74884 +#endif
74885 +
74886 unlock:
74887 pte_unmap_unlock(pte, ptl);
74888 return 0;
74889 @@ -3508,6 +3717,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74890 pmd_t *pmd;
74891 pte_t *pte;
74892
74893 +#ifdef CONFIG_PAX_SEGMEXEC
74894 + struct vm_area_struct *vma_m;
74895 +#endif
74896 +
74897 __set_current_state(TASK_RUNNING);
74898
74899 count_vm_event(PGFAULT);
74900 @@ -3519,6 +3732,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74901 if (unlikely(is_vm_hugetlb_page(vma)))
74902 return hugetlb_fault(mm, vma, address, flags);
74903
74904 +#ifdef CONFIG_PAX_SEGMEXEC
74905 + vma_m = pax_find_mirror_vma(vma);
74906 + if (vma_m) {
74907 + unsigned long address_m;
74908 + pgd_t *pgd_m;
74909 + pud_t *pud_m;
74910 + pmd_t *pmd_m;
74911 +
74912 + if (vma->vm_start > vma_m->vm_start) {
74913 + address_m = address;
74914 + address -= SEGMEXEC_TASK_SIZE;
74915 + vma = vma_m;
74916 + } else
74917 + address_m = address + SEGMEXEC_TASK_SIZE;
74918 +
74919 + pgd_m = pgd_offset(mm, address_m);
74920 + pud_m = pud_alloc(mm, pgd_m, address_m);
74921 + if (!pud_m)
74922 + return VM_FAULT_OOM;
74923 + pmd_m = pmd_alloc(mm, pud_m, address_m);
74924 + if (!pmd_m)
74925 + return VM_FAULT_OOM;
74926 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
74927 + return VM_FAULT_OOM;
74928 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
74929 + }
74930 +#endif
74931 +
74932 retry:
74933 pgd = pgd_offset(mm, address);
74934 pud = pud_alloc(mm, pgd, address);
74935 @@ -3560,7 +3801,7 @@ retry:
74936 * run pte_offset_map on the pmd, if an huge pmd could
74937 * materialize from under us from a different thread.
74938 */
74939 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
74940 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
74941 return VM_FAULT_OOM;
74942 /* if an huge pmd materialized from under us just retry later */
74943 if (unlikely(pmd_trans_huge(*pmd)))
74944 @@ -3597,6 +3838,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
74945 spin_unlock(&mm->page_table_lock);
74946 return 0;
74947 }
74948 +
74949 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
74950 +{
74951 + pud_t *new = pud_alloc_one(mm, address);
74952 + if (!new)
74953 + return -ENOMEM;
74954 +
74955 + smp_wmb(); /* See comment in __pte_alloc */
74956 +
74957 + spin_lock(&mm->page_table_lock);
74958 + if (pgd_present(*pgd)) /* Another has populated it */
74959 + pud_free(mm, new);
74960 + else
74961 + pgd_populate_kernel(mm, pgd, new);
74962 + spin_unlock(&mm->page_table_lock);
74963 + return 0;
74964 +}
74965 #endif /* __PAGETABLE_PUD_FOLDED */
74966
74967 #ifndef __PAGETABLE_PMD_FOLDED
74968 @@ -3627,6 +3885,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
74969 spin_unlock(&mm->page_table_lock);
74970 return 0;
74971 }
74972 +
74973 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
74974 +{
74975 + pmd_t *new = pmd_alloc_one(mm, address);
74976 + if (!new)
74977 + return -ENOMEM;
74978 +
74979 + smp_wmb(); /* See comment in __pte_alloc */
74980 +
74981 + spin_lock(&mm->page_table_lock);
74982 +#ifndef __ARCH_HAS_4LEVEL_HACK
74983 + if (pud_present(*pud)) /* Another has populated it */
74984 + pmd_free(mm, new);
74985 + else
74986 + pud_populate_kernel(mm, pud, new);
74987 +#else
74988 + if (pgd_present(*pud)) /* Another has populated it */
74989 + pmd_free(mm, new);
74990 + else
74991 + pgd_populate_kernel(mm, pud, new);
74992 +#endif /* __ARCH_HAS_4LEVEL_HACK */
74993 + spin_unlock(&mm->page_table_lock);
74994 + return 0;
74995 +}
74996 #endif /* __PAGETABLE_PMD_FOLDED */
74997
74998 int make_pages_present(unsigned long addr, unsigned long end)
74999 @@ -3664,7 +3946,7 @@ static int __init gate_vma_init(void)
75000 gate_vma.vm_start = FIXADDR_USER_START;
75001 gate_vma.vm_end = FIXADDR_USER_END;
75002 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
75003 - gate_vma.vm_page_prot = __P101;
75004 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
75005
75006 return 0;
75007 }
75008 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
75009 index 4ea600d..1cd61c2 100644
75010 --- a/mm/mempolicy.c
75011 +++ b/mm/mempolicy.c
75012 @@ -655,6 +655,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
75013 unsigned long vmstart;
75014 unsigned long vmend;
75015
75016 +#ifdef CONFIG_PAX_SEGMEXEC
75017 + struct vm_area_struct *vma_m;
75018 +#endif
75019 +
75020 vma = find_vma(mm, start);
75021 if (!vma || vma->vm_start > start)
75022 return -EFAULT;
75023 @@ -691,9 +695,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
75024 if (err)
75025 goto out;
75026 }
75027 +
75028 err = vma_replace_policy(vma, new_pol);
75029 if (err)
75030 goto out;
75031 +
75032 +#ifdef CONFIG_PAX_SEGMEXEC
75033 + vma_m = pax_find_mirror_vma(vma);
75034 + if (vma_m) {
75035 + err = vma_replace_policy(vma_m, new_pol);
75036 + if (err)
75037 + goto out;
75038 + }
75039 +#endif
75040 +
75041 }
75042
75043 out:
75044 @@ -1150,6 +1165,17 @@ static long do_mbind(unsigned long start, unsigned long len,
75045
75046 if (end < start)
75047 return -EINVAL;
75048 +
75049 +#ifdef CONFIG_PAX_SEGMEXEC
75050 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
75051 + if (end > SEGMEXEC_TASK_SIZE)
75052 + return -EINVAL;
75053 + } else
75054 +#endif
75055 +
75056 + if (end > TASK_SIZE)
75057 + return -EINVAL;
75058 +
75059 if (end == start)
75060 return 0;
75061
75062 @@ -1373,8 +1399,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
75063 */
75064 tcred = __task_cred(task);
75065 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
75066 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
75067 - !capable(CAP_SYS_NICE)) {
75068 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
75069 rcu_read_unlock();
75070 err = -EPERM;
75071 goto out_put;
75072 @@ -1405,6 +1430,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
75073 goto out;
75074 }
75075
75076 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75077 + if (mm != current->mm &&
75078 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
75079 + mmput(mm);
75080 + err = -EPERM;
75081 + goto out;
75082 + }
75083 +#endif
75084 +
75085 err = do_migrate_pages(mm, old, new,
75086 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
75087
75088 diff --git a/mm/migrate.c b/mm/migrate.c
75089 index 77ed2d7..317d528 100644
75090 --- a/mm/migrate.c
75091 +++ b/mm/migrate.c
75092 @@ -1350,8 +1350,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
75093 */
75094 tcred = __task_cred(task);
75095 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
75096 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
75097 - !capable(CAP_SYS_NICE)) {
75098 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
75099 rcu_read_unlock();
75100 err = -EPERM;
75101 goto out;
75102 diff --git a/mm/mlock.c b/mm/mlock.c
75103 index f0b9ce5..da8d069 100644
75104 --- a/mm/mlock.c
75105 +++ b/mm/mlock.c
75106 @@ -13,6 +13,7 @@
75107 #include <linux/pagemap.h>
75108 #include <linux/mempolicy.h>
75109 #include <linux/syscalls.h>
75110 +#include <linux/security.h>
75111 #include <linux/sched.h>
75112 #include <linux/export.h>
75113 #include <linux/rmap.h>
75114 @@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
75115 {
75116 unsigned long nstart, end, tmp;
75117 struct vm_area_struct * vma, * prev;
75118 - int error;
75119 + int error = 0;
75120
75121 VM_BUG_ON(start & ~PAGE_MASK);
75122 VM_BUG_ON(len != PAGE_ALIGN(len));
75123 @@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
75124 return -EINVAL;
75125 if (end == start)
75126 return 0;
75127 + if (end > TASK_SIZE)
75128 + return -EINVAL;
75129 +
75130 vma = find_vma(current->mm, start);
75131 if (!vma || vma->vm_start > start)
75132 return -ENOMEM;
75133 @@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
75134 for (nstart = start ; ; ) {
75135 vm_flags_t newflags;
75136
75137 +#ifdef CONFIG_PAX_SEGMEXEC
75138 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
75139 + break;
75140 +#endif
75141 +
75142 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
75143
75144 newflags = vma->vm_flags | VM_LOCKED;
75145 @@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
75146 lock_limit >>= PAGE_SHIFT;
75147
75148 /* check against resource limits */
75149 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
75150 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
75151 error = do_mlock(start, len, 1);
75152 up_write(&current->mm->mmap_sem);
75153 @@ -517,17 +527,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
75154 static int do_mlockall(int flags)
75155 {
75156 struct vm_area_struct * vma, * prev = NULL;
75157 - unsigned int def_flags = 0;
75158
75159 if (flags & MCL_FUTURE)
75160 - def_flags = VM_LOCKED;
75161 - current->mm->def_flags = def_flags;
75162 + current->mm->def_flags |= VM_LOCKED;
75163 + else
75164 + current->mm->def_flags &= ~VM_LOCKED;
75165 if (flags == MCL_FUTURE)
75166 goto out;
75167
75168 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
75169 vm_flags_t newflags;
75170
75171 +#ifdef CONFIG_PAX_SEGMEXEC
75172 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
75173 + break;
75174 +#endif
75175 +
75176 + BUG_ON(vma->vm_end > TASK_SIZE);
75177 newflags = vma->vm_flags | VM_LOCKED;
75178 if (!(flags & MCL_CURRENT))
75179 newflags &= ~VM_LOCKED;
75180 @@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
75181 lock_limit >>= PAGE_SHIFT;
75182
75183 ret = -ENOMEM;
75184 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
75185 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
75186 capable(CAP_IPC_LOCK))
75187 ret = do_mlockall(flags);
75188 diff --git a/mm/mmap.c b/mm/mmap.c
75189 index 9a796c4..4fba820 100644
75190 --- a/mm/mmap.c
75191 +++ b/mm/mmap.c
75192 @@ -47,6 +47,16 @@
75193 #define arch_rebalance_pgtables(addr, len) (addr)
75194 #endif
75195
75196 +static inline void verify_mm_writelocked(struct mm_struct *mm)
75197 +{
75198 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
75199 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
75200 + up_read(&mm->mmap_sem);
75201 + BUG();
75202 + }
75203 +#endif
75204 +}
75205 +
75206 static void unmap_region(struct mm_struct *mm,
75207 struct vm_area_struct *vma, struct vm_area_struct *prev,
75208 unsigned long start, unsigned long end);
75209 @@ -66,22 +76,32 @@ static void unmap_region(struct mm_struct *mm,
75210 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
75211 *
75212 */
75213 -pgprot_t protection_map[16] = {
75214 +pgprot_t protection_map[16] __read_only = {
75215 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
75216 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
75217 };
75218
75219 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
75220 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
75221 {
75222 - return __pgprot(pgprot_val(protection_map[vm_flags &
75223 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
75224 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
75225 pgprot_val(arch_vm_get_page_prot(vm_flags)));
75226 +
75227 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
75228 + if (!(__supported_pte_mask & _PAGE_NX) &&
75229 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
75230 + (vm_flags & (VM_READ | VM_WRITE)))
75231 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
75232 +#endif
75233 +
75234 + return prot;
75235 }
75236 EXPORT_SYMBOL(vm_get_page_prot);
75237
75238 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
75239 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
75240 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
75241 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
75242 /*
75243 * Make sure vm_committed_as in one cacheline and not cacheline shared with
75244 * other variables. It can be updated by several CPUs frequently.
75245 @@ -223,6 +243,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
75246 struct vm_area_struct *next = vma->vm_next;
75247
75248 might_sleep();
75249 + BUG_ON(vma->vm_mirror);
75250 if (vma->vm_ops && vma->vm_ops->close)
75251 vma->vm_ops->close(vma);
75252 if (vma->vm_file)
75253 @@ -266,6 +287,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
75254 * not page aligned -Ram Gupta
75255 */
75256 rlim = rlimit(RLIMIT_DATA);
75257 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
75258 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
75259 (mm->end_data - mm->start_data) > rlim)
75260 goto out;
75261 @@ -736,6 +758,12 @@ static int
75262 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
75263 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
75264 {
75265 +
75266 +#ifdef CONFIG_PAX_SEGMEXEC
75267 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
75268 + return 0;
75269 +#endif
75270 +
75271 if (is_mergeable_vma(vma, file, vm_flags) &&
75272 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
75273 if (vma->vm_pgoff == vm_pgoff)
75274 @@ -755,6 +783,12 @@ static int
75275 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
75276 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
75277 {
75278 +
75279 +#ifdef CONFIG_PAX_SEGMEXEC
75280 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
75281 + return 0;
75282 +#endif
75283 +
75284 if (is_mergeable_vma(vma, file, vm_flags) &&
75285 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
75286 pgoff_t vm_pglen;
75287 @@ -797,13 +831,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
75288 struct vm_area_struct *vma_merge(struct mm_struct *mm,
75289 struct vm_area_struct *prev, unsigned long addr,
75290 unsigned long end, unsigned long vm_flags,
75291 - struct anon_vma *anon_vma, struct file *file,
75292 + struct anon_vma *anon_vma, struct file *file,
75293 pgoff_t pgoff, struct mempolicy *policy)
75294 {
75295 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
75296 struct vm_area_struct *area, *next;
75297 int err;
75298
75299 +#ifdef CONFIG_PAX_SEGMEXEC
75300 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
75301 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
75302 +
75303 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
75304 +#endif
75305 +
75306 /*
75307 * We later require that vma->vm_flags == vm_flags,
75308 * so this tests vma->vm_flags & VM_SPECIAL, too.
75309 @@ -819,6 +860,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75310 if (next && next->vm_end == end) /* cases 6, 7, 8 */
75311 next = next->vm_next;
75312
75313 +#ifdef CONFIG_PAX_SEGMEXEC
75314 + if (prev)
75315 + prev_m = pax_find_mirror_vma(prev);
75316 + if (area)
75317 + area_m = pax_find_mirror_vma(area);
75318 + if (next)
75319 + next_m = pax_find_mirror_vma(next);
75320 +#endif
75321 +
75322 /*
75323 * Can it merge with the predecessor?
75324 */
75325 @@ -838,9 +888,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75326 /* cases 1, 6 */
75327 err = vma_adjust(prev, prev->vm_start,
75328 next->vm_end, prev->vm_pgoff, NULL);
75329 - } else /* cases 2, 5, 7 */
75330 +
75331 +#ifdef CONFIG_PAX_SEGMEXEC
75332 + if (!err && prev_m)
75333 + err = vma_adjust(prev_m, prev_m->vm_start,
75334 + next_m->vm_end, prev_m->vm_pgoff, NULL);
75335 +#endif
75336 +
75337 + } else { /* cases 2, 5, 7 */
75338 err = vma_adjust(prev, prev->vm_start,
75339 end, prev->vm_pgoff, NULL);
75340 +
75341 +#ifdef CONFIG_PAX_SEGMEXEC
75342 + if (!err && prev_m)
75343 + err = vma_adjust(prev_m, prev_m->vm_start,
75344 + end_m, prev_m->vm_pgoff, NULL);
75345 +#endif
75346 +
75347 + }
75348 if (err)
75349 return NULL;
75350 khugepaged_enter_vma_merge(prev);
75351 @@ -854,12 +919,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75352 mpol_equal(policy, vma_policy(next)) &&
75353 can_vma_merge_before(next, vm_flags,
75354 anon_vma, file, pgoff+pglen)) {
75355 - if (prev && addr < prev->vm_end) /* case 4 */
75356 + if (prev && addr < prev->vm_end) { /* case 4 */
75357 err = vma_adjust(prev, prev->vm_start,
75358 addr, prev->vm_pgoff, NULL);
75359 - else /* cases 3, 8 */
75360 +
75361 +#ifdef CONFIG_PAX_SEGMEXEC
75362 + if (!err && prev_m)
75363 + err = vma_adjust(prev_m, prev_m->vm_start,
75364 + addr_m, prev_m->vm_pgoff, NULL);
75365 +#endif
75366 +
75367 + } else { /* cases 3, 8 */
75368 err = vma_adjust(area, addr, next->vm_end,
75369 next->vm_pgoff - pglen, NULL);
75370 +
75371 +#ifdef CONFIG_PAX_SEGMEXEC
75372 + if (!err && area_m)
75373 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
75374 + next_m->vm_pgoff - pglen, NULL);
75375 +#endif
75376 +
75377 + }
75378 if (err)
75379 return NULL;
75380 khugepaged_enter_vma_merge(area);
75381 @@ -968,16 +1048,13 @@ none:
75382 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
75383 struct file *file, long pages)
75384 {
75385 - const unsigned long stack_flags
75386 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
75387 -
75388 mm->total_vm += pages;
75389
75390 if (file) {
75391 mm->shared_vm += pages;
75392 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
75393 mm->exec_vm += pages;
75394 - } else if (flags & stack_flags)
75395 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
75396 mm->stack_vm += pages;
75397 }
75398 #endif /* CONFIG_PROC_FS */
75399 @@ -1013,7 +1090,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75400 * (the exception is when the underlying filesystem is noexec
75401 * mounted, in which case we dont add PROT_EXEC.)
75402 */
75403 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
75404 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
75405 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
75406 prot |= PROT_EXEC;
75407
75408 @@ -1039,7 +1116,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75409 /* Obtain the address to map to. we verify (or select) it and ensure
75410 * that it represents a valid section of the address space.
75411 */
75412 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
75413 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
75414 if (addr & ~PAGE_MASK)
75415 return addr;
75416
75417 @@ -1050,6 +1127,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75418 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
75419 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
75420
75421 +#ifdef CONFIG_PAX_MPROTECT
75422 + if (mm->pax_flags & MF_PAX_MPROTECT) {
75423 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
75424 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
75425 + gr_log_rwxmmap(file);
75426 +
75427 +#ifdef CONFIG_PAX_EMUPLT
75428 + vm_flags &= ~VM_EXEC;
75429 +#else
75430 + return -EPERM;
75431 +#endif
75432 +
75433 + }
75434 +
75435 + if (!(vm_flags & VM_EXEC))
75436 + vm_flags &= ~VM_MAYEXEC;
75437 +#else
75438 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
75439 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
75440 +#endif
75441 + else
75442 + vm_flags &= ~VM_MAYWRITE;
75443 + }
75444 +#endif
75445 +
75446 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
75447 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
75448 + vm_flags &= ~VM_PAGEEXEC;
75449 +#endif
75450 +
75451 if (flags & MAP_LOCKED)
75452 if (!can_do_mlock())
75453 return -EPERM;
75454 @@ -1061,6 +1168,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75455 locked += mm->locked_vm;
75456 lock_limit = rlimit(RLIMIT_MEMLOCK);
75457 lock_limit >>= PAGE_SHIFT;
75458 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
75459 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
75460 return -EAGAIN;
75461 }
75462 @@ -1127,6 +1235,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75463 }
75464 }
75465
75466 + if (!gr_acl_handle_mmap(file, prot))
75467 + return -EACCES;
75468 +
75469 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
75470 }
75471
75472 @@ -1203,7 +1314,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
75473 vm_flags_t vm_flags = vma->vm_flags;
75474
75475 /* If it was private or non-writable, the write bit is already clear */
75476 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
75477 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
75478 return 0;
75479
75480 /* The backer wishes to know when pages are first written to? */
75481 @@ -1252,13 +1363,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
75482 unsigned long charged = 0;
75483 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
75484
75485 +#ifdef CONFIG_PAX_SEGMEXEC
75486 + struct vm_area_struct *vma_m = NULL;
75487 +#endif
75488 +
75489 + /*
75490 + * mm->mmap_sem is required to protect against another thread
75491 + * changing the mappings in case we sleep.
75492 + */
75493 + verify_mm_writelocked(mm);
75494 +
75495 /* Clear old maps */
75496 error = -ENOMEM;
75497 -munmap_back:
75498 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
75499 if (do_munmap(mm, addr, len))
75500 return -ENOMEM;
75501 - goto munmap_back;
75502 + BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
75503 }
75504
75505 /* Check against address space limit. */
75506 @@ -1307,6 +1427,16 @@ munmap_back:
75507 goto unacct_error;
75508 }
75509
75510 +#ifdef CONFIG_PAX_SEGMEXEC
75511 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
75512 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
75513 + if (!vma_m) {
75514 + error = -ENOMEM;
75515 + goto free_vma;
75516 + }
75517 + }
75518 +#endif
75519 +
75520 vma->vm_mm = mm;
75521 vma->vm_start = addr;
75522 vma->vm_end = addr + len;
75523 @@ -1331,6 +1461,13 @@ munmap_back:
75524 if (error)
75525 goto unmap_and_free_vma;
75526
75527 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
75528 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
75529 + vma->vm_flags |= VM_PAGEEXEC;
75530 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
75531 + }
75532 +#endif
75533 +
75534 /* Can addr have changed??
75535 *
75536 * Answer: Yes, several device drivers can do it in their
75537 @@ -1365,6 +1502,11 @@ munmap_back:
75538 vma_link(mm, vma, prev, rb_link, rb_parent);
75539 file = vma->vm_file;
75540
75541 +#ifdef CONFIG_PAX_SEGMEXEC
75542 + if (vma_m)
75543 + BUG_ON(pax_mirror_vma(vma_m, vma));
75544 +#endif
75545 +
75546 /* Once vma denies write, undo our temporary denial count */
75547 if (correct_wcount)
75548 atomic_inc(&inode->i_writecount);
75549 @@ -1372,6 +1514,7 @@ out:
75550 perf_event_mmap(vma);
75551
75552 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
75553 + track_exec_limit(mm, addr, addr + len, vm_flags);
75554 if (vm_flags & VM_LOCKED) {
75555 if (!mlock_vma_pages_range(vma, addr, addr + len))
75556 mm->locked_vm += (len >> PAGE_SHIFT);
75557 @@ -1393,6 +1536,12 @@ unmap_and_free_vma:
75558 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
75559 charged = 0;
75560 free_vma:
75561 +
75562 +#ifdef CONFIG_PAX_SEGMEXEC
75563 + if (vma_m)
75564 + kmem_cache_free(vm_area_cachep, vma_m);
75565 +#endif
75566 +
75567 kmem_cache_free(vm_area_cachep, vma);
75568 unacct_error:
75569 if (charged)
75570 @@ -1400,6 +1549,44 @@ unacct_error:
75571 return error;
75572 }
75573
75574 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
75575 +{
75576 + if (!vma) {
75577 +#ifdef CONFIG_STACK_GROWSUP
75578 + if (addr > sysctl_heap_stack_gap)
75579 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
75580 + else
75581 + vma = find_vma(current->mm, 0);
75582 + if (vma && (vma->vm_flags & VM_GROWSUP))
75583 + return false;
75584 +#endif
75585 + return true;
75586 + }
75587 +
75588 + if (addr + len > vma->vm_start)
75589 + return false;
75590 +
75591 + if (vma->vm_flags & VM_GROWSDOWN)
75592 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
75593 +#ifdef CONFIG_STACK_GROWSUP
75594 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
75595 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
75596 +#endif
75597 +
75598 + return true;
75599 +}
75600 +
75601 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
75602 +{
75603 + if (vma->vm_start < len)
75604 + return -ENOMEM;
75605 + if (!(vma->vm_flags & VM_GROWSDOWN))
75606 + return vma->vm_start - len;
75607 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
75608 + return vma->vm_start - len - sysctl_heap_stack_gap;
75609 + return -ENOMEM;
75610 +}
75611 +
75612 /* Get an address range which is currently unmapped.
75613 * For shmat() with addr=0.
75614 *
75615 @@ -1426,18 +1613,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
75616 if (flags & MAP_FIXED)
75617 return addr;
75618
75619 +#ifdef CONFIG_PAX_RANDMMAP
75620 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
75621 +#endif
75622 +
75623 if (addr) {
75624 addr = PAGE_ALIGN(addr);
75625 - vma = find_vma(mm, addr);
75626 - if (TASK_SIZE - len >= addr &&
75627 - (!vma || addr + len <= vma->vm_start))
75628 - return addr;
75629 + if (TASK_SIZE - len >= addr) {
75630 + vma = find_vma(mm, addr);
75631 + if (check_heap_stack_gap(vma, addr, len))
75632 + return addr;
75633 + }
75634 }
75635 if (len > mm->cached_hole_size) {
75636 - start_addr = addr = mm->free_area_cache;
75637 + start_addr = addr = mm->free_area_cache;
75638 } else {
75639 - start_addr = addr = TASK_UNMAPPED_BASE;
75640 - mm->cached_hole_size = 0;
75641 + start_addr = addr = mm->mmap_base;
75642 + mm->cached_hole_size = 0;
75643 }
75644
75645 full_search:
75646 @@ -1448,34 +1640,40 @@ full_search:
75647 * Start a new search - just in case we missed
75648 * some holes.
75649 */
75650 - if (start_addr != TASK_UNMAPPED_BASE) {
75651 - addr = TASK_UNMAPPED_BASE;
75652 - start_addr = addr;
75653 + if (start_addr != mm->mmap_base) {
75654 + start_addr = addr = mm->mmap_base;
75655 mm->cached_hole_size = 0;
75656 goto full_search;
75657 }
75658 return -ENOMEM;
75659 }
75660 - if (!vma || addr + len <= vma->vm_start) {
75661 - /*
75662 - * Remember the place where we stopped the search:
75663 - */
75664 - mm->free_area_cache = addr + len;
75665 - return addr;
75666 - }
75667 + if (check_heap_stack_gap(vma, addr, len))
75668 + break;
75669 if (addr + mm->cached_hole_size < vma->vm_start)
75670 mm->cached_hole_size = vma->vm_start - addr;
75671 addr = vma->vm_end;
75672 }
75673 +
75674 + /*
75675 + * Remember the place where we stopped the search:
75676 + */
75677 + mm->free_area_cache = addr + len;
75678 + return addr;
75679 }
75680 #endif
75681
75682 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
75683 {
75684 +
75685 +#ifdef CONFIG_PAX_SEGMEXEC
75686 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
75687 + return;
75688 +#endif
75689 +
75690 /*
75691 * Is this a new hole at the lowest possible address?
75692 */
75693 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
75694 + if (addr >= mm->mmap_base && addr < mm->free_area_cache)
75695 mm->free_area_cache = addr;
75696 }
75697
75698 @@ -1491,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
75699 {
75700 struct vm_area_struct *vma;
75701 struct mm_struct *mm = current->mm;
75702 - unsigned long addr = addr0, start_addr;
75703 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
75704
75705 /* requested length too big for entire address space */
75706 if (len > TASK_SIZE)
75707 @@ -1500,13 +1698,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
75708 if (flags & MAP_FIXED)
75709 return addr;
75710
75711 +#ifdef CONFIG_PAX_RANDMMAP
75712 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
75713 +#endif
75714 +
75715 /* requesting a specific address */
75716 if (addr) {
75717 addr = PAGE_ALIGN(addr);
75718 - vma = find_vma(mm, addr);
75719 - if (TASK_SIZE - len >= addr &&
75720 - (!vma || addr + len <= vma->vm_start))
75721 - return addr;
75722 + if (TASK_SIZE - len >= addr) {
75723 + vma = find_vma(mm, addr);
75724 + if (check_heap_stack_gap(vma, addr, len))
75725 + return addr;
75726 + }
75727 }
75728
75729 /* check if free_area_cache is useful for us */
75730 @@ -1530,7 +1733,7 @@ try_again:
75731 * return with success:
75732 */
75733 vma = find_vma(mm, addr);
75734 - if (!vma || addr+len <= vma->vm_start)
75735 + if (check_heap_stack_gap(vma, addr, len))
75736 /* remember the address as a hint for next time */
75737 return (mm->free_area_cache = addr);
75738
75739 @@ -1539,8 +1742,8 @@ try_again:
75740 mm->cached_hole_size = vma->vm_start - addr;
75741
75742 /* try just below the current vma->vm_start */
75743 - addr = vma->vm_start-len;
75744 - } while (len < vma->vm_start);
75745 + addr = skip_heap_stack_gap(vma, len);
75746 + } while (!IS_ERR_VALUE(addr));
75747
75748 fail:
75749 /*
75750 @@ -1563,13 +1766,21 @@ fail:
75751 * can happen with large stack limits and large mmap()
75752 * allocations.
75753 */
75754 + mm->mmap_base = TASK_UNMAPPED_BASE;
75755 +
75756 +#ifdef CONFIG_PAX_RANDMMAP
75757 + if (mm->pax_flags & MF_PAX_RANDMMAP)
75758 + mm->mmap_base += mm->delta_mmap;
75759 +#endif
75760 +
75761 + mm->free_area_cache = mm->mmap_base;
75762 mm->cached_hole_size = ~0UL;
75763 - mm->free_area_cache = TASK_UNMAPPED_BASE;
75764 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
75765 /*
75766 * Restore the topdown base:
75767 */
75768 - mm->free_area_cache = mm->mmap_base;
75769 + mm->mmap_base = base;
75770 + mm->free_area_cache = base;
75771 mm->cached_hole_size = ~0UL;
75772
75773 return addr;
75774 @@ -1578,6 +1789,12 @@ fail:
75775
75776 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
75777 {
75778 +
75779 +#ifdef CONFIG_PAX_SEGMEXEC
75780 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
75781 + return;
75782 +#endif
75783 +
75784 /*
75785 * Is this a new hole at the highest possible address?
75786 */
75787 @@ -1585,8 +1802,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
75788 mm->free_area_cache = addr;
75789
75790 /* dont allow allocations above current base */
75791 - if (mm->free_area_cache > mm->mmap_base)
75792 + if (mm->free_area_cache > mm->mmap_base) {
75793 mm->free_area_cache = mm->mmap_base;
75794 + mm->cached_hole_size = ~0UL;
75795 + }
75796 }
75797
75798 unsigned long
75799 @@ -1685,6 +1904,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
75800 return vma;
75801 }
75802
75803 +#ifdef CONFIG_PAX_SEGMEXEC
75804 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
75805 +{
75806 + struct vm_area_struct *vma_m;
75807 +
75808 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
75809 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
75810 + BUG_ON(vma->vm_mirror);
75811 + return NULL;
75812 + }
75813 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
75814 + vma_m = vma->vm_mirror;
75815 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
75816 + BUG_ON(vma->vm_file != vma_m->vm_file);
75817 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
75818 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
75819 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
75820 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
75821 + return vma_m;
75822 +}
75823 +#endif
75824 +
75825 /*
75826 * Verify that the stack growth is acceptable and
75827 * update accounting. This is shared with both the
75828 @@ -1701,6 +1942,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75829 return -ENOMEM;
75830
75831 /* Stack limit test */
75832 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
75833 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
75834 return -ENOMEM;
75835
75836 @@ -1711,6 +1953,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75837 locked = mm->locked_vm + grow;
75838 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
75839 limit >>= PAGE_SHIFT;
75840 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
75841 if (locked > limit && !capable(CAP_IPC_LOCK))
75842 return -ENOMEM;
75843 }
75844 @@ -1740,37 +1983,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75845 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
75846 * vma is the last one with address > vma->vm_end. Have to extend vma.
75847 */
75848 +#ifndef CONFIG_IA64
75849 +static
75850 +#endif
75851 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
75852 {
75853 int error;
75854 + bool locknext;
75855
75856 if (!(vma->vm_flags & VM_GROWSUP))
75857 return -EFAULT;
75858
75859 + /* Also guard against wrapping around to address 0. */
75860 + if (address < PAGE_ALIGN(address+1))
75861 + address = PAGE_ALIGN(address+1);
75862 + else
75863 + return -ENOMEM;
75864 +
75865 /*
75866 * We must make sure the anon_vma is allocated
75867 * so that the anon_vma locking is not a noop.
75868 */
75869 if (unlikely(anon_vma_prepare(vma)))
75870 return -ENOMEM;
75871 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
75872 + if (locknext && anon_vma_prepare(vma->vm_next))
75873 + return -ENOMEM;
75874 vma_lock_anon_vma(vma);
75875 + if (locknext)
75876 + vma_lock_anon_vma(vma->vm_next);
75877
75878 /*
75879 * vma->vm_start/vm_end cannot change under us because the caller
75880 * is required to hold the mmap_sem in read mode. We need the
75881 - * anon_vma lock to serialize against concurrent expand_stacks.
75882 - * Also guard against wrapping around to address 0.
75883 + * anon_vma locks to serialize against concurrent expand_stacks
75884 + * and expand_upwards.
75885 */
75886 - if (address < PAGE_ALIGN(address+4))
75887 - address = PAGE_ALIGN(address+4);
75888 - else {
75889 - vma_unlock_anon_vma(vma);
75890 - return -ENOMEM;
75891 - }
75892 error = 0;
75893
75894 /* Somebody else might have raced and expanded it already */
75895 - if (address > vma->vm_end) {
75896 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
75897 + error = -ENOMEM;
75898 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
75899 unsigned long size, grow;
75900
75901 size = address - vma->vm_start;
75902 @@ -1787,6 +2041,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
75903 }
75904 }
75905 }
75906 + if (locknext)
75907 + vma_unlock_anon_vma(vma->vm_next);
75908 vma_unlock_anon_vma(vma);
75909 khugepaged_enter_vma_merge(vma);
75910 validate_mm(vma->vm_mm);
75911 @@ -1801,6 +2057,8 @@ int expand_downwards(struct vm_area_struct *vma,
75912 unsigned long address)
75913 {
75914 int error;
75915 + bool lockprev = false;
75916 + struct vm_area_struct *prev;
75917
75918 /*
75919 * We must make sure the anon_vma is allocated
75920 @@ -1814,6 +2072,15 @@ int expand_downwards(struct vm_area_struct *vma,
75921 if (error)
75922 return error;
75923
75924 + prev = vma->vm_prev;
75925 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
75926 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
75927 +#endif
75928 + if (lockprev && anon_vma_prepare(prev))
75929 + return -ENOMEM;
75930 + if (lockprev)
75931 + vma_lock_anon_vma(prev);
75932 +
75933 vma_lock_anon_vma(vma);
75934
75935 /*
75936 @@ -1823,9 +2090,17 @@ int expand_downwards(struct vm_area_struct *vma,
75937 */
75938
75939 /* Somebody else might have raced and expanded it already */
75940 - if (address < vma->vm_start) {
75941 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
75942 + error = -ENOMEM;
75943 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
75944 unsigned long size, grow;
75945
75946 +#ifdef CONFIG_PAX_SEGMEXEC
75947 + struct vm_area_struct *vma_m;
75948 +
75949 + vma_m = pax_find_mirror_vma(vma);
75950 +#endif
75951 +
75952 size = vma->vm_end - address;
75953 grow = (vma->vm_start - address) >> PAGE_SHIFT;
75954
75955 @@ -1837,6 +2112,17 @@ int expand_downwards(struct vm_area_struct *vma,
75956 vma->vm_start = address;
75957 vma->vm_pgoff -= grow;
75958 anon_vma_interval_tree_post_update_vma(vma);
75959 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
75960 +
75961 +#ifdef CONFIG_PAX_SEGMEXEC
75962 + if (vma_m) {
75963 + anon_vma_interval_tree_pre_update_vma(vma_m);
75964 + vma_m->vm_start -= grow << PAGE_SHIFT;
75965 + vma_m->vm_pgoff -= grow;
75966 + anon_vma_interval_tree_post_update_vma(vma_m);
75967 + }
75968 +#endif
75969 +
75970 perf_event_mmap(vma);
75971 }
75972 }
75973 @@ -1914,6 +2200,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
75974 do {
75975 long nrpages = vma_pages(vma);
75976
75977 +#ifdef CONFIG_PAX_SEGMEXEC
75978 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
75979 + vma = remove_vma(vma);
75980 + continue;
75981 + }
75982 +#endif
75983 +
75984 if (vma->vm_flags & VM_ACCOUNT)
75985 nr_accounted += nrpages;
75986 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
75987 @@ -1959,6 +2252,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
75988 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
75989 vma->vm_prev = NULL;
75990 do {
75991 +
75992 +#ifdef CONFIG_PAX_SEGMEXEC
75993 + if (vma->vm_mirror) {
75994 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
75995 + vma->vm_mirror->vm_mirror = NULL;
75996 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
75997 + vma->vm_mirror = NULL;
75998 + }
75999 +#endif
76000 +
76001 rb_erase(&vma->vm_rb, &mm->mm_rb);
76002 mm->map_count--;
76003 tail_vma = vma;
76004 @@ -1987,14 +2290,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
76005 struct vm_area_struct *new;
76006 int err = -ENOMEM;
76007
76008 +#ifdef CONFIG_PAX_SEGMEXEC
76009 + struct vm_area_struct *vma_m, *new_m = NULL;
76010 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
76011 +#endif
76012 +
76013 if (is_vm_hugetlb_page(vma) && (addr &
76014 ~(huge_page_mask(hstate_vma(vma)))))
76015 return -EINVAL;
76016
76017 +#ifdef CONFIG_PAX_SEGMEXEC
76018 + vma_m = pax_find_mirror_vma(vma);
76019 +#endif
76020 +
76021 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
76022 if (!new)
76023 goto out_err;
76024
76025 +#ifdef CONFIG_PAX_SEGMEXEC
76026 + if (vma_m) {
76027 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
76028 + if (!new_m) {
76029 + kmem_cache_free(vm_area_cachep, new);
76030 + goto out_err;
76031 + }
76032 + }
76033 +#endif
76034 +
76035 /* most fields are the same, copy all, and then fixup */
76036 *new = *vma;
76037
76038 @@ -2007,6 +2329,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
76039 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
76040 }
76041
76042 +#ifdef CONFIG_PAX_SEGMEXEC
76043 + if (vma_m) {
76044 + *new_m = *vma_m;
76045 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
76046 + new_m->vm_mirror = new;
76047 + new->vm_mirror = new_m;
76048 +
76049 + if (new_below)
76050 + new_m->vm_end = addr_m;
76051 + else {
76052 + new_m->vm_start = addr_m;
76053 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
76054 + }
76055 + }
76056 +#endif
76057 +
76058 pol = mpol_dup(vma_policy(vma));
76059 if (IS_ERR(pol)) {
76060 err = PTR_ERR(pol);
76061 @@ -2029,6 +2367,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
76062 else
76063 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
76064
76065 +#ifdef CONFIG_PAX_SEGMEXEC
76066 + if (!err && vma_m) {
76067 + if (anon_vma_clone(new_m, vma_m))
76068 + goto out_free_mpol;
76069 +
76070 + mpol_get(pol);
76071 + vma_set_policy(new_m, pol);
76072 +
76073 + if (new_m->vm_file)
76074 + get_file(new_m->vm_file);
76075 +
76076 + if (new_m->vm_ops && new_m->vm_ops->open)
76077 + new_m->vm_ops->open(new_m);
76078 +
76079 + if (new_below)
76080 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
76081 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
76082 + else
76083 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
76084 +
76085 + if (err) {
76086 + if (new_m->vm_ops && new_m->vm_ops->close)
76087 + new_m->vm_ops->close(new_m);
76088 + if (new_m->vm_file)
76089 + fput(new_m->vm_file);
76090 + mpol_put(pol);
76091 + }
76092 + }
76093 +#endif
76094 +
76095 /* Success. */
76096 if (!err)
76097 return 0;
76098 @@ -2038,10 +2406,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
76099 new->vm_ops->close(new);
76100 if (new->vm_file)
76101 fput(new->vm_file);
76102 - unlink_anon_vmas(new);
76103 out_free_mpol:
76104 mpol_put(pol);
76105 out_free_vma:
76106 +
76107 +#ifdef CONFIG_PAX_SEGMEXEC
76108 + if (new_m) {
76109 + unlink_anon_vmas(new_m);
76110 + kmem_cache_free(vm_area_cachep, new_m);
76111 + }
76112 +#endif
76113 +
76114 + unlink_anon_vmas(new);
76115 kmem_cache_free(vm_area_cachep, new);
76116 out_err:
76117 return err;
76118 @@ -2054,6 +2430,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
76119 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
76120 unsigned long addr, int new_below)
76121 {
76122 +
76123 +#ifdef CONFIG_PAX_SEGMEXEC
76124 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
76125 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
76126 + if (mm->map_count >= sysctl_max_map_count-1)
76127 + return -ENOMEM;
76128 + } else
76129 +#endif
76130 +
76131 if (mm->map_count >= sysctl_max_map_count)
76132 return -ENOMEM;
76133
76134 @@ -2065,11 +2450,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
76135 * work. This now handles partial unmappings.
76136 * Jeremy Fitzhardinge <jeremy@goop.org>
76137 */
76138 +#ifdef CONFIG_PAX_SEGMEXEC
76139 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
76140 {
76141 + int ret = __do_munmap(mm, start, len);
76142 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
76143 + return ret;
76144 +
76145 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
76146 +}
76147 +
76148 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
76149 +#else
76150 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
76151 +#endif
76152 +{
76153 unsigned long end;
76154 struct vm_area_struct *vma, *prev, *last;
76155
76156 + /*
76157 + * mm->mmap_sem is required to protect against another thread
76158 + * changing the mappings in case we sleep.
76159 + */
76160 + verify_mm_writelocked(mm);
76161 +
76162 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
76163 return -EINVAL;
76164
76165 @@ -2144,6 +2548,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
76166 /* Fix up all other VM information */
76167 remove_vma_list(mm, vma);
76168
76169 + track_exec_limit(mm, start, end, 0UL);
76170 +
76171 return 0;
76172 }
76173
76174 @@ -2152,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
76175 int ret;
76176 struct mm_struct *mm = current->mm;
76177
76178 +
76179 +#ifdef CONFIG_PAX_SEGMEXEC
76180 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
76181 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
76182 + return -EINVAL;
76183 +#endif
76184 +
76185 down_write(&mm->mmap_sem);
76186 ret = do_munmap(mm, start, len);
76187 up_write(&mm->mmap_sem);
76188 @@ -2165,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
76189 return vm_munmap(addr, len);
76190 }
76191
76192 -static inline void verify_mm_writelocked(struct mm_struct *mm)
76193 -{
76194 -#ifdef CONFIG_DEBUG_VM
76195 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
76196 - WARN_ON(1);
76197 - up_read(&mm->mmap_sem);
76198 - }
76199 -#endif
76200 -}
76201 -
76202 /*
76203 * this is really a simplified "do_mmap". it only handles
76204 * anonymous maps. eventually we may be able to do some
76205 @@ -2188,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76206 struct rb_node ** rb_link, * rb_parent;
76207 pgoff_t pgoff = addr >> PAGE_SHIFT;
76208 int error;
76209 + unsigned long charged;
76210
76211 len = PAGE_ALIGN(len);
76212 if (!len)
76213 @@ -2195,16 +2599,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76214
76215 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
76216
76217 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
76218 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
76219 + flags &= ~VM_EXEC;
76220 +
76221 +#ifdef CONFIG_PAX_MPROTECT
76222 + if (mm->pax_flags & MF_PAX_MPROTECT)
76223 + flags &= ~VM_MAYEXEC;
76224 +#endif
76225 +
76226 + }
76227 +#endif
76228 +
76229 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
76230 if (error & ~PAGE_MASK)
76231 return error;
76232
76233 + charged = len >> PAGE_SHIFT;
76234 +
76235 /*
76236 * mlock MCL_FUTURE?
76237 */
76238 if (mm->def_flags & VM_LOCKED) {
76239 unsigned long locked, lock_limit;
76240 - locked = len >> PAGE_SHIFT;
76241 + locked = charged;
76242 locked += mm->locked_vm;
76243 lock_limit = rlimit(RLIMIT_MEMLOCK);
76244 lock_limit >>= PAGE_SHIFT;
76245 @@ -2221,21 +2639,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76246 /*
76247 * Clear old maps. this also does some error checking for us
76248 */
76249 - munmap_back:
76250 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
76251 if (do_munmap(mm, addr, len))
76252 return -ENOMEM;
76253 - goto munmap_back;
76254 + BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
76255 }
76256
76257 /* Check against address space limits *after* clearing old maps... */
76258 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
76259 + if (!may_expand_vm(mm, charged))
76260 return -ENOMEM;
76261
76262 if (mm->map_count > sysctl_max_map_count)
76263 return -ENOMEM;
76264
76265 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
76266 + if (security_vm_enough_memory_mm(mm, charged))
76267 return -ENOMEM;
76268
76269 /* Can we just expand an old private anonymous mapping? */
76270 @@ -2249,7 +2666,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76271 */
76272 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76273 if (!vma) {
76274 - vm_unacct_memory(len >> PAGE_SHIFT);
76275 + vm_unacct_memory(charged);
76276 return -ENOMEM;
76277 }
76278
76279 @@ -2263,11 +2680,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76280 vma_link(mm, vma, prev, rb_link, rb_parent);
76281 out:
76282 perf_event_mmap(vma);
76283 - mm->total_vm += len >> PAGE_SHIFT;
76284 + mm->total_vm += charged;
76285 if (flags & VM_LOCKED) {
76286 if (!mlock_vma_pages_range(vma, addr, addr + len))
76287 - mm->locked_vm += (len >> PAGE_SHIFT);
76288 + mm->locked_vm += charged;
76289 }
76290 + track_exec_limit(mm, addr, addr + len, flags);
76291 return addr;
76292 }
76293
76294 @@ -2325,6 +2743,7 @@ void exit_mmap(struct mm_struct *mm)
76295 while (vma) {
76296 if (vma->vm_flags & VM_ACCOUNT)
76297 nr_accounted += vma_pages(vma);
76298 + vma->vm_mirror = NULL;
76299 vma = remove_vma(vma);
76300 }
76301 vm_unacct_memory(nr_accounted);
76302 @@ -2341,6 +2760,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
76303 struct vm_area_struct *prev;
76304 struct rb_node **rb_link, *rb_parent;
76305
76306 +#ifdef CONFIG_PAX_SEGMEXEC
76307 + struct vm_area_struct *vma_m = NULL;
76308 +#endif
76309 +
76310 + if (security_mmap_addr(vma->vm_start))
76311 + return -EPERM;
76312 +
76313 /*
76314 * The vm_pgoff of a purely anonymous vma should be irrelevant
76315 * until its first write fault, when page's anon_vma and index
76316 @@ -2364,7 +2790,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
76317 security_vm_enough_memory_mm(mm, vma_pages(vma)))
76318 return -ENOMEM;
76319
76320 +#ifdef CONFIG_PAX_SEGMEXEC
76321 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
76322 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76323 + if (!vma_m)
76324 + return -ENOMEM;
76325 + }
76326 +#endif
76327 +
76328 vma_link(mm, vma, prev, rb_link, rb_parent);
76329 +
76330 +#ifdef CONFIG_PAX_SEGMEXEC
76331 + if (vma_m)
76332 + BUG_ON(pax_mirror_vma(vma_m, vma));
76333 +#endif
76334 +
76335 return 0;
76336 }
76337
76338 @@ -2384,6 +2824,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
76339 struct mempolicy *pol;
76340 bool faulted_in_anon_vma = true;
76341
76342 + BUG_ON(vma->vm_mirror);
76343 +
76344 /*
76345 * If anonymous vma has not yet been faulted, update new pgoff
76346 * to match new location, to increase its chance of merging.
76347 @@ -2450,6 +2892,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
76348 return NULL;
76349 }
76350
76351 +#ifdef CONFIG_PAX_SEGMEXEC
76352 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
76353 +{
76354 + struct vm_area_struct *prev_m;
76355 + struct rb_node **rb_link_m, *rb_parent_m;
76356 + struct mempolicy *pol_m;
76357 +
76358 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
76359 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
76360 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
76361 + *vma_m = *vma;
76362 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
76363 + if (anon_vma_clone(vma_m, vma))
76364 + return -ENOMEM;
76365 + pol_m = vma_policy(vma_m);
76366 + mpol_get(pol_m);
76367 + vma_set_policy(vma_m, pol_m);
76368 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
76369 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
76370 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
76371 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
76372 + if (vma_m->vm_file)
76373 + get_file(vma_m->vm_file);
76374 + if (vma_m->vm_ops && vma_m->vm_ops->open)
76375 + vma_m->vm_ops->open(vma_m);
76376 + BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
76377 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
76378 + vma_m->vm_mirror = vma;
76379 + vma->vm_mirror = vma_m;
76380 + return 0;
76381 +}
76382 +#endif
76383 +
76384 /*
76385 * Return true if the calling process may expand its vm space by the passed
76386 * number of pages
76387 @@ -2461,6 +2936,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
76388
76389 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
76390
76391 +#ifdef CONFIG_PAX_RANDMMAP
76392 + if (mm->pax_flags & MF_PAX_RANDMMAP)
76393 + cur -= mm->brk_gap;
76394 +#endif
76395 +
76396 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
76397 if (cur + npages > lim)
76398 return 0;
76399 return 1;
76400 @@ -2531,6 +3012,22 @@ int install_special_mapping(struct mm_struct *mm,
76401 vma->vm_start = addr;
76402 vma->vm_end = addr + len;
76403
76404 +#ifdef CONFIG_PAX_MPROTECT
76405 + if (mm->pax_flags & MF_PAX_MPROTECT) {
76406 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
76407 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
76408 + return -EPERM;
76409 + if (!(vm_flags & VM_EXEC))
76410 + vm_flags &= ~VM_MAYEXEC;
76411 +#else
76412 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
76413 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
76414 +#endif
76415 + else
76416 + vm_flags &= ~VM_MAYWRITE;
76417 + }
76418 +#endif
76419 +
76420 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
76421 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
76422
76423 diff --git a/mm/mprotect.c b/mm/mprotect.c
76424 index a409926..8b32e6d 100644
76425 --- a/mm/mprotect.c
76426 +++ b/mm/mprotect.c
76427 @@ -23,10 +23,17 @@
76428 #include <linux/mmu_notifier.h>
76429 #include <linux/migrate.h>
76430 #include <linux/perf_event.h>
76431 +
76432 +#ifdef CONFIG_PAX_MPROTECT
76433 +#include <linux/elf.h>
76434 +#include <linux/binfmts.h>
76435 +#endif
76436 +
76437 #include <asm/uaccess.h>
76438 #include <asm/pgtable.h>
76439 #include <asm/cacheflush.h>
76440 #include <asm/tlbflush.h>
76441 +#include <asm/mmu_context.h>
76442
76443 #ifndef pgprot_modify
76444 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
76445 @@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
76446 flush_tlb_range(vma, start, end);
76447 }
76448
76449 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
76450 +/* called while holding the mmap semaphor for writing except stack expansion */
76451 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
76452 +{
76453 + unsigned long oldlimit, newlimit = 0UL;
76454 +
76455 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
76456 + return;
76457 +
76458 + spin_lock(&mm->page_table_lock);
76459 + oldlimit = mm->context.user_cs_limit;
76460 + if ((prot & VM_EXEC) && oldlimit < end)
76461 + /* USER_CS limit moved up */
76462 + newlimit = end;
76463 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
76464 + /* USER_CS limit moved down */
76465 + newlimit = start;
76466 +
76467 + if (newlimit) {
76468 + mm->context.user_cs_limit = newlimit;
76469 +
76470 +#ifdef CONFIG_SMP
76471 + wmb();
76472 + cpus_clear(mm->context.cpu_user_cs_mask);
76473 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
76474 +#endif
76475 +
76476 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
76477 + }
76478 + spin_unlock(&mm->page_table_lock);
76479 + if (newlimit == end) {
76480 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
76481 +
76482 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
76483 + if (is_vm_hugetlb_page(vma))
76484 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
76485 + else
76486 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
76487 + }
76488 +}
76489 +#endif
76490 +
76491 int
76492 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76493 unsigned long start, unsigned long end, unsigned long newflags)
76494 @@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76495 int error;
76496 int dirty_accountable = 0;
76497
76498 +#ifdef CONFIG_PAX_SEGMEXEC
76499 + struct vm_area_struct *vma_m = NULL;
76500 + unsigned long start_m, end_m;
76501 +
76502 + start_m = start + SEGMEXEC_TASK_SIZE;
76503 + end_m = end + SEGMEXEC_TASK_SIZE;
76504 +#endif
76505 +
76506 if (newflags == oldflags) {
76507 *pprev = vma;
76508 return 0;
76509 }
76510
76511 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
76512 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
76513 +
76514 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
76515 + return -ENOMEM;
76516 +
76517 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
76518 + return -ENOMEM;
76519 + }
76520 +
76521 /*
76522 * If we make a private mapping writable we increase our commit;
76523 * but (without finer accounting) cannot reduce our commit if we
76524 @@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76525 }
76526 }
76527
76528 +#ifdef CONFIG_PAX_SEGMEXEC
76529 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
76530 + if (start != vma->vm_start) {
76531 + error = split_vma(mm, vma, start, 1);
76532 + if (error)
76533 + goto fail;
76534 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
76535 + *pprev = (*pprev)->vm_next;
76536 + }
76537 +
76538 + if (end != vma->vm_end) {
76539 + error = split_vma(mm, vma, end, 0);
76540 + if (error)
76541 + goto fail;
76542 + }
76543 +
76544 + if (pax_find_mirror_vma(vma)) {
76545 + error = __do_munmap(mm, start_m, end_m - start_m);
76546 + if (error)
76547 + goto fail;
76548 + } else {
76549 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76550 + if (!vma_m) {
76551 + error = -ENOMEM;
76552 + goto fail;
76553 + }
76554 + vma->vm_flags = newflags;
76555 + error = pax_mirror_vma(vma_m, vma);
76556 + if (error) {
76557 + vma->vm_flags = oldflags;
76558 + goto fail;
76559 + }
76560 + }
76561 + }
76562 +#endif
76563 +
76564 /*
76565 * First try to merge with previous and/or next vma.
76566 */
76567 @@ -204,9 +307,21 @@ success:
76568 * vm_flags and vm_page_prot are protected by the mmap_sem
76569 * held in write mode.
76570 */
76571 +
76572 +#ifdef CONFIG_PAX_SEGMEXEC
76573 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
76574 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
76575 +#endif
76576 +
76577 vma->vm_flags = newflags;
76578 +
76579 +#ifdef CONFIG_PAX_MPROTECT
76580 + if (mm->binfmt && mm->binfmt->handle_mprotect)
76581 + mm->binfmt->handle_mprotect(vma, newflags);
76582 +#endif
76583 +
76584 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
76585 - vm_get_page_prot(newflags));
76586 + vm_get_page_prot(vma->vm_flags));
76587
76588 if (vma_wants_writenotify(vma)) {
76589 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
76590 @@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76591 end = start + len;
76592 if (end <= start)
76593 return -ENOMEM;
76594 +
76595 +#ifdef CONFIG_PAX_SEGMEXEC
76596 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
76597 + if (end > SEGMEXEC_TASK_SIZE)
76598 + return -EINVAL;
76599 + } else
76600 +#endif
76601 +
76602 + if (end > TASK_SIZE)
76603 + return -EINVAL;
76604 +
76605 if (!arch_validate_prot(prot))
76606 return -EINVAL;
76607
76608 @@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76609 /*
76610 * Does the application expect PROT_READ to imply PROT_EXEC:
76611 */
76612 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
76613 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
76614 prot |= PROT_EXEC;
76615
76616 vm_flags = calc_vm_prot_bits(prot);
76617 @@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76618 if (start > vma->vm_start)
76619 prev = vma;
76620
76621 +#ifdef CONFIG_PAX_MPROTECT
76622 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
76623 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
76624 +#endif
76625 +
76626 for (nstart = start ; ; ) {
76627 unsigned long newflags;
76628
76629 @@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76630
76631 /* newflags >> 4 shift VM_MAY% in place of VM_% */
76632 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
76633 + if (prot & (PROT_WRITE | PROT_EXEC))
76634 + gr_log_rwxmprotect(vma->vm_file);
76635 +
76636 + error = -EACCES;
76637 + goto out;
76638 + }
76639 +
76640 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
76641 error = -EACCES;
76642 goto out;
76643 }
76644 @@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76645 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
76646 if (error)
76647 goto out;
76648 +
76649 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
76650 +
76651 nstart = tmp;
76652
76653 if (nstart < prev->vm_end)
76654 diff --git a/mm/mremap.c b/mm/mremap.c
76655 index 1b61c2d..1cc0e3c 100644
76656 --- a/mm/mremap.c
76657 +++ b/mm/mremap.c
76658 @@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
76659 continue;
76660 pte = ptep_get_and_clear(mm, old_addr, old_pte);
76661 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
76662 +
76663 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
76664 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
76665 + pte = pte_exprotect(pte);
76666 +#endif
76667 +
76668 set_pte_at(mm, new_addr, new_pte, pte);
76669 }
76670
76671 @@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
76672 if (is_vm_hugetlb_page(vma))
76673 goto Einval;
76674
76675 +#ifdef CONFIG_PAX_SEGMEXEC
76676 + if (pax_find_mirror_vma(vma))
76677 + goto Einval;
76678 +#endif
76679 +
76680 /* We can't remap across vm area boundaries */
76681 if (old_len > vma->vm_end - addr)
76682 goto Efault;
76683 @@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
76684 unsigned long ret = -EINVAL;
76685 unsigned long charged = 0;
76686 unsigned long map_flags;
76687 + unsigned long pax_task_size = TASK_SIZE;
76688
76689 if (new_addr & ~PAGE_MASK)
76690 goto out;
76691
76692 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
76693 +#ifdef CONFIG_PAX_SEGMEXEC
76694 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
76695 + pax_task_size = SEGMEXEC_TASK_SIZE;
76696 +#endif
76697 +
76698 + pax_task_size -= PAGE_SIZE;
76699 +
76700 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
76701 goto out;
76702
76703 /* Check if the location we're moving into overlaps the
76704 * old location at all, and fail if it does.
76705 */
76706 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
76707 - goto out;
76708 -
76709 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
76710 + if (addr + old_len > new_addr && new_addr + new_len > addr)
76711 goto out;
76712
76713 ret = do_munmap(mm, new_addr, new_len);
76714 @@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76715 struct vm_area_struct *vma;
76716 unsigned long ret = -EINVAL;
76717 unsigned long charged = 0;
76718 + unsigned long pax_task_size = TASK_SIZE;
76719
76720 down_write(&current->mm->mmap_sem);
76721
76722 @@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76723 if (!new_len)
76724 goto out;
76725
76726 +#ifdef CONFIG_PAX_SEGMEXEC
76727 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
76728 + pax_task_size = SEGMEXEC_TASK_SIZE;
76729 +#endif
76730 +
76731 + pax_task_size -= PAGE_SIZE;
76732 +
76733 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
76734 + old_len > pax_task_size || addr > pax_task_size-old_len)
76735 + goto out;
76736 +
76737 if (flags & MREMAP_FIXED) {
76738 if (flags & MREMAP_MAYMOVE)
76739 ret = mremap_to(addr, old_len, new_addr, new_len);
76740 @@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76741 addr + new_len);
76742 }
76743 ret = addr;
76744 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
76745 goto out;
76746 }
76747 }
76748 @@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76749 goto out;
76750 }
76751
76752 + map_flags = vma->vm_flags;
76753 ret = move_vma(vma, addr, old_len, new_len, new_addr);
76754 + if (!(ret & ~PAGE_MASK)) {
76755 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
76756 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
76757 + }
76758 }
76759 out:
76760 if (ret & ~PAGE_MASK)
76761 diff --git a/mm/nommu.c b/mm/nommu.c
76762 index 45131b4..c521665 100644
76763 --- a/mm/nommu.c
76764 +++ b/mm/nommu.c
76765 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
76766 int sysctl_overcommit_ratio = 50; /* default is 50% */
76767 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
76768 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
76769 -int heap_stack_gap = 0;
76770
76771 atomic_long_t mmap_pages_allocated;
76772
76773 @@ -824,15 +823,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
76774 EXPORT_SYMBOL(find_vma);
76775
76776 /*
76777 - * find a VMA
76778 - * - we don't extend stack VMAs under NOMMU conditions
76779 - */
76780 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
76781 -{
76782 - return find_vma(mm, addr);
76783 -}
76784 -
76785 -/*
76786 * expand a stack to a given address
76787 * - not supported under NOMMU conditions
76788 */
76789 @@ -1540,6 +1530,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
76790
76791 /* most fields are the same, copy all, and then fixup */
76792 *new = *vma;
76793 + INIT_LIST_HEAD(&new->anon_vma_chain);
76794 *region = *vma->vm_region;
76795 new->vm_region = region;
76796
76797 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
76798 index 7e208f0..d96f232 100644
76799 --- a/mm/page_alloc.c
76800 +++ b/mm/page_alloc.c
76801 @@ -340,7 +340,7 @@ out:
76802 * This usage means that zero-order pages may not be compound.
76803 */
76804
76805 -static void free_compound_page(struct page *page)
76806 +void free_compound_page(struct page *page)
76807 {
76808 __free_pages_ok(page, compound_order(page));
76809 }
76810 @@ -693,6 +693,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
76811 int i;
76812 int bad = 0;
76813
76814 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
76815 + unsigned long index = 1UL << order;
76816 +#endif
76817 +
76818 trace_mm_page_free(page, order);
76819 kmemcheck_free_shadow(page, order);
76820
76821 @@ -708,6 +712,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
76822 debug_check_no_obj_freed(page_address(page),
76823 PAGE_SIZE << order);
76824 }
76825 +
76826 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
76827 + for (; index; --index)
76828 + sanitize_highpage(page + index - 1);
76829 +#endif
76830 +
76831 arch_free_page(page, order);
76832 kernel_map_pages(page, 1 << order, 0);
76833
76834 @@ -849,8 +859,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
76835 arch_alloc_page(page, order);
76836 kernel_map_pages(page, 1 << order, 1);
76837
76838 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
76839 if (gfp_flags & __GFP_ZERO)
76840 prep_zero_page(page, order, gfp_flags);
76841 +#endif
76842
76843 if (order && (gfp_flags & __GFP_COMP))
76844 prep_compound_page(page, order);
76845 @@ -3703,7 +3715,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
76846 unsigned long pfn;
76847
76848 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
76849 +#ifdef CONFIG_X86_32
76850 + /* boot failures in VMware 8 on 32bit vanilla since
76851 + this change */
76852 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
76853 +#else
76854 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
76855 +#endif
76856 return 1;
76857 }
76858 return 0;
76859 diff --git a/mm/percpu.c b/mm/percpu.c
76860 index ddc5efb..f632d2c 100644
76861 --- a/mm/percpu.c
76862 +++ b/mm/percpu.c
76863 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
76864 static unsigned int pcpu_high_unit_cpu __read_mostly;
76865
76866 /* the address of the first chunk which starts with the kernel static area */
76867 -void *pcpu_base_addr __read_mostly;
76868 +void *pcpu_base_addr __read_only;
76869 EXPORT_SYMBOL_GPL(pcpu_base_addr);
76870
76871 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
76872 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
76873 index 926b466..b23df53 100644
76874 --- a/mm/process_vm_access.c
76875 +++ b/mm/process_vm_access.c
76876 @@ -13,6 +13,7 @@
76877 #include <linux/uio.h>
76878 #include <linux/sched.h>
76879 #include <linux/highmem.h>
76880 +#include <linux/security.h>
76881 #include <linux/ptrace.h>
76882 #include <linux/slab.h>
76883 #include <linux/syscalls.h>
76884 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
76885 size_t iov_l_curr_offset = 0;
76886 ssize_t iov_len;
76887
76888 + return -ENOSYS; // PaX: until properly audited
76889 +
76890 /*
76891 * Work out how many pages of struct pages we're going to need
76892 * when eventually calling get_user_pages
76893 */
76894 for (i = 0; i < riovcnt; i++) {
76895 iov_len = rvec[i].iov_len;
76896 - if (iov_len > 0) {
76897 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
76898 - + iov_len)
76899 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
76900 - / PAGE_SIZE + 1;
76901 - nr_pages = max(nr_pages, nr_pages_iov);
76902 - }
76903 + if (iov_len <= 0)
76904 + continue;
76905 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
76906 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
76907 + nr_pages = max(nr_pages, nr_pages_iov);
76908 }
76909
76910 if (nr_pages == 0)
76911 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
76912 goto free_proc_pages;
76913 }
76914
76915 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
76916 + rc = -EPERM;
76917 + goto put_task_struct;
76918 + }
76919 +
76920 mm = mm_access(task, PTRACE_MODE_ATTACH);
76921 if (!mm || IS_ERR(mm)) {
76922 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
76923 diff --git a/mm/rmap.c b/mm/rmap.c
76924 index 2ee1ef0..2e175ba 100644
76925 --- a/mm/rmap.c
76926 +++ b/mm/rmap.c
76927 @@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76928 struct anon_vma *anon_vma = vma->anon_vma;
76929 struct anon_vma_chain *avc;
76930
76931 +#ifdef CONFIG_PAX_SEGMEXEC
76932 + struct anon_vma_chain *avc_m = NULL;
76933 +#endif
76934 +
76935 might_sleep();
76936 if (unlikely(!anon_vma)) {
76937 struct mm_struct *mm = vma->vm_mm;
76938 @@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76939 if (!avc)
76940 goto out_enomem;
76941
76942 +#ifdef CONFIG_PAX_SEGMEXEC
76943 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
76944 + if (!avc_m)
76945 + goto out_enomem_free_avc;
76946 +#endif
76947 +
76948 anon_vma = find_mergeable_anon_vma(vma);
76949 allocated = NULL;
76950 if (!anon_vma) {
76951 @@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76952 /* page_table_lock to protect against threads */
76953 spin_lock(&mm->page_table_lock);
76954 if (likely(!vma->anon_vma)) {
76955 +
76956 +#ifdef CONFIG_PAX_SEGMEXEC
76957 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
76958 +
76959 + if (vma_m) {
76960 + BUG_ON(vma_m->anon_vma);
76961 + vma_m->anon_vma = anon_vma;
76962 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
76963 + avc_m = NULL;
76964 + }
76965 +#endif
76966 +
76967 vma->anon_vma = anon_vma;
76968 anon_vma_chain_link(vma, avc, anon_vma);
76969 allocated = NULL;
76970 @@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76971
76972 if (unlikely(allocated))
76973 put_anon_vma(allocated);
76974 +
76975 +#ifdef CONFIG_PAX_SEGMEXEC
76976 + if (unlikely(avc_m))
76977 + anon_vma_chain_free(avc_m);
76978 +#endif
76979 +
76980 if (unlikely(avc))
76981 anon_vma_chain_free(avc);
76982 }
76983 return 0;
76984
76985 out_enomem_free_avc:
76986 +
76987 +#ifdef CONFIG_PAX_SEGMEXEC
76988 + if (avc_m)
76989 + anon_vma_chain_free(avc_m);
76990 +#endif
76991 +
76992 anon_vma_chain_free(avc);
76993 out_enomem:
76994 return -ENOMEM;
76995 @@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
76996 * Attach the anon_vmas from src to dst.
76997 * Returns 0 on success, -ENOMEM on failure.
76998 */
76999 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
77000 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
77001 {
77002 struct anon_vma_chain *avc, *pavc;
77003 struct anon_vma *root = NULL;
77004 @@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
77005 * the corresponding VMA in the parent process is attached to.
77006 * Returns 0 on success, non-zero on failure.
77007 */
77008 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
77009 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
77010 {
77011 struct anon_vma_chain *avc;
77012 struct anon_vma *anon_vma;
77013 diff --git a/mm/shmem.c b/mm/shmem.c
77014 index 50c5b8f..0bc87f7 100644
77015 --- a/mm/shmem.c
77016 +++ b/mm/shmem.c
77017 @@ -31,7 +31,7 @@
77018 #include <linux/export.h>
77019 #include <linux/swap.h>
77020
77021 -static struct vfsmount *shm_mnt;
77022 +struct vfsmount *shm_mnt;
77023
77024 #ifdef CONFIG_SHMEM
77025 /*
77026 @@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
77027 #define BOGO_DIRENT_SIZE 20
77028
77029 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
77030 -#define SHORT_SYMLINK_LEN 128
77031 +#define SHORT_SYMLINK_LEN 64
77032
77033 /*
77034 * shmem_fallocate and shmem_writepage communicate via inode->i_private
77035 @@ -2112,6 +2112,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
77036 static int shmem_xattr_validate(const char *name)
77037 {
77038 struct { const char *prefix; size_t len; } arr[] = {
77039 +
77040 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
77041 + { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
77042 +#endif
77043 +
77044 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
77045 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
77046 };
77047 @@ -2167,6 +2172,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
77048 if (err)
77049 return err;
77050
77051 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
77052 + if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
77053 + if (strcmp(name, XATTR_NAME_PAX_FLAGS))
77054 + return -EOPNOTSUPP;
77055 + if (size > 8)
77056 + return -EINVAL;
77057 + }
77058 +#endif
77059 +
77060 return simple_xattr_set(&info->xattrs, name, value, size, flags);
77061 }
77062
77063 @@ -2466,8 +2480,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
77064 int err = -ENOMEM;
77065
77066 /* Round up to L1_CACHE_BYTES to resist false sharing */
77067 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
77068 - L1_CACHE_BYTES), GFP_KERNEL);
77069 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
77070 if (!sbinfo)
77071 return -ENOMEM;
77072
77073 diff --git a/mm/slab.c b/mm/slab.c
77074 index 33d3363..93c6810 100644
77075 --- a/mm/slab.c
77076 +++ b/mm/slab.c
77077 @@ -164,7 +164,7 @@ static bool pfmemalloc_active __read_mostly;
77078
77079 /* Legal flag mask for kmem_cache_create(). */
77080 #if DEBUG
77081 -# define CREATE_MASK (SLAB_RED_ZONE | \
77082 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
77083 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
77084 SLAB_CACHE_DMA | \
77085 SLAB_STORE_USER | \
77086 @@ -172,7 +172,7 @@ static bool pfmemalloc_active __read_mostly;
77087 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
77088 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
77089 #else
77090 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
77091 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
77092 SLAB_CACHE_DMA | \
77093 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
77094 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
77095 @@ -322,7 +322,7 @@ struct kmem_list3 {
77096 * Need this for bootstrapping a per node allocator.
77097 */
77098 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
77099 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
77100 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
77101 #define CACHE_CACHE 0
77102 #define SIZE_AC MAX_NUMNODES
77103 #define SIZE_L3 (2 * MAX_NUMNODES)
77104 @@ -423,10 +423,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
77105 if ((x)->max_freeable < i) \
77106 (x)->max_freeable = i; \
77107 } while (0)
77108 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
77109 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
77110 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
77111 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
77112 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
77113 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
77114 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
77115 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
77116 #else
77117 #define STATS_INC_ACTIVE(x) do { } while (0)
77118 #define STATS_DEC_ACTIVE(x) do { } while (0)
77119 @@ -534,7 +534,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
77120 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
77121 */
77122 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
77123 - const struct slab *slab, void *obj)
77124 + const struct slab *slab, const void *obj)
77125 {
77126 u32 offset = (obj - slab->s_mem);
77127 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
77128 @@ -555,12 +555,13 @@ EXPORT_SYMBOL(malloc_sizes);
77129 struct cache_names {
77130 char *name;
77131 char *name_dma;
77132 + char *name_usercopy;
77133 };
77134
77135 static struct cache_names __initdata cache_names[] = {
77136 -#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
77137 +#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
77138 #include <linux/kmalloc_sizes.h>
77139 - {NULL,}
77140 + {NULL}
77141 #undef CACHE
77142 };
77143
77144 @@ -721,6 +722,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
77145 if (unlikely(gfpflags & GFP_DMA))
77146 return csizep->cs_dmacachep;
77147 #endif
77148 +
77149 +#ifdef CONFIG_PAX_USERCOPY_SLABS
77150 + if (unlikely(gfpflags & GFP_USERCOPY))
77151 + return csizep->cs_usercopycachep;
77152 +#endif
77153 +
77154 return csizep->cs_cachep;
77155 }
77156
77157 @@ -1676,7 +1683,7 @@ void __init kmem_cache_init(void)
77158 sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
77159 sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
77160 sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
77161 - __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
77162 + __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
77163 list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
77164
77165 if (INDEX_AC != INDEX_L3) {
77166 @@ -1685,7 +1692,7 @@ void __init kmem_cache_init(void)
77167 sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
77168 sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
77169 sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
77170 - __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
77171 + __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
77172 list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
77173 }
77174
77175 @@ -1705,7 +1712,7 @@ void __init kmem_cache_init(void)
77176 sizes->cs_cachep->size = sizes->cs_size;
77177 sizes->cs_cachep->object_size = sizes->cs_size;
77178 sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
77179 - __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
77180 + __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
77181 list_add(&sizes->cs_cachep->list, &slab_caches);
77182 }
77183 #ifdef CONFIG_ZONE_DMA
77184 @@ -1718,6 +1725,17 @@ void __init kmem_cache_init(void)
77185 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
77186 list_add(&sizes->cs_dmacachep->list, &slab_caches);
77187 #endif
77188 +
77189 +#ifdef CONFIG_PAX_USERCOPY_SLABS
77190 + sizes->cs_usercopycachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
77191 + sizes->cs_usercopycachep->name = names->name_dma;
77192 + sizes->cs_usercopycachep->size = sizes->cs_size;
77193 + sizes->cs_usercopycachep->object_size = sizes->cs_size;
77194 + sizes->cs_usercopycachep->align = ARCH_KMALLOC_MINALIGN;
77195 + __kmem_cache_create(sizes->cs_usercopycachep, ARCH_KMALLOC_FLAGS| SLAB_PANIC|SLAB_USERCOPY);
77196 + list_add(&sizes->cs_usercopycachep->list, &slab_caches);
77197 +#endif
77198 +
77199 sizes++;
77200 names++;
77201 }
77202 @@ -4405,10 +4423,10 @@ static int s_show(struct seq_file *m, void *p)
77203 }
77204 /* cpu stats */
77205 {
77206 - unsigned long allochit = atomic_read(&cachep->allochit);
77207 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
77208 - unsigned long freehit = atomic_read(&cachep->freehit);
77209 - unsigned long freemiss = atomic_read(&cachep->freemiss);
77210 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
77211 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
77212 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
77213 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
77214
77215 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
77216 allochit, allocmiss, freehit, freemiss);
77217 @@ -4667,13 +4685,71 @@ static int __init slab_proc_init(void)
77218 {
77219 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
77220 #ifdef CONFIG_DEBUG_SLAB_LEAK
77221 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
77222 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
77223 #endif
77224 return 0;
77225 }
77226 module_init(slab_proc_init);
77227 #endif
77228
77229 +bool is_usercopy_object(const void *ptr)
77230 +{
77231 + struct page *page;
77232 + struct kmem_cache *cachep;
77233 +
77234 + if (ZERO_OR_NULL_PTR(ptr))
77235 + return false;
77236 +
77237 + if (!slab_is_available())
77238 + return false;
77239 +
77240 + if (!virt_addr_valid(ptr))
77241 + return false;
77242 +
77243 + page = virt_to_head_page(ptr);
77244 +
77245 + if (!PageSlab(page))
77246 + return false;
77247 +
77248 + cachep = page->slab_cache;
77249 + return cachep->flags & SLAB_USERCOPY;
77250 +}
77251 +
77252 +#ifdef CONFIG_PAX_USERCOPY
77253 +const char *check_heap_object(const void *ptr, unsigned long n)
77254 +{
77255 + struct page *page;
77256 + struct kmem_cache *cachep;
77257 + struct slab *slabp;
77258 + unsigned int objnr;
77259 + unsigned long offset;
77260 +
77261 + if (ZERO_OR_NULL_PTR(ptr))
77262 + return "<null>";
77263 +
77264 + if (!virt_addr_valid(ptr))
77265 + return NULL;
77266 +
77267 + page = virt_to_head_page(ptr);
77268 +
77269 + if (!PageSlab(page))
77270 + return NULL;
77271 +
77272 + cachep = page->slab_cache;
77273 + if (!(cachep->flags & SLAB_USERCOPY))
77274 + return cachep->name;
77275 +
77276 + slabp = page->slab_page;
77277 + objnr = obj_to_index(cachep, slabp, ptr);
77278 + BUG_ON(objnr >= cachep->num);
77279 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
77280 + if (offset <= cachep->object_size && n <= cachep->object_size - offset)
77281 + return NULL;
77282 +
77283 + return cachep->name;
77284 +}
77285 +#endif
77286 +
77287 /**
77288 * ksize - get the actual amount of memory allocated for a given object
77289 * @objp: Pointer to the object
77290 diff --git a/mm/slab_common.c b/mm/slab_common.c
77291 index 069a24e6..226a310 100644
77292 --- a/mm/slab_common.c
77293 +++ b/mm/slab_common.c
77294 @@ -127,7 +127,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
77295 err = __kmem_cache_create(s, flags);
77296 if (!err) {
77297
77298 - s->refcount = 1;
77299 + atomic_set(&s->refcount, 1);
77300 list_add(&s->list, &slab_caches);
77301
77302 } else {
77303 @@ -163,8 +163,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
77304 {
77305 get_online_cpus();
77306 mutex_lock(&slab_mutex);
77307 - s->refcount--;
77308 - if (!s->refcount) {
77309 + if (atomic_dec_and_test(&s->refcount)) {
77310 list_del(&s->list);
77311
77312 if (!__kmem_cache_shutdown(s)) {
77313 diff --git a/mm/slob.c b/mm/slob.c
77314 index 1e921c5..1ce12c2 100644
77315 --- a/mm/slob.c
77316 +++ b/mm/slob.c
77317 @@ -159,7 +159,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
77318 /*
77319 * Return the size of a slob block.
77320 */
77321 -static slobidx_t slob_units(slob_t *s)
77322 +static slobidx_t slob_units(const slob_t *s)
77323 {
77324 if (s->units > 0)
77325 return s->units;
77326 @@ -169,7 +169,7 @@ static slobidx_t slob_units(slob_t *s)
77327 /*
77328 * Return the next free slob block pointer after this one.
77329 */
77330 -static slob_t *slob_next(slob_t *s)
77331 +static slob_t *slob_next(const slob_t *s)
77332 {
77333 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
77334 slobidx_t next;
77335 @@ -184,14 +184,14 @@ static slob_t *slob_next(slob_t *s)
77336 /*
77337 * Returns true if s is the last free block in its page.
77338 */
77339 -static int slob_last(slob_t *s)
77340 +static int slob_last(const slob_t *s)
77341 {
77342 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
77343 }
77344
77345 -static void *slob_new_pages(gfp_t gfp, int order, int node)
77346 +static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
77347 {
77348 - void *page;
77349 + struct page *page;
77350
77351 #ifdef CONFIG_NUMA
77352 if (node != NUMA_NO_NODE)
77353 @@ -203,14 +203,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
77354 if (!page)
77355 return NULL;
77356
77357 - return page_address(page);
77358 + __SetPageSlab(page);
77359 + return page;
77360 }
77361
77362 -static void slob_free_pages(void *b, int order)
77363 +static void slob_free_pages(struct page *sp, int order)
77364 {
77365 if (current->reclaim_state)
77366 current->reclaim_state->reclaimed_slab += 1 << order;
77367 - free_pages((unsigned long)b, order);
77368 + __ClearPageSlab(sp);
77369 + reset_page_mapcount(sp);
77370 + sp->private = 0;
77371 + __free_pages(sp, order);
77372 }
77373
77374 /*
77375 @@ -315,15 +319,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
77376
77377 /* Not enough space: must allocate a new page */
77378 if (!b) {
77379 - b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
77380 - if (!b)
77381 + sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
77382 + if (!sp)
77383 return NULL;
77384 - sp = virt_to_page(b);
77385 - __SetPageSlab(sp);
77386 + b = page_address(sp);
77387
77388 spin_lock_irqsave(&slob_lock, flags);
77389 sp->units = SLOB_UNITS(PAGE_SIZE);
77390 sp->freelist = b;
77391 + sp->private = 0;
77392 INIT_LIST_HEAD(&sp->list);
77393 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
77394 set_slob_page_free(sp, slob_list);
77395 @@ -361,9 +365,7 @@ static void slob_free(void *block, int size)
77396 if (slob_page_free(sp))
77397 clear_slob_page_free(sp);
77398 spin_unlock_irqrestore(&slob_lock, flags);
77399 - __ClearPageSlab(sp);
77400 - reset_page_mapcount(sp);
77401 - slob_free_pages(b, 0);
77402 + slob_free_pages(sp, 0);
77403 return;
77404 }
77405
77406 @@ -426,11 +428,10 @@ out:
77407 */
77408
77409 static __always_inline void *
77410 -__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77411 +__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
77412 {
77413 - unsigned int *m;
77414 - int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77415 - void *ret;
77416 + slob_t *m;
77417 + void *ret = NULL;
77418
77419 gfp &= gfp_allowed_mask;
77420
77421 @@ -444,20 +445,23 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77422
77423 if (!m)
77424 return NULL;
77425 - *m = size;
77426 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
77427 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
77428 + m[0].units = size;
77429 + m[1].units = align;
77430 ret = (void *)m + align;
77431
77432 trace_kmalloc_node(caller, ret,
77433 size, size + align, gfp, node);
77434 } else {
77435 unsigned int order = get_order(size);
77436 + struct page *page;
77437
77438 if (likely(order))
77439 gfp |= __GFP_COMP;
77440 - ret = slob_new_pages(gfp, order, node);
77441 - if (ret) {
77442 - struct page *page;
77443 - page = virt_to_page(ret);
77444 + page = slob_new_pages(gfp, order, node);
77445 + if (page) {
77446 + ret = page_address(page);
77447 page->private = size;
77448 }
77449
77450 @@ -465,7 +469,17 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77451 size, PAGE_SIZE << order, gfp, node);
77452 }
77453
77454 - kmemleak_alloc(ret, size, 1, gfp);
77455 + return ret;
77456 +}
77457 +
77458 +static __always_inline void *
77459 +__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77460 +{
77461 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77462 + void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
77463 +
77464 + if (!ZERO_OR_NULL_PTR(ret))
77465 + kmemleak_alloc(ret, size, 1, gfp);
77466 return ret;
77467 }
77468
77469 @@ -501,15 +515,91 @@ void kfree(const void *block)
77470 kmemleak_free(block);
77471
77472 sp = virt_to_page(block);
77473 - if (PageSlab(sp)) {
77474 + VM_BUG_ON(!PageSlab(sp));
77475 + if (!sp->private) {
77476 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77477 - unsigned int *m = (unsigned int *)(block - align);
77478 - slob_free(m, *m + align);
77479 - } else
77480 + slob_t *m = (slob_t *)(block - align);
77481 + slob_free(m, m[0].units + align);
77482 + } else {
77483 + __ClearPageSlab(sp);
77484 + reset_page_mapcount(sp);
77485 + sp->private = 0;
77486 put_page(sp);
77487 + }
77488 }
77489 EXPORT_SYMBOL(kfree);
77490
77491 +bool is_usercopy_object(const void *ptr)
77492 +{
77493 + if (!slab_is_available())
77494 + return false;
77495 +
77496 + // PAX: TODO
77497 +
77498 + return false;
77499 +}
77500 +
77501 +#ifdef CONFIG_PAX_USERCOPY
77502 +const char *check_heap_object(const void *ptr, unsigned long n)
77503 +{
77504 + struct page *page;
77505 + const slob_t *free;
77506 + const void *base;
77507 + unsigned long flags;
77508 +
77509 + if (ZERO_OR_NULL_PTR(ptr))
77510 + return "<null>";
77511 +
77512 + if (!virt_addr_valid(ptr))
77513 + return NULL;
77514 +
77515 + page = virt_to_head_page(ptr);
77516 + if (!PageSlab(page))
77517 + return NULL;
77518 +
77519 + if (page->private) {
77520 + base = page;
77521 + if (base <= ptr && n <= page->private - (ptr - base))
77522 + return NULL;
77523 + return "<slob>";
77524 + }
77525 +
77526 + /* some tricky double walking to find the chunk */
77527 + spin_lock_irqsave(&slob_lock, flags);
77528 + base = (void *)((unsigned long)ptr & PAGE_MASK);
77529 + free = page->freelist;
77530 +
77531 + while (!slob_last(free) && (void *)free <= ptr) {
77532 + base = free + slob_units(free);
77533 + free = slob_next(free);
77534 + }
77535 +
77536 + while (base < (void *)free) {
77537 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
77538 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
77539 + int offset;
77540 +
77541 + if (ptr < base + align)
77542 + break;
77543 +
77544 + offset = ptr - base - align;
77545 + if (offset >= m) {
77546 + base += size;
77547 + continue;
77548 + }
77549 +
77550 + if (n > m - offset)
77551 + break;
77552 +
77553 + spin_unlock_irqrestore(&slob_lock, flags);
77554 + return NULL;
77555 + }
77556 +
77557 + spin_unlock_irqrestore(&slob_lock, flags);
77558 + return "<slob>";
77559 +}
77560 +#endif
77561 +
77562 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
77563 size_t ksize(const void *block)
77564 {
77565 @@ -520,10 +610,11 @@ size_t ksize(const void *block)
77566 return 0;
77567
77568 sp = virt_to_page(block);
77569 - if (PageSlab(sp)) {
77570 + VM_BUG_ON(!PageSlab(sp));
77571 + if (!sp->private) {
77572 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77573 - unsigned int *m = (unsigned int *)(block - align);
77574 - return SLOB_UNITS(*m) * SLOB_UNIT;
77575 + slob_t *m = (slob_t *)(block - align);
77576 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
77577 } else
77578 return sp->private;
77579 }
77580 @@ -550,23 +641,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
77581
77582 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
77583 {
77584 - void *b;
77585 + void *b = NULL;
77586
77587 flags &= gfp_allowed_mask;
77588
77589 lockdep_trace_alloc(flags);
77590
77591 +#ifdef CONFIG_PAX_USERCOPY_SLABS
77592 + b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
77593 +#else
77594 if (c->size < PAGE_SIZE) {
77595 b = slob_alloc(c->size, flags, c->align, node);
77596 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
77597 SLOB_UNITS(c->size) * SLOB_UNIT,
77598 flags, node);
77599 } else {
77600 - b = slob_new_pages(flags, get_order(c->size), node);
77601 + struct page *sp;
77602 +
77603 + sp = slob_new_pages(flags, get_order(c->size), node);
77604 + if (sp) {
77605 + b = page_address(sp);
77606 + sp->private = c->size;
77607 + }
77608 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
77609 PAGE_SIZE << get_order(c->size),
77610 flags, node);
77611 }
77612 +#endif
77613
77614 if (c->ctor)
77615 c->ctor(b);
77616 @@ -578,10 +679,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
77617
77618 static void __kmem_cache_free(void *b, int size)
77619 {
77620 - if (size < PAGE_SIZE)
77621 + struct page *sp;
77622 +
77623 + sp = virt_to_page(b);
77624 + BUG_ON(!PageSlab(sp));
77625 + if (!sp->private)
77626 slob_free(b, size);
77627 else
77628 - slob_free_pages(b, get_order(size));
77629 + slob_free_pages(sp, get_order(size));
77630 }
77631
77632 static void kmem_rcu_free(struct rcu_head *head)
77633 @@ -594,17 +699,31 @@ static void kmem_rcu_free(struct rcu_head *head)
77634
77635 void kmem_cache_free(struct kmem_cache *c, void *b)
77636 {
77637 + int size = c->size;
77638 +
77639 +#ifdef CONFIG_PAX_USERCOPY_SLABS
77640 + if (size + c->align < PAGE_SIZE) {
77641 + size += c->align;
77642 + b -= c->align;
77643 + }
77644 +#endif
77645 +
77646 kmemleak_free_recursive(b, c->flags);
77647 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
77648 struct slob_rcu *slob_rcu;
77649 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
77650 - slob_rcu->size = c->size;
77651 + slob_rcu = b + (size - sizeof(struct slob_rcu));
77652 + slob_rcu->size = size;
77653 call_rcu(&slob_rcu->head, kmem_rcu_free);
77654 } else {
77655 - __kmem_cache_free(b, c->size);
77656 + __kmem_cache_free(b, size);
77657 }
77658
77659 +#ifdef CONFIG_PAX_USERCOPY_SLABS
77660 + trace_kfree(_RET_IP_, b);
77661 +#else
77662 trace_kmem_cache_free(_RET_IP_, b);
77663 +#endif
77664 +
77665 }
77666 EXPORT_SYMBOL(kmem_cache_free);
77667
77668 diff --git a/mm/slub.c b/mm/slub.c
77669 index a0d6984..e280e5d 100644
77670 --- a/mm/slub.c
77671 +++ b/mm/slub.c
77672 @@ -201,7 +201,7 @@ struct track {
77673
77674 enum track_item { TRACK_ALLOC, TRACK_FREE };
77675
77676 -#ifdef CONFIG_SYSFS
77677 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77678 static int sysfs_slab_add(struct kmem_cache *);
77679 static int sysfs_slab_alias(struct kmem_cache *, const char *);
77680 static void sysfs_slab_remove(struct kmem_cache *);
77681 @@ -521,7 +521,7 @@ static void print_track(const char *s, struct track *t)
77682 if (!t->addr)
77683 return;
77684
77685 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
77686 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
77687 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
77688 #ifdef CONFIG_STACKTRACE
77689 {
77690 @@ -2623,6 +2623,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
77691
77692 page = virt_to_head_page(x);
77693
77694 + BUG_ON(!PageSlab(page));
77695 +
77696 if (kmem_cache_debug(s) && page->slab != s) {
77697 pr_err("kmem_cache_free: Wrong slab cache. %s but object"
77698 " is from %s\n", page->slab->name, s->name);
77699 @@ -2663,7 +2665,7 @@ static int slub_min_objects;
77700 * Merge control. If this is set then no merging of slab caches will occur.
77701 * (Could be removed. This was introduced to pacify the merge skeptics.)
77702 */
77703 -static int slub_nomerge;
77704 +static int slub_nomerge = 1;
77705
77706 /*
77707 * Calculate the order of allocation given an slab object size.
77708 @@ -3225,6 +3227,10 @@ EXPORT_SYMBOL(kmalloc_caches);
77709 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
77710 #endif
77711
77712 +#ifdef CONFIG_PAX_USERCOPY_SLABS
77713 +static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
77714 +#endif
77715 +
77716 static int __init setup_slub_min_order(char *str)
77717 {
77718 get_option(&str, &slub_min_order);
77719 @@ -3342,6 +3348,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
77720 return kmalloc_dma_caches[index];
77721
77722 #endif
77723 +
77724 +#ifdef CONFIG_PAX_USERCOPY_SLABS
77725 + if (flags & SLAB_USERCOPY)
77726 + return kmalloc_usercopy_caches[index];
77727 +
77728 +#endif
77729 +
77730 return kmalloc_caches[index];
77731 }
77732
77733 @@ -3410,6 +3423,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
77734 EXPORT_SYMBOL(__kmalloc_node);
77735 #endif
77736
77737 +bool is_usercopy_object(const void *ptr)
77738 +{
77739 + struct page *page;
77740 + struct kmem_cache *s;
77741 +
77742 + if (ZERO_OR_NULL_PTR(ptr))
77743 + return false;
77744 +
77745 + if (!slab_is_available())
77746 + return false;
77747 +
77748 + if (!virt_addr_valid(ptr))
77749 + return false;
77750 +
77751 + page = virt_to_head_page(ptr);
77752 +
77753 + if (!PageSlab(page))
77754 + return false;
77755 +
77756 + s = page->slab;
77757 + return s->flags & SLAB_USERCOPY;
77758 +}
77759 +
77760 +#ifdef CONFIG_PAX_USERCOPY
77761 +const char *check_heap_object(const void *ptr, unsigned long n)
77762 +{
77763 + struct page *page;
77764 + struct kmem_cache *s;
77765 + unsigned long offset;
77766 +
77767 + if (ZERO_OR_NULL_PTR(ptr))
77768 + return "<null>";
77769 +
77770 + if (!virt_addr_valid(ptr))
77771 + return NULL;
77772 +
77773 + page = virt_to_head_page(ptr);
77774 +
77775 + if (!PageSlab(page))
77776 + return NULL;
77777 +
77778 + s = page->slab;
77779 + if (!(s->flags & SLAB_USERCOPY))
77780 + return s->name;
77781 +
77782 + offset = (ptr - page_address(page)) % s->size;
77783 + if (offset <= s->object_size && n <= s->object_size - offset)
77784 + return NULL;
77785 +
77786 + return s->name;
77787 +}
77788 +#endif
77789 +
77790 size_t ksize(const void *object)
77791 {
77792 struct page *page;
77793 @@ -3684,7 +3750,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
77794 int node;
77795
77796 list_add(&s->list, &slab_caches);
77797 - s->refcount = -1;
77798 + atomic_set(&s->refcount, -1);
77799
77800 for_each_node_state(node, N_NORMAL_MEMORY) {
77801 struct kmem_cache_node *n = get_node(s, node);
77802 @@ -3807,17 +3873,17 @@ void __init kmem_cache_init(void)
77803
77804 /* Caches that are not of the two-to-the-power-of size */
77805 if (KMALLOC_MIN_SIZE <= 32) {
77806 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
77807 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
77808 caches++;
77809 }
77810
77811 if (KMALLOC_MIN_SIZE <= 64) {
77812 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
77813 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
77814 caches++;
77815 }
77816
77817 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
77818 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
77819 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
77820 caches++;
77821 }
77822
77823 @@ -3859,6 +3925,22 @@ void __init kmem_cache_init(void)
77824 }
77825 }
77826 #endif
77827 +
77828 +#ifdef CONFIG_PAX_USERCOPY_SLABS
77829 + for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
77830 + struct kmem_cache *s = kmalloc_caches[i];
77831 +
77832 + if (s && s->size) {
77833 + char *name = kasprintf(GFP_NOWAIT,
77834 + "usercopy-kmalloc-%d", s->object_size);
77835 +
77836 + BUG_ON(!name);
77837 + kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
77838 + s->object_size, SLAB_USERCOPY);
77839 + }
77840 + }
77841 +#endif
77842 +
77843 printk(KERN_INFO
77844 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
77845 " CPUs=%d, Nodes=%d\n",
77846 @@ -3885,7 +3967,7 @@ static int slab_unmergeable(struct kmem_cache *s)
77847 /*
77848 * We may have set a slab to be unmergeable during bootstrap.
77849 */
77850 - if (s->refcount < 0)
77851 + if (atomic_read(&s->refcount) < 0)
77852 return 1;
77853
77854 return 0;
77855 @@ -3939,7 +4021,7 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
77856
77857 s = find_mergeable(size, align, flags, name, ctor);
77858 if (s) {
77859 - s->refcount++;
77860 + atomic_inc(&s->refcount);
77861 /*
77862 * Adjust the object sizes so that we clear
77863 * the complete object on kzalloc.
77864 @@ -3948,7 +4030,7 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
77865 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
77866
77867 if (sysfs_slab_alias(s, name)) {
77868 - s->refcount--;
77869 + atomic_dec(&s->refcount);
77870 s = NULL;
77871 }
77872 }
77873 @@ -4063,7 +4145,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
77874 }
77875 #endif
77876
77877 -#ifdef CONFIG_SYSFS
77878 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77879 static int count_inuse(struct page *page)
77880 {
77881 return page->inuse;
77882 @@ -4450,12 +4532,12 @@ static void resiliency_test(void)
77883 validate_slab_cache(kmalloc_caches[9]);
77884 }
77885 #else
77886 -#ifdef CONFIG_SYSFS
77887 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77888 static void resiliency_test(void) {};
77889 #endif
77890 #endif
77891
77892 -#ifdef CONFIG_SYSFS
77893 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77894 enum slab_stat_type {
77895 SL_ALL, /* All slabs */
77896 SL_PARTIAL, /* Only partially allocated slabs */
77897 @@ -4699,7 +4781,7 @@ SLAB_ATTR_RO(ctor);
77898
77899 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
77900 {
77901 - return sprintf(buf, "%d\n", s->refcount - 1);
77902 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
77903 }
77904 SLAB_ATTR_RO(aliases);
77905
77906 @@ -5261,6 +5343,7 @@ static char *create_unique_id(struct kmem_cache *s)
77907 return name;
77908 }
77909
77910 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77911 static int sysfs_slab_add(struct kmem_cache *s)
77912 {
77913 int err;
77914 @@ -5323,6 +5406,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
77915 kobject_del(&s->kobj);
77916 kobject_put(&s->kobj);
77917 }
77918 +#endif
77919
77920 /*
77921 * Need to buffer aliases during bootup until sysfs becomes
77922 @@ -5336,6 +5420,7 @@ struct saved_alias {
77923
77924 static struct saved_alias *alias_list;
77925
77926 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77927 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
77928 {
77929 struct saved_alias *al;
77930 @@ -5358,6 +5443,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
77931 alias_list = al;
77932 return 0;
77933 }
77934 +#endif
77935
77936 static int __init slab_sysfs_init(void)
77937 {
77938 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
77939 index 1b7e22a..3fcd4f3 100644
77940 --- a/mm/sparse-vmemmap.c
77941 +++ b/mm/sparse-vmemmap.c
77942 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
77943 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
77944 if (!p)
77945 return NULL;
77946 - pud_populate(&init_mm, pud, p);
77947 + pud_populate_kernel(&init_mm, pud, p);
77948 }
77949 return pud;
77950 }
77951 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
77952 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
77953 if (!p)
77954 return NULL;
77955 - pgd_populate(&init_mm, pgd, p);
77956 + pgd_populate_kernel(&init_mm, pgd, p);
77957 }
77958 return pgd;
77959 }
77960 diff --git a/mm/swap.c b/mm/swap.c
77961 index 6310dc2..3662b3f 100644
77962 --- a/mm/swap.c
77963 +++ b/mm/swap.c
77964 @@ -30,6 +30,7 @@
77965 #include <linux/backing-dev.h>
77966 #include <linux/memcontrol.h>
77967 #include <linux/gfp.h>
77968 +#include <linux/hugetlb.h>
77969
77970 #include "internal.h"
77971
77972 @@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
77973
77974 __page_cache_release(page);
77975 dtor = get_compound_page_dtor(page);
77976 + if (!PageHuge(page))
77977 + BUG_ON(dtor != free_compound_page);
77978 (*dtor)(page);
77979 }
77980
77981 diff --git a/mm/swapfile.c b/mm/swapfile.c
77982 index f91a255..9dcac21 100644
77983 --- a/mm/swapfile.c
77984 +++ b/mm/swapfile.c
77985 @@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
77986
77987 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
77988 /* Activity counter to indicate that a swapon or swapoff has occurred */
77989 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
77990 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
77991
77992 static inline unsigned char swap_count(unsigned char ent)
77993 {
77994 @@ -1601,7 +1601,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
77995 }
77996 filp_close(swap_file, NULL);
77997 err = 0;
77998 - atomic_inc(&proc_poll_event);
77999 + atomic_inc_unchecked(&proc_poll_event);
78000 wake_up_interruptible(&proc_poll_wait);
78001
78002 out_dput:
78003 @@ -1618,8 +1618,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
78004
78005 poll_wait(file, &proc_poll_wait, wait);
78006
78007 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
78008 - seq->poll_event = atomic_read(&proc_poll_event);
78009 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
78010 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
78011 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
78012 }
78013
78014 @@ -1717,7 +1717,7 @@ static int swaps_open(struct inode *inode, struct file *file)
78015 return ret;
78016
78017 seq = file->private_data;
78018 - seq->poll_event = atomic_read(&proc_poll_event);
78019 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
78020 return 0;
78021 }
78022
78023 @@ -2059,7 +2059,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
78024 (frontswap_map) ? "FS" : "");
78025
78026 mutex_unlock(&swapon_mutex);
78027 - atomic_inc(&proc_poll_event);
78028 + atomic_inc_unchecked(&proc_poll_event);
78029 wake_up_interruptible(&proc_poll_wait);
78030
78031 if (S_ISREG(inode->i_mode))
78032 diff --git a/mm/util.c b/mm/util.c
78033 index dc3036c..b6c7c9d 100644
78034 --- a/mm/util.c
78035 +++ b/mm/util.c
78036 @@ -292,6 +292,12 @@ done:
78037 void arch_pick_mmap_layout(struct mm_struct *mm)
78038 {
78039 mm->mmap_base = TASK_UNMAPPED_BASE;
78040 +
78041 +#ifdef CONFIG_PAX_RANDMMAP
78042 + if (mm->pax_flags & MF_PAX_RANDMMAP)
78043 + mm->mmap_base += mm->delta_mmap;
78044 +#endif
78045 +
78046 mm->get_unmapped_area = arch_get_unmapped_area;
78047 mm->unmap_area = arch_unmap_area;
78048 }
78049 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
78050 index 78e0830..bc6bbd8 100644
78051 --- a/mm/vmalloc.c
78052 +++ b/mm/vmalloc.c
78053 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
78054
78055 pte = pte_offset_kernel(pmd, addr);
78056 do {
78057 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
78058 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
78059 +
78060 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78061 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
78062 + BUG_ON(!pte_exec(*pte));
78063 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
78064 + continue;
78065 + }
78066 +#endif
78067 +
78068 + {
78069 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
78070 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
78071 + }
78072 } while (pte++, addr += PAGE_SIZE, addr != end);
78073 }
78074
78075 @@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
78076 pte = pte_alloc_kernel(pmd, addr);
78077 if (!pte)
78078 return -ENOMEM;
78079 +
78080 + pax_open_kernel();
78081 do {
78082 struct page *page = pages[*nr];
78083
78084 - if (WARN_ON(!pte_none(*pte)))
78085 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78086 + if (pgprot_val(prot) & _PAGE_NX)
78087 +#endif
78088 +
78089 + if (!pte_none(*pte)) {
78090 + pax_close_kernel();
78091 + WARN_ON(1);
78092 return -EBUSY;
78093 - if (WARN_ON(!page))
78094 + }
78095 + if (!page) {
78096 + pax_close_kernel();
78097 + WARN_ON(1);
78098 return -ENOMEM;
78099 + }
78100 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
78101 (*nr)++;
78102 } while (pte++, addr += PAGE_SIZE, addr != end);
78103 + pax_close_kernel();
78104 return 0;
78105 }
78106
78107 @@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
78108 pmd_t *pmd;
78109 unsigned long next;
78110
78111 - pmd = pmd_alloc(&init_mm, pud, addr);
78112 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
78113 if (!pmd)
78114 return -ENOMEM;
78115 do {
78116 @@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
78117 pud_t *pud;
78118 unsigned long next;
78119
78120 - pud = pud_alloc(&init_mm, pgd, addr);
78121 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
78122 if (!pud)
78123 return -ENOMEM;
78124 do {
78125 @@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
78126 * and fall back on vmalloc() if that fails. Others
78127 * just put it in the vmalloc space.
78128 */
78129 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
78130 +#ifdef CONFIG_MODULES
78131 +#ifdef MODULES_VADDR
78132 unsigned long addr = (unsigned long)x;
78133 if (addr >= MODULES_VADDR && addr < MODULES_END)
78134 return 1;
78135 #endif
78136 +
78137 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78138 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
78139 + return 1;
78140 +#endif
78141 +
78142 +#endif
78143 +
78144 return is_vmalloc_addr(x);
78145 }
78146
78147 @@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
78148
78149 if (!pgd_none(*pgd)) {
78150 pud_t *pud = pud_offset(pgd, addr);
78151 +#ifdef CONFIG_X86
78152 + if (!pud_large(*pud))
78153 +#endif
78154 if (!pud_none(*pud)) {
78155 pmd_t *pmd = pmd_offset(pud, addr);
78156 +#ifdef CONFIG_X86
78157 + if (!pmd_large(*pmd))
78158 +#endif
78159 if (!pmd_none(*pmd)) {
78160 pte_t *ptep, pte;
78161
78162 @@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
78163 * Allocate a region of KVA of the specified size and alignment, within the
78164 * vstart and vend.
78165 */
78166 -static struct vmap_area *alloc_vmap_area(unsigned long size,
78167 +static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
78168 unsigned long align,
78169 unsigned long vstart, unsigned long vend,
78170 int node, gfp_t gfp_mask)
78171 @@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
78172 struct vm_struct *area;
78173
78174 BUG_ON(in_interrupt());
78175 +
78176 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
78177 + if (flags & VM_KERNEXEC) {
78178 + if (start != VMALLOC_START || end != VMALLOC_END)
78179 + return NULL;
78180 + start = (unsigned long)MODULES_EXEC_VADDR;
78181 + end = (unsigned long)MODULES_EXEC_END;
78182 + }
78183 +#endif
78184 +
78185 if (flags & VM_IOREMAP) {
78186 int bit = fls(size);
78187
78188 @@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
78189 if (count > totalram_pages)
78190 return NULL;
78191
78192 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
78193 + if (!(pgprot_val(prot) & _PAGE_NX))
78194 + flags |= VM_KERNEXEC;
78195 +#endif
78196 +
78197 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
78198 __builtin_return_address(0));
78199 if (!area)
78200 @@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
78201 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
78202 goto fail;
78203
78204 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
78205 + if (!(pgprot_val(prot) & _PAGE_NX))
78206 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
78207 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
78208 + else
78209 +#endif
78210 +
78211 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
78212 start, end, node, gfp_mask, caller);
78213 if (!area)
78214 @@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
78215 * For tight control over page level allocator and protection flags
78216 * use __vmalloc() instead.
78217 */
78218 -
78219 void *vmalloc_exec(unsigned long size)
78220 {
78221 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
78222 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
78223 -1, __builtin_return_address(0));
78224 }
78225
78226 @@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
78227 unsigned long uaddr = vma->vm_start;
78228 unsigned long usize = vma->vm_end - vma->vm_start;
78229
78230 + BUG_ON(vma->vm_mirror);
78231 +
78232 if ((PAGE_SIZE-1) & (unsigned long)addr)
78233 return -EINVAL;
78234
78235 @@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
78236 v->addr, v->addr + v->size, v->size);
78237
78238 if (v->caller)
78239 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78240 + seq_printf(m, " %pK", v->caller);
78241 +#else
78242 seq_printf(m, " %pS", v->caller);
78243 +#endif
78244
78245 if (v->nr_pages)
78246 seq_printf(m, " pages=%d", v->nr_pages);
78247 diff --git a/mm/vmstat.c b/mm/vmstat.c
78248 index c737057..a49753a 100644
78249 --- a/mm/vmstat.c
78250 +++ b/mm/vmstat.c
78251 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
78252 *
78253 * vm_stat contains the global counters
78254 */
78255 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
78256 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
78257 EXPORT_SYMBOL(vm_stat);
78258
78259 #ifdef CONFIG_SMP
78260 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
78261 v = p->vm_stat_diff[i];
78262 p->vm_stat_diff[i] = 0;
78263 local_irq_restore(flags);
78264 - atomic_long_add(v, &zone->vm_stat[i]);
78265 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
78266 global_diff[i] += v;
78267 #ifdef CONFIG_NUMA
78268 /* 3 seconds idle till flush */
78269 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
78270
78271 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
78272 if (global_diff[i])
78273 - atomic_long_add(global_diff[i], &vm_stat[i]);
78274 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
78275 }
78276
78277 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
78278 @@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
78279 if (pset->vm_stat_diff[i]) {
78280 int v = pset->vm_stat_diff[i];
78281 pset->vm_stat_diff[i] = 0;
78282 - atomic_long_add(v, &zone->vm_stat[i]);
78283 - atomic_long_add(v, &vm_stat[i]);
78284 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
78285 + atomic_long_add_unchecked(v, &vm_stat[i]);
78286 }
78287 }
78288 #endif
78289 @@ -1224,10 +1224,20 @@ static int __init setup_vmstat(void)
78290 start_cpu_timer(cpu);
78291 #endif
78292 #ifdef CONFIG_PROC_FS
78293 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
78294 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
78295 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
78296 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
78297 + {
78298 + mode_t gr_mode = S_IRUGO;
78299 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
78300 + gr_mode = S_IRUSR;
78301 +#endif
78302 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
78303 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
78304 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
78305 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
78306 +#else
78307 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
78308 +#endif
78309 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
78310 + }
78311 #endif
78312 return 0;
78313 }
78314 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
78315 index ee07072..593e3fd 100644
78316 --- a/net/8021q/vlan.c
78317 +++ b/net/8021q/vlan.c
78318 @@ -484,7 +484,7 @@ out:
78319 return NOTIFY_DONE;
78320 }
78321
78322 -static struct notifier_block vlan_notifier_block __read_mostly = {
78323 +static struct notifier_block vlan_notifier_block = {
78324 .notifier_call = vlan_device_event,
78325 };
78326
78327 @@ -559,8 +559,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
78328 err = -EPERM;
78329 if (!capable(CAP_NET_ADMIN))
78330 break;
78331 - if ((args.u.name_type >= 0) &&
78332 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
78333 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
78334 struct vlan_net *vn;
78335
78336 vn = net_generic(net, vlan_net_id);
78337 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
78338 index 02efb25..41541a9 100644
78339 --- a/net/9p/trans_fd.c
78340 +++ b/net/9p/trans_fd.c
78341 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
78342 oldfs = get_fs();
78343 set_fs(get_ds());
78344 /* The cast to a user pointer is valid due to the set_fs() */
78345 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
78346 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
78347 set_fs(oldfs);
78348
78349 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
78350 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
78351 index 876fbe8..8bbea9f 100644
78352 --- a/net/atm/atm_misc.c
78353 +++ b/net/atm/atm_misc.c
78354 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
78355 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
78356 return 1;
78357 atm_return(vcc, truesize);
78358 - atomic_inc(&vcc->stats->rx_drop);
78359 + atomic_inc_unchecked(&vcc->stats->rx_drop);
78360 return 0;
78361 }
78362 EXPORT_SYMBOL(atm_charge);
78363 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
78364 }
78365 }
78366 atm_return(vcc, guess);
78367 - atomic_inc(&vcc->stats->rx_drop);
78368 + atomic_inc_unchecked(&vcc->stats->rx_drop);
78369 return NULL;
78370 }
78371 EXPORT_SYMBOL(atm_alloc_charge);
78372 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
78373
78374 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
78375 {
78376 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
78377 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
78378 __SONET_ITEMS
78379 #undef __HANDLE_ITEM
78380 }
78381 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
78382
78383 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
78384 {
78385 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
78386 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
78387 __SONET_ITEMS
78388 #undef __HANDLE_ITEM
78389 }
78390 diff --git a/net/atm/lec.h b/net/atm/lec.h
78391 index a86aff9..3a0d6f6 100644
78392 --- a/net/atm/lec.h
78393 +++ b/net/atm/lec.h
78394 @@ -48,7 +48,7 @@ struct lane2_ops {
78395 const u8 *tlvs, u32 sizeoftlvs);
78396 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
78397 const u8 *tlvs, u32 sizeoftlvs);
78398 -};
78399 +} __no_const;
78400
78401 /*
78402 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
78403 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
78404 index 0919a88..a23d54e 100644
78405 --- a/net/atm/mpc.h
78406 +++ b/net/atm/mpc.h
78407 @@ -33,7 +33,7 @@ struct mpoa_client {
78408 struct mpc_parameters parameters; /* parameters for this client */
78409
78410 const struct net_device_ops *old_ops;
78411 - struct net_device_ops new_ops;
78412 + net_device_ops_no_const new_ops;
78413 };
78414
78415
78416 diff --git a/net/atm/proc.c b/net/atm/proc.c
78417 index 0d020de..011c7bb 100644
78418 --- a/net/atm/proc.c
78419 +++ b/net/atm/proc.c
78420 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
78421 const struct k_atm_aal_stats *stats)
78422 {
78423 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
78424 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
78425 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
78426 - atomic_read(&stats->rx_drop));
78427 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
78428 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
78429 + atomic_read_unchecked(&stats->rx_drop));
78430 }
78431
78432 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
78433 diff --git a/net/atm/resources.c b/net/atm/resources.c
78434 index 0447d5d..3cf4728 100644
78435 --- a/net/atm/resources.c
78436 +++ b/net/atm/resources.c
78437 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
78438 static void copy_aal_stats(struct k_atm_aal_stats *from,
78439 struct atm_aal_stats *to)
78440 {
78441 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
78442 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
78443 __AAL_STAT_ITEMS
78444 #undef __HANDLE_ITEM
78445 }
78446 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
78447 static void subtract_aal_stats(struct k_atm_aal_stats *from,
78448 struct atm_aal_stats *to)
78449 {
78450 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
78451 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
78452 __AAL_STAT_ITEMS
78453 #undef __HANDLE_ITEM
78454 }
78455 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
78456 index b02b75d..0a9636e 100644
78457 --- a/net/batman-adv/bat_iv_ogm.c
78458 +++ b/net/batman-adv/bat_iv_ogm.c
78459 @@ -62,7 +62,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
78460
78461 /* randomize initial seqno to avoid collision */
78462 get_random_bytes(&random_seqno, sizeof(random_seqno));
78463 - atomic_set(&hard_iface->seqno, random_seqno);
78464 + atomic_set_unchecked(&hard_iface->seqno, random_seqno);
78465
78466 hard_iface->packet_len = BATADV_OGM_HLEN;
78467 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
78468 @@ -608,9 +608,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
78469 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
78470
78471 /* change sequence number to network order */
78472 - seqno = (uint32_t)atomic_read(&hard_iface->seqno);
78473 + seqno = (uint32_t)atomic_read_unchecked(&hard_iface->seqno);
78474 batadv_ogm_packet->seqno = htonl(seqno);
78475 - atomic_inc(&hard_iface->seqno);
78476 + atomic_inc_unchecked(&hard_iface->seqno);
78477
78478 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
78479 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
78480 @@ -1015,7 +1015,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
78481 return;
78482
78483 /* could be changed by schedule_own_packet() */
78484 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
78485 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
78486
78487 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
78488 has_directlink_flag = 1;
78489 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
78490 index d112fd6..686a447 100644
78491 --- a/net/batman-adv/hard-interface.c
78492 +++ b/net/batman-adv/hard-interface.c
78493 @@ -327,7 +327,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
78494 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
78495 dev_add_pack(&hard_iface->batman_adv_ptype);
78496
78497 - atomic_set(&hard_iface->frag_seqno, 1);
78498 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
78499 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
78500 hard_iface->net_dev->name);
78501
78502 @@ -450,7 +450,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
78503 /* This can't be called via a bat_priv callback because
78504 * we have no bat_priv yet.
78505 */
78506 - atomic_set(&hard_iface->seqno, 1);
78507 + atomic_set_unchecked(&hard_iface->seqno, 1);
78508 hard_iface->packet_buff = NULL;
78509
78510 return hard_iface;
78511 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
78512 index ce0684a..4a0cbf1 100644
78513 --- a/net/batman-adv/soft-interface.c
78514 +++ b/net/batman-adv/soft-interface.c
78515 @@ -234,7 +234,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
78516 primary_if->net_dev->dev_addr, ETH_ALEN);
78517
78518 /* set broadcast sequence number */
78519 - seqno = atomic_inc_return(&bat_priv->bcast_seqno);
78520 + seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
78521 bcast_packet->seqno = htonl(seqno);
78522
78523 batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
78524 @@ -427,7 +427,7 @@ struct net_device *batadv_softif_create(const char *name)
78525 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
78526
78527 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
78528 - atomic_set(&bat_priv->bcast_seqno, 1);
78529 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
78530 atomic_set(&bat_priv->tt.vn, 0);
78531 atomic_set(&bat_priv->tt.local_changes, 0);
78532 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
78533 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
78534 index ac1e07a..4c846e2 100644
78535 --- a/net/batman-adv/types.h
78536 +++ b/net/batman-adv/types.h
78537 @@ -33,8 +33,8 @@ struct batadv_hard_iface {
78538 int16_t if_num;
78539 char if_status;
78540 struct net_device *net_dev;
78541 - atomic_t seqno;
78542 - atomic_t frag_seqno;
78543 + atomic_unchecked_t seqno;
78544 + atomic_unchecked_t frag_seqno;
78545 unsigned char *packet_buff;
78546 int packet_len;
78547 struct kobject *hardif_obj;
78548 @@ -244,7 +244,7 @@ struct batadv_priv {
78549 atomic_t orig_interval; /* uint */
78550 atomic_t hop_penalty; /* uint */
78551 atomic_t log_level; /* uint */
78552 - atomic_t bcast_seqno;
78553 + atomic_unchecked_t bcast_seqno;
78554 atomic_t bcast_queue_left;
78555 atomic_t batman_queue_left;
78556 char num_ifaces;
78557 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
78558 index f397232..3206a33 100644
78559 --- a/net/batman-adv/unicast.c
78560 +++ b/net/batman-adv/unicast.c
78561 @@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
78562 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
78563 frag2->flags = large_tail;
78564
78565 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
78566 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
78567 frag1->seqno = htons(seqno - 1);
78568 frag2->seqno = htons(seqno);
78569
78570 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
78571 index 07f0739..3c42e34 100644
78572 --- a/net/bluetooth/hci_sock.c
78573 +++ b/net/bluetooth/hci_sock.c
78574 @@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
78575 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
78576 }
78577
78578 - len = min_t(unsigned int, len, sizeof(uf));
78579 + len = min((size_t)len, sizeof(uf));
78580 if (copy_from_user(&uf, optval, len)) {
78581 err = -EFAULT;
78582 break;
78583 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
78584 index a91239d..d7ed533 100644
78585 --- a/net/bluetooth/l2cap_core.c
78586 +++ b/net/bluetooth/l2cap_core.c
78587 @@ -3183,8 +3183,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
78588 break;
78589
78590 case L2CAP_CONF_RFC:
78591 - if (olen == sizeof(rfc))
78592 - memcpy(&rfc, (void *)val, olen);
78593 + if (olen != sizeof(rfc))
78594 + break;
78595 +
78596 + memcpy(&rfc, (void *)val, olen);
78597
78598 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
78599 rfc.mode != chan->mode)
78600 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
78601 index 083f2bf..799f9448 100644
78602 --- a/net/bluetooth/l2cap_sock.c
78603 +++ b/net/bluetooth/l2cap_sock.c
78604 @@ -471,7 +471,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
78605 struct sock *sk = sock->sk;
78606 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
78607 struct l2cap_options opts;
78608 - int len, err = 0;
78609 + int err = 0;
78610 + size_t len = optlen;
78611 u32 opt;
78612
78613 BT_DBG("sk %p", sk);
78614 @@ -493,7 +494,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
78615 opts.max_tx = chan->max_tx;
78616 opts.txwin_size = chan->tx_win;
78617
78618 - len = min_t(unsigned int, sizeof(opts), optlen);
78619 + len = min(sizeof(opts), len);
78620 if (copy_from_user((char *) &opts, optval, len)) {
78621 err = -EFAULT;
78622 break;
78623 @@ -571,7 +572,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78624 struct bt_security sec;
78625 struct bt_power pwr;
78626 struct l2cap_conn *conn;
78627 - int len, err = 0;
78628 + int err = 0;
78629 + size_t len = optlen;
78630 u32 opt;
78631
78632 BT_DBG("sk %p", sk);
78633 @@ -594,7 +596,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78634
78635 sec.level = BT_SECURITY_LOW;
78636
78637 - len = min_t(unsigned int, sizeof(sec), optlen);
78638 + len = min(sizeof(sec), len);
78639 if (copy_from_user((char *) &sec, optval, len)) {
78640 err = -EFAULT;
78641 break;
78642 @@ -691,7 +693,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78643
78644 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
78645
78646 - len = min_t(unsigned int, sizeof(pwr), optlen);
78647 + len = min(sizeof(pwr), len);
78648 if (copy_from_user((char *) &pwr, optval, len)) {
78649 err = -EFAULT;
78650 break;
78651 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
78652 index b3226f3..8916f43 100644
78653 --- a/net/bluetooth/rfcomm/sock.c
78654 +++ b/net/bluetooth/rfcomm/sock.c
78655 @@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
78656 struct sock *sk = sock->sk;
78657 struct bt_security sec;
78658 int err = 0;
78659 - size_t len;
78660 + size_t len = optlen;
78661 u32 opt;
78662
78663 BT_DBG("sk %p", sk);
78664 @@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
78665
78666 sec.level = BT_SECURITY_LOW;
78667
78668 - len = min_t(unsigned int, sizeof(sec), optlen);
78669 + len = min(sizeof(sec), len);
78670 if (copy_from_user((char *) &sec, optval, len)) {
78671 err = -EFAULT;
78672 break;
78673 diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
78674 index ccc2487..921073d 100644
78675 --- a/net/bluetooth/rfcomm/tty.c
78676 +++ b/net/bluetooth/rfcomm/tty.c
78677 @@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
78678 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
78679
78680 spin_lock_irqsave(&dev->port.lock, flags);
78681 - if (dev->port.count > 0) {
78682 + if (atomic_read(&dev->port.count) > 0) {
78683 spin_unlock_irqrestore(&dev->port.lock, flags);
78684 return;
78685 }
78686 @@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
78687 return -ENODEV;
78688
78689 BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
78690 - dev->channel, dev->port.count);
78691 + dev->channel, atomic_read(&dev->port.count));
78692
78693 spin_lock_irqsave(&dev->port.lock, flags);
78694 - if (++dev->port.count > 1) {
78695 + if (atomic_inc_return(&dev->port.count) > 1) {
78696 spin_unlock_irqrestore(&dev->port.lock, flags);
78697 return 0;
78698 }
78699 @@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
78700 return;
78701
78702 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
78703 - dev->port.count);
78704 + atomic_read(&dev->port.count));
78705
78706 spin_lock_irqsave(&dev->port.lock, flags);
78707 - if (!--dev->port.count) {
78708 + if (!atomic_dec_return(&dev->port.count)) {
78709 spin_unlock_irqrestore(&dev->port.lock, flags);
78710 if (dev->tty_dev->parent)
78711 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
78712 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
78713 index 5fe2ff3..121d696 100644
78714 --- a/net/bridge/netfilter/ebtables.c
78715 +++ b/net/bridge/netfilter/ebtables.c
78716 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
78717 tmp.valid_hooks = t->table->valid_hooks;
78718 }
78719 mutex_unlock(&ebt_mutex);
78720 - if (copy_to_user(user, &tmp, *len) != 0){
78721 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
78722 BUGPRINT("c2u Didn't work\n");
78723 ret = -EFAULT;
78724 break;
78725 @@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
78726 goto out;
78727 tmp.valid_hooks = t->valid_hooks;
78728
78729 - if (copy_to_user(user, &tmp, *len) != 0) {
78730 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
78731 ret = -EFAULT;
78732 break;
78733 }
78734 @@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
78735 tmp.entries_size = t->table->entries_size;
78736 tmp.valid_hooks = t->table->valid_hooks;
78737
78738 - if (copy_to_user(user, &tmp, *len) != 0) {
78739 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
78740 ret = -EFAULT;
78741 break;
78742 }
78743 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
78744 index 44f270f..1f5602d 100644
78745 --- a/net/caif/cfctrl.c
78746 +++ b/net/caif/cfctrl.c
78747 @@ -10,6 +10,7 @@
78748 #include <linux/spinlock.h>
78749 #include <linux/slab.h>
78750 #include <linux/pkt_sched.h>
78751 +#include <linux/sched.h>
78752 #include <net/caif/caif_layer.h>
78753 #include <net/caif/cfpkt.h>
78754 #include <net/caif/cfctrl.h>
78755 @@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
78756 memset(&dev_info, 0, sizeof(dev_info));
78757 dev_info.id = 0xff;
78758 cfsrvl_init(&this->serv, 0, &dev_info, false);
78759 - atomic_set(&this->req_seq_no, 1);
78760 - atomic_set(&this->rsp_seq_no, 1);
78761 + atomic_set_unchecked(&this->req_seq_no, 1);
78762 + atomic_set_unchecked(&this->rsp_seq_no, 1);
78763 this->serv.layer.receive = cfctrl_recv;
78764 sprintf(this->serv.layer.name, "ctrl");
78765 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
78766 @@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
78767 struct cfctrl_request_info *req)
78768 {
78769 spin_lock_bh(&ctrl->info_list_lock);
78770 - atomic_inc(&ctrl->req_seq_no);
78771 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
78772 + atomic_inc_unchecked(&ctrl->req_seq_no);
78773 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
78774 list_add_tail(&req->list, &ctrl->list);
78775 spin_unlock_bh(&ctrl->info_list_lock);
78776 }
78777 @@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
78778 if (p != first)
78779 pr_warn("Requests are not received in order\n");
78780
78781 - atomic_set(&ctrl->rsp_seq_no,
78782 + atomic_set_unchecked(&ctrl->rsp_seq_no,
78783 p->sequence_no);
78784 list_del(&p->list);
78785 goto out;
78786 diff --git a/net/can/af_can.c b/net/can/af_can.c
78787 index ddac1ee..3ee0a78 100644
78788 --- a/net/can/af_can.c
78789 +++ b/net/can/af_can.c
78790 @@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
78791 };
78792
78793 /* notifier block for netdevice event */
78794 -static struct notifier_block can_netdev_notifier __read_mostly = {
78795 +static struct notifier_block can_netdev_notifier = {
78796 .notifier_call = can_notifier,
78797 };
78798
78799 diff --git a/net/can/bcm.c b/net/can/bcm.c
78800 index 969b7cd..f69fccb 100644
78801 --- a/net/can/bcm.c
78802 +++ b/net/can/bcm.c
78803 @@ -119,7 +119,7 @@ struct bcm_sock {
78804 struct sock sk;
78805 int bound;
78806 int ifindex;
78807 - struct notifier_block notifier;
78808 + notifier_block_no_const notifier;
78809 struct list_head rx_ops;
78810 struct list_head tx_ops;
78811 unsigned long dropped_usr_msgs;
78812 diff --git a/net/can/gw.c b/net/can/gw.c
78813 index 1f5c978..30b397f 100644
78814 --- a/net/can/gw.c
78815 +++ b/net/can/gw.c
78816 @@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
78817 MODULE_ALIAS("can-gw");
78818
78819 static HLIST_HEAD(cgw_list);
78820 -static struct notifier_block notifier;
78821
78822 static struct kmem_cache *cgw_cache __read_mostly;
78823
78824 @@ -96,7 +95,7 @@ struct cf_mod {
78825 struct {
78826 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
78827 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
78828 - } csumfunc;
78829 + } __no_const csumfunc;
78830 };
78831
78832
78833 @@ -887,6 +886,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
78834 return err;
78835 }
78836
78837 +static struct notifier_block notifier = {
78838 + .notifier_call = cgw_notifier
78839 +};
78840 +
78841 static __init int cgw_module_init(void)
78842 {
78843 printk(banner);
78844 @@ -898,7 +901,6 @@ static __init int cgw_module_init(void)
78845 return -ENOMEM;
78846
78847 /* set notifier */
78848 - notifier.notifier_call = cgw_notifier;
78849 register_netdevice_notifier(&notifier);
78850
78851 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
78852 diff --git a/net/can/raw.c b/net/can/raw.c
78853 index 5b0e3e3..615c72b 100644
78854 --- a/net/can/raw.c
78855 +++ b/net/can/raw.c
78856 @@ -79,7 +79,7 @@ struct raw_sock {
78857 struct sock sk;
78858 int bound;
78859 int ifindex;
78860 - struct notifier_block notifier;
78861 + notifier_block_no_const notifier;
78862 int loopback;
78863 int recv_own_msgs;
78864 int fd_frames;
78865 diff --git a/net/compat.c b/net/compat.c
78866 index 79ae884..17c5c09 100644
78867 --- a/net/compat.c
78868 +++ b/net/compat.c
78869 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
78870 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
78871 __get_user(kmsg->msg_flags, &umsg->msg_flags))
78872 return -EFAULT;
78873 - kmsg->msg_name = compat_ptr(tmp1);
78874 - kmsg->msg_iov = compat_ptr(tmp2);
78875 - kmsg->msg_control = compat_ptr(tmp3);
78876 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
78877 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
78878 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
78879 return 0;
78880 }
78881
78882 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78883
78884 if (kern_msg->msg_namelen) {
78885 if (mode == VERIFY_READ) {
78886 - int err = move_addr_to_kernel(kern_msg->msg_name,
78887 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
78888 kern_msg->msg_namelen,
78889 kern_address);
78890 if (err < 0)
78891 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78892 kern_msg->msg_name = NULL;
78893
78894 tot_len = iov_from_user_compat_to_kern(kern_iov,
78895 - (struct compat_iovec __user *)kern_msg->msg_iov,
78896 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
78897 kern_msg->msg_iovlen);
78898 if (tot_len >= 0)
78899 kern_msg->msg_iov = kern_iov;
78900 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78901
78902 #define CMSG_COMPAT_FIRSTHDR(msg) \
78903 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
78904 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
78905 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
78906 (struct compat_cmsghdr __user *)NULL)
78907
78908 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
78909 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
78910 (ucmlen) <= (unsigned long) \
78911 ((mhdr)->msg_controllen - \
78912 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
78913 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
78914
78915 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
78916 struct compat_cmsghdr __user *cmsg, int cmsg_len)
78917 {
78918 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
78919 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
78920 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
78921 msg->msg_controllen)
78922 return NULL;
78923 return (struct compat_cmsghdr __user *)ptr;
78924 @@ -219,7 +219,7 @@ Efault:
78925
78926 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
78927 {
78928 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
78929 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
78930 struct compat_cmsghdr cmhdr;
78931 struct compat_timeval ctv;
78932 struct compat_timespec cts[3];
78933 @@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
78934
78935 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
78936 {
78937 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
78938 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
78939 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
78940 int fdnum = scm->fp->count;
78941 struct file **fp = scm->fp->fp;
78942 @@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
78943 return -EFAULT;
78944 old_fs = get_fs();
78945 set_fs(KERNEL_DS);
78946 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
78947 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
78948 set_fs(old_fs);
78949
78950 return err;
78951 @@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
78952 len = sizeof(ktime);
78953 old_fs = get_fs();
78954 set_fs(KERNEL_DS);
78955 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
78956 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
78957 set_fs(old_fs);
78958
78959 if (!err) {
78960 @@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78961 case MCAST_JOIN_GROUP:
78962 case MCAST_LEAVE_GROUP:
78963 {
78964 - struct compat_group_req __user *gr32 = (void *)optval;
78965 + struct compat_group_req __user *gr32 = (void __user *)optval;
78966 struct group_req __user *kgr =
78967 compat_alloc_user_space(sizeof(struct group_req));
78968 u32 interface;
78969 @@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78970 case MCAST_BLOCK_SOURCE:
78971 case MCAST_UNBLOCK_SOURCE:
78972 {
78973 - struct compat_group_source_req __user *gsr32 = (void *)optval;
78974 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
78975 struct group_source_req __user *kgsr = compat_alloc_user_space(
78976 sizeof(struct group_source_req));
78977 u32 interface;
78978 @@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78979 }
78980 case MCAST_MSFILTER:
78981 {
78982 - struct compat_group_filter __user *gf32 = (void *)optval;
78983 + struct compat_group_filter __user *gf32 = (void __user *)optval;
78984 struct group_filter __user *kgf;
78985 u32 interface, fmode, numsrc;
78986
78987 @@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
78988 char __user *optval, int __user *optlen,
78989 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
78990 {
78991 - struct compat_group_filter __user *gf32 = (void *)optval;
78992 + struct compat_group_filter __user *gf32 = (void __user *)optval;
78993 struct group_filter __user *kgf;
78994 int __user *koptlen;
78995 u32 interface, fmode, numsrc;
78996 @@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
78997
78998 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
78999 return -EINVAL;
79000 - if (copy_from_user(a, args, nas[call]))
79001 + if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
79002 return -EFAULT;
79003 a0 = a[0];
79004 a1 = a[1];
79005 diff --git a/net/core/datagram.c b/net/core/datagram.c
79006 index 0337e2b..47914a0 100644
79007 --- a/net/core/datagram.c
79008 +++ b/net/core/datagram.c
79009 @@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
79010 }
79011
79012 kfree_skb(skb);
79013 - atomic_inc(&sk->sk_drops);
79014 + atomic_inc_unchecked(&sk->sk_drops);
79015 sk_mem_reclaim_partial(sk);
79016
79017 return err;
79018 diff --git a/net/core/dev.c b/net/core/dev.c
79019 index e5942bf..25998c3 100644
79020 --- a/net/core/dev.c
79021 +++ b/net/core/dev.c
79022 @@ -1162,9 +1162,13 @@ void dev_load(struct net *net, const char *name)
79023 if (no_module && capable(CAP_NET_ADMIN))
79024 no_module = request_module("netdev-%s", name);
79025 if (no_module && capable(CAP_SYS_MODULE)) {
79026 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
79027 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
79028 +#else
79029 if (!request_module("%s", name))
79030 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
79031 name);
79032 +#endif
79033 }
79034 }
79035 EXPORT_SYMBOL(dev_load);
79036 @@ -1627,7 +1631,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
79037 {
79038 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
79039 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
79040 - atomic_long_inc(&dev->rx_dropped);
79041 + atomic_long_inc_unchecked(&dev->rx_dropped);
79042 kfree_skb(skb);
79043 return NET_RX_DROP;
79044 }
79045 @@ -1637,7 +1641,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
79046 nf_reset(skb);
79047
79048 if (unlikely(!is_skb_forwardable(dev, skb))) {
79049 - atomic_long_inc(&dev->rx_dropped);
79050 + atomic_long_inc_unchecked(&dev->rx_dropped);
79051 kfree_skb(skb);
79052 return NET_RX_DROP;
79053 }
79054 @@ -2093,7 +2097,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
79055
79056 struct dev_gso_cb {
79057 void (*destructor)(struct sk_buff *skb);
79058 -};
79059 +} __no_const;
79060
79061 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
79062
79063 @@ -2955,7 +2959,7 @@ enqueue:
79064
79065 local_irq_restore(flags);
79066
79067 - atomic_long_inc(&skb->dev->rx_dropped);
79068 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
79069 kfree_skb(skb);
79070 return NET_RX_DROP;
79071 }
79072 @@ -3027,7 +3031,7 @@ int netif_rx_ni(struct sk_buff *skb)
79073 }
79074 EXPORT_SYMBOL(netif_rx_ni);
79075
79076 -static void net_tx_action(struct softirq_action *h)
79077 +static void net_tx_action(void)
79078 {
79079 struct softnet_data *sd = &__get_cpu_var(softnet_data);
79080
79081 @@ -3358,7 +3362,7 @@ ncls:
79082 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
79083 } else {
79084 drop:
79085 - atomic_long_inc(&skb->dev->rx_dropped);
79086 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
79087 kfree_skb(skb);
79088 /* Jamal, now you will not able to escape explaining
79089 * me how you were going to use this. :-)
79090 @@ -3944,7 +3948,7 @@ void netif_napi_del(struct napi_struct *napi)
79091 }
79092 EXPORT_SYMBOL(netif_napi_del);
79093
79094 -static void net_rx_action(struct softirq_action *h)
79095 +static void net_rx_action(void)
79096 {
79097 struct softnet_data *sd = &__get_cpu_var(softnet_data);
79098 unsigned long time_limit = jiffies + 2;
79099 @@ -4423,8 +4427,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
79100 else
79101 seq_printf(seq, "%04x", ntohs(pt->type));
79102
79103 +#ifdef CONFIG_GRKERNSEC_HIDESYM
79104 + seq_printf(seq, " %-8s %p\n",
79105 + pt->dev ? pt->dev->name : "", NULL);
79106 +#else
79107 seq_printf(seq, " %-8s %pF\n",
79108 pt->dev ? pt->dev->name : "", pt->func);
79109 +#endif
79110 }
79111
79112 return 0;
79113 @@ -5987,7 +5996,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
79114 } else {
79115 netdev_stats_to_stats64(storage, &dev->stats);
79116 }
79117 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
79118 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
79119 return storage;
79120 }
79121 EXPORT_SYMBOL(dev_get_stats);
79122 diff --git a/net/core/flow.c b/net/core/flow.c
79123 index e318c7e..98aee7d 100644
79124 --- a/net/core/flow.c
79125 +++ b/net/core/flow.c
79126 @@ -55,13 +55,13 @@ struct flow_flush_info {
79127 struct flow_cache {
79128 u32 hash_shift;
79129 struct flow_cache_percpu __percpu *percpu;
79130 - struct notifier_block hotcpu_notifier;
79131 + notifier_block_no_const hotcpu_notifier;
79132 int low_watermark;
79133 int high_watermark;
79134 struct timer_list rnd_timer;
79135 };
79136
79137 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
79138 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
79139 EXPORT_SYMBOL(flow_cache_genid);
79140 static struct flow_cache flow_cache_global;
79141 static struct kmem_cache *flow_cachep __read_mostly;
79142 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
79143
79144 static int flow_entry_valid(struct flow_cache_entry *fle)
79145 {
79146 - if (atomic_read(&flow_cache_genid) != fle->genid)
79147 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
79148 return 0;
79149 if (fle->object && !fle->object->ops->check(fle->object))
79150 return 0;
79151 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
79152 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
79153 fcp->hash_count++;
79154 }
79155 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
79156 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
79157 flo = fle->object;
79158 if (!flo)
79159 goto ret_object;
79160 @@ -280,7 +280,7 @@ nocache:
79161 }
79162 flo = resolver(net, key, family, dir, flo, ctx);
79163 if (fle) {
79164 - fle->genid = atomic_read(&flow_cache_genid);
79165 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
79166 if (!IS_ERR(flo))
79167 fle->object = flo;
79168 else
79169 diff --git a/net/core/iovec.c b/net/core/iovec.c
79170 index 7e7aeb0..2a998cb 100644
79171 --- a/net/core/iovec.c
79172 +++ b/net/core/iovec.c
79173 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
79174 if (m->msg_namelen) {
79175 if (mode == VERIFY_READ) {
79176 void __user *namep;
79177 - namep = (void __user __force *) m->msg_name;
79178 + namep = (void __force_user *) m->msg_name;
79179 err = move_addr_to_kernel(namep, m->msg_namelen,
79180 address);
79181 if (err < 0)
79182 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
79183 }
79184
79185 size = m->msg_iovlen * sizeof(struct iovec);
79186 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
79187 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
79188 return -EFAULT;
79189
79190 m->msg_iov = iov;
79191 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
79192 index fad649a..df5891e 100644
79193 --- a/net/core/rtnetlink.c
79194 +++ b/net/core/rtnetlink.c
79195 @@ -58,7 +58,7 @@ struct rtnl_link {
79196 rtnl_doit_func doit;
79197 rtnl_dumpit_func dumpit;
79198 rtnl_calcit_func calcit;
79199 -};
79200 +} __no_const;
79201
79202 static DEFINE_MUTEX(rtnl_mutex);
79203
79204 diff --git a/net/core/scm.c b/net/core/scm.c
79205 index ab57084..0190c8f 100644
79206 --- a/net/core/scm.c
79207 +++ b/net/core/scm.c
79208 @@ -223,7 +223,7 @@ EXPORT_SYMBOL(__scm_send);
79209 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
79210 {
79211 struct cmsghdr __user *cm
79212 - = (__force struct cmsghdr __user *)msg->msg_control;
79213 + = (struct cmsghdr __force_user *)msg->msg_control;
79214 struct cmsghdr cmhdr;
79215 int cmlen = CMSG_LEN(len);
79216 int err;
79217 @@ -246,7 +246,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
79218 err = -EFAULT;
79219 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
79220 goto out;
79221 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
79222 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
79223 goto out;
79224 cmlen = CMSG_SPACE(len);
79225 if (msg->msg_controllen < cmlen)
79226 @@ -262,7 +262,7 @@ EXPORT_SYMBOL(put_cmsg);
79227 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
79228 {
79229 struct cmsghdr __user *cm
79230 - = (__force struct cmsghdr __user*)msg->msg_control;
79231 + = (struct cmsghdr __force_user *)msg->msg_control;
79232
79233 int fdmax = 0;
79234 int fdnum = scm->fp->count;
79235 @@ -282,7 +282,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
79236 if (fdnum < fdmax)
79237 fdmax = fdnum;
79238
79239 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
79240 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
79241 i++, cmfptr++)
79242 {
79243 struct socket *sock;
79244 diff --git a/net/core/sock.c b/net/core/sock.c
79245 index 8a146cf..ee08914d 100644
79246 --- a/net/core/sock.c
79247 +++ b/net/core/sock.c
79248 @@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79249 struct sk_buff_head *list = &sk->sk_receive_queue;
79250
79251 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
79252 - atomic_inc(&sk->sk_drops);
79253 + atomic_inc_unchecked(&sk->sk_drops);
79254 trace_sock_rcvqueue_full(sk, skb);
79255 return -ENOMEM;
79256 }
79257 @@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79258 return err;
79259
79260 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
79261 - atomic_inc(&sk->sk_drops);
79262 + atomic_inc_unchecked(&sk->sk_drops);
79263 return -ENOBUFS;
79264 }
79265
79266 @@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79267 skb_dst_force(skb);
79268
79269 spin_lock_irqsave(&list->lock, flags);
79270 - skb->dropcount = atomic_read(&sk->sk_drops);
79271 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
79272 __skb_queue_tail(list, skb);
79273 spin_unlock_irqrestore(&list->lock, flags);
79274
79275 @@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
79276 skb->dev = NULL;
79277
79278 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
79279 - atomic_inc(&sk->sk_drops);
79280 + atomic_inc_unchecked(&sk->sk_drops);
79281 goto discard_and_relse;
79282 }
79283 if (nested)
79284 @@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
79285 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
79286 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
79287 bh_unlock_sock(sk);
79288 - atomic_inc(&sk->sk_drops);
79289 + atomic_inc_unchecked(&sk->sk_drops);
79290 goto discard_and_relse;
79291 }
79292
79293 @@ -875,12 +875,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79294 struct timeval tm;
79295 } v;
79296
79297 - int lv = sizeof(int);
79298 - int len;
79299 + unsigned int lv = sizeof(int);
79300 + unsigned int len;
79301
79302 if (get_user(len, optlen))
79303 return -EFAULT;
79304 - if (len < 0)
79305 + if (len > INT_MAX)
79306 return -EINVAL;
79307
79308 memset(&v, 0, sizeof(v));
79309 @@ -1028,11 +1028,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79310
79311 case SO_PEERNAME:
79312 {
79313 - char address[128];
79314 + char address[_K_SS_MAXSIZE];
79315
79316 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
79317 return -ENOTCONN;
79318 - if (lv < len)
79319 + if (lv < len || sizeof address < len)
79320 return -EINVAL;
79321 if (copy_to_user(optval, address, len))
79322 return -EFAULT;
79323 @@ -1080,7 +1080,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79324
79325 if (len > lv)
79326 len = lv;
79327 - if (copy_to_user(optval, &v, len))
79328 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
79329 return -EFAULT;
79330 lenout:
79331 if (put_user(len, optlen))
79332 @@ -2212,7 +2212,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
79333 */
79334 smp_wmb();
79335 atomic_set(&sk->sk_refcnt, 1);
79336 - atomic_set(&sk->sk_drops, 0);
79337 + atomic_set_unchecked(&sk->sk_drops, 0);
79338 }
79339 EXPORT_SYMBOL(sock_init_data);
79340
79341 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
79342 index 602cd63..05c6c60 100644
79343 --- a/net/core/sock_diag.c
79344 +++ b/net/core/sock_diag.c
79345 @@ -15,20 +15,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
79346
79347 int sock_diag_check_cookie(void *sk, __u32 *cookie)
79348 {
79349 +#ifndef CONFIG_GRKERNSEC_HIDESYM
79350 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
79351 cookie[1] != INET_DIAG_NOCOOKIE) &&
79352 ((u32)(unsigned long)sk != cookie[0] ||
79353 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
79354 return -ESTALE;
79355 else
79356 +#endif
79357 return 0;
79358 }
79359 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
79360
79361 void sock_diag_save_cookie(void *sk, __u32 *cookie)
79362 {
79363 +#ifdef CONFIG_GRKERNSEC_HIDESYM
79364 + cookie[0] = 0;
79365 + cookie[1] = 0;
79366 +#else
79367 cookie[0] = (u32)(unsigned long)sk;
79368 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
79369 +#endif
79370 }
79371 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
79372
79373 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
79374 index a55eecc..dd8428c 100644
79375 --- a/net/decnet/sysctl_net_decnet.c
79376 +++ b/net/decnet/sysctl_net_decnet.c
79377 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
79378
79379 if (len > *lenp) len = *lenp;
79380
79381 - if (copy_to_user(buffer, addr, len))
79382 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
79383 return -EFAULT;
79384
79385 *lenp = len;
79386 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
79387
79388 if (len > *lenp) len = *lenp;
79389
79390 - if (copy_to_user(buffer, devname, len))
79391 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
79392 return -EFAULT;
79393
79394 *lenp = len;
79395 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
79396 index 825c608..750ff29 100644
79397 --- a/net/ipv4/fib_frontend.c
79398 +++ b/net/ipv4/fib_frontend.c
79399 @@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
79400 #ifdef CONFIG_IP_ROUTE_MULTIPATH
79401 fib_sync_up(dev);
79402 #endif
79403 - atomic_inc(&net->ipv4.dev_addr_genid);
79404 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79405 rt_cache_flush(dev_net(dev));
79406 break;
79407 case NETDEV_DOWN:
79408 fib_del_ifaddr(ifa, NULL);
79409 - atomic_inc(&net->ipv4.dev_addr_genid);
79410 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79411 if (ifa->ifa_dev->ifa_list == NULL) {
79412 /* Last address was deleted from this interface.
79413 * Disable IP.
79414 @@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
79415 #ifdef CONFIG_IP_ROUTE_MULTIPATH
79416 fib_sync_up(dev);
79417 #endif
79418 - atomic_inc(&net->ipv4.dev_addr_genid);
79419 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79420 rt_cache_flush(net);
79421 break;
79422 case NETDEV_DOWN:
79423 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
79424 index 71b125c..f4c70b0 100644
79425 --- a/net/ipv4/fib_semantics.c
79426 +++ b/net/ipv4/fib_semantics.c
79427 @@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
79428 nh->nh_saddr = inet_select_addr(nh->nh_dev,
79429 nh->nh_gw,
79430 nh->nh_parent->fib_scope);
79431 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
79432 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
79433
79434 return nh->nh_saddr;
79435 }
79436 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
79437 index 7880af9..70f92a3 100644
79438 --- a/net/ipv4/inet_hashtables.c
79439 +++ b/net/ipv4/inet_hashtables.c
79440 @@ -18,12 +18,15 @@
79441 #include <linux/sched.h>
79442 #include <linux/slab.h>
79443 #include <linux/wait.h>
79444 +#include <linux/security.h>
79445
79446 #include <net/inet_connection_sock.h>
79447 #include <net/inet_hashtables.h>
79448 #include <net/secure_seq.h>
79449 #include <net/ip.h>
79450
79451 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
79452 +
79453 /*
79454 * Allocate and initialize a new local port bind bucket.
79455 * The bindhash mutex for snum's hash chain must be held here.
79456 @@ -530,6 +533,8 @@ ok:
79457 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
79458 spin_unlock(&head->lock);
79459
79460 + gr_update_task_in_ip_table(current, inet_sk(sk));
79461 +
79462 if (tw) {
79463 inet_twsk_deschedule(tw, death_row);
79464 while (twrefcnt) {
79465 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
79466 index 000e3d2..5472da3 100644
79467 --- a/net/ipv4/inetpeer.c
79468 +++ b/net/ipv4/inetpeer.c
79469 @@ -503,8 +503,8 @@ relookup:
79470 if (p) {
79471 p->daddr = *daddr;
79472 atomic_set(&p->refcnt, 1);
79473 - atomic_set(&p->rid, 0);
79474 - atomic_set(&p->ip_id_count,
79475 + atomic_set_unchecked(&p->rid, 0);
79476 + atomic_set_unchecked(&p->ip_id_count,
79477 (daddr->family == AF_INET) ?
79478 secure_ip_id(daddr->addr.a4) :
79479 secure_ipv6_id(daddr->addr.a6));
79480 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
79481 index 8d5cc75..821fd11 100644
79482 --- a/net/ipv4/ip_fragment.c
79483 +++ b/net/ipv4/ip_fragment.c
79484 @@ -322,7 +322,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
79485 return 0;
79486
79487 start = qp->rid;
79488 - end = atomic_inc_return(&peer->rid);
79489 + end = atomic_inc_return_unchecked(&peer->rid);
79490 qp->rid = end;
79491
79492 rc = qp->q.fragments && (end - start) > max;
79493 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
79494 index 14bbfcf..644f472 100644
79495 --- a/net/ipv4/ip_sockglue.c
79496 +++ b/net/ipv4/ip_sockglue.c
79497 @@ -1151,7 +1151,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79498 len = min_t(unsigned int, len, opt->optlen);
79499 if (put_user(len, optlen))
79500 return -EFAULT;
79501 - if (copy_to_user(optval, opt->__data, len))
79502 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
79503 + copy_to_user(optval, opt->__data, len))
79504 return -EFAULT;
79505 return 0;
79506 }
79507 @@ -1282,7 +1283,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79508 if (sk->sk_type != SOCK_STREAM)
79509 return -ENOPROTOOPT;
79510
79511 - msg.msg_control = optval;
79512 + msg.msg_control = (void __force_kernel *)optval;
79513 msg.msg_controllen = len;
79514 msg.msg_flags = flags;
79515
79516 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
79517 index 798358b..73570b7 100644
79518 --- a/net/ipv4/ipconfig.c
79519 +++ b/net/ipv4/ipconfig.c
79520 @@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
79521
79522 mm_segment_t oldfs = get_fs();
79523 set_fs(get_ds());
79524 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79525 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79526 set_fs(oldfs);
79527 return res;
79528 }
79529 @@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
79530
79531 mm_segment_t oldfs = get_fs();
79532 set_fs(get_ds());
79533 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79534 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79535 set_fs(oldfs);
79536 return res;
79537 }
79538 @@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
79539
79540 mm_segment_t oldfs = get_fs();
79541 set_fs(get_ds());
79542 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
79543 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
79544 set_fs(oldfs);
79545 return res;
79546 }
79547 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
79548 index 97e61ea..cac1bbb 100644
79549 --- a/net/ipv4/netfilter/arp_tables.c
79550 +++ b/net/ipv4/netfilter/arp_tables.c
79551 @@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
79552 #endif
79553
79554 static int get_info(struct net *net, void __user *user,
79555 - const int *len, int compat)
79556 + int len, int compat)
79557 {
79558 char name[XT_TABLE_MAXNAMELEN];
79559 struct xt_table *t;
79560 int ret;
79561
79562 - if (*len != sizeof(struct arpt_getinfo)) {
79563 - duprintf("length %u != %Zu\n", *len,
79564 + if (len != sizeof(struct arpt_getinfo)) {
79565 + duprintf("length %u != %Zu\n", len,
79566 sizeof(struct arpt_getinfo));
79567 return -EINVAL;
79568 }
79569 @@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
79570 info.size = private->size;
79571 strcpy(info.name, name);
79572
79573 - if (copy_to_user(user, &info, *len) != 0)
79574 + if (copy_to_user(user, &info, len) != 0)
79575 ret = -EFAULT;
79576 else
79577 ret = 0;
79578 @@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
79579
79580 switch (cmd) {
79581 case ARPT_SO_GET_INFO:
79582 - ret = get_info(sock_net(sk), user, len, 1);
79583 + ret = get_info(sock_net(sk), user, *len, 1);
79584 break;
79585 case ARPT_SO_GET_ENTRIES:
79586 ret = compat_get_entries(sock_net(sk), user, len);
79587 @@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
79588
79589 switch (cmd) {
79590 case ARPT_SO_GET_INFO:
79591 - ret = get_info(sock_net(sk), user, len, 0);
79592 + ret = get_info(sock_net(sk), user, *len, 0);
79593 break;
79594
79595 case ARPT_SO_GET_ENTRIES:
79596 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
79597 index 170b1fd..6105b91 100644
79598 --- a/net/ipv4/netfilter/ip_tables.c
79599 +++ b/net/ipv4/netfilter/ip_tables.c
79600 @@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
79601 #endif
79602
79603 static int get_info(struct net *net, void __user *user,
79604 - const int *len, int compat)
79605 + int len, int compat)
79606 {
79607 char name[XT_TABLE_MAXNAMELEN];
79608 struct xt_table *t;
79609 int ret;
79610
79611 - if (*len != sizeof(struct ipt_getinfo)) {
79612 - duprintf("length %u != %zu\n", *len,
79613 + if (len != sizeof(struct ipt_getinfo)) {
79614 + duprintf("length %u != %zu\n", len,
79615 sizeof(struct ipt_getinfo));
79616 return -EINVAL;
79617 }
79618 @@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
79619 info.size = private->size;
79620 strcpy(info.name, name);
79621
79622 - if (copy_to_user(user, &info, *len) != 0)
79623 + if (copy_to_user(user, &info, len) != 0)
79624 ret = -EFAULT;
79625 else
79626 ret = 0;
79627 @@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79628
79629 switch (cmd) {
79630 case IPT_SO_GET_INFO:
79631 - ret = get_info(sock_net(sk), user, len, 1);
79632 + ret = get_info(sock_net(sk), user, *len, 1);
79633 break;
79634 case IPT_SO_GET_ENTRIES:
79635 ret = compat_get_entries(sock_net(sk), user, len);
79636 @@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79637
79638 switch (cmd) {
79639 case IPT_SO_GET_INFO:
79640 - ret = get_info(sock_net(sk), user, len, 0);
79641 + ret = get_info(sock_net(sk), user, *len, 0);
79642 break;
79643
79644 case IPT_SO_GET_ENTRIES:
79645 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
79646 index 8f3d054..c58d05d 100644
79647 --- a/net/ipv4/ping.c
79648 +++ b/net/ipv4/ping.c
79649 @@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
79650 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
79651 0, sock_i_ino(sp),
79652 atomic_read(&sp->sk_refcnt), sp,
79653 - atomic_read(&sp->sk_drops), len);
79654 + atomic_read_unchecked(&sp->sk_drops), len);
79655 }
79656
79657 static int ping_seq_show(struct seq_file *seq, void *v)
79658 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
79659 index 73d1e4d..3af0e8f 100644
79660 --- a/net/ipv4/raw.c
79661 +++ b/net/ipv4/raw.c
79662 @@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
79663 int raw_rcv(struct sock *sk, struct sk_buff *skb)
79664 {
79665 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
79666 - atomic_inc(&sk->sk_drops);
79667 + atomic_inc_unchecked(&sk->sk_drops);
79668 kfree_skb(skb);
79669 return NET_RX_DROP;
79670 }
79671 @@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
79672
79673 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
79674 {
79675 + struct icmp_filter filter;
79676 +
79677 if (optlen > sizeof(struct icmp_filter))
79678 optlen = sizeof(struct icmp_filter);
79679 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
79680 + if (copy_from_user(&filter, optval, optlen))
79681 return -EFAULT;
79682 + raw_sk(sk)->filter = filter;
79683 return 0;
79684 }
79685
79686 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
79687 {
79688 int len, ret = -EFAULT;
79689 + struct icmp_filter filter;
79690
79691 if (get_user(len, optlen))
79692 goto out;
79693 @@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
79694 if (len > sizeof(struct icmp_filter))
79695 len = sizeof(struct icmp_filter);
79696 ret = -EFAULT;
79697 - if (put_user(len, optlen) ||
79698 - copy_to_user(optval, &raw_sk(sk)->filter, len))
79699 + filter = raw_sk(sk)->filter;
79700 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
79701 goto out;
79702 ret = 0;
79703 out: return ret;
79704 @@ -997,7 +1001,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
79705 0, 0L, 0,
79706 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
79707 0, sock_i_ino(sp),
79708 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
79709 + atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
79710 }
79711
79712 static int raw_seq_show(struct seq_file *seq, void *v)
79713 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
79714 index df25142..e92a82a 100644
79715 --- a/net/ipv4/route.c
79716 +++ b/net/ipv4/route.c
79717 @@ -2529,7 +2529,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
79718
79719 static __net_init int rt_genid_init(struct net *net)
79720 {
79721 - atomic_set(&net->rt_genid, 0);
79722 + atomic_set_unchecked(&net->rt_genid, 0);
79723 get_random_bytes(&net->ipv4.dev_addr_genid,
79724 sizeof(net->ipv4.dev_addr_genid));
79725 return 0;
79726 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
79727 index 181fc82..f211869 100644
79728 --- a/net/ipv4/tcp_input.c
79729 +++ b/net/ipv4/tcp_input.c
79730 @@ -4704,7 +4704,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
79731 * simplifies code)
79732 */
79733 static void
79734 -tcp_collapse(struct sock *sk, struct sk_buff_head *list,
79735 +__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
79736 struct sk_buff *head, struct sk_buff *tail,
79737 u32 start, u32 end)
79738 {
79739 @@ -5536,6 +5536,9 @@ slow_path:
79740 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
79741 goto csum_error;
79742
79743 + if (!th->ack)
79744 + goto discard;
79745 +
79746 /*
79747 * Standard slow path.
79748 */
79749 @@ -5544,7 +5547,7 @@ slow_path:
79750 return 0;
79751
79752 step5:
79753 - if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
79754 + if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
79755 goto discard;
79756
79757 /* ts_recent update must be made after we are sure that the packet
79758 @@ -5930,7 +5933,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79759 goto discard;
79760
79761 if (th->syn) {
79762 - if (th->fin)
79763 + if (th->fin || th->urg || th->psh)
79764 goto discard;
79765 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
79766 return 1;
79767 @@ -5977,11 +5980,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79768 if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
79769 goto discard;
79770 }
79771 +
79772 + if (!th->ack)
79773 + goto discard;
79774 +
79775 if (!tcp_validate_incoming(sk, skb, th, 0))
79776 return 0;
79777
79778 /* step 5: check the ACK field */
79779 - if (th->ack) {
79780 + if (true) {
79781 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
79782
79783 switch (sk->sk_state) {
79784 @@ -6131,8 +6138,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79785 }
79786 break;
79787 }
79788 - } else
79789 - goto discard;
79790 + }
79791
79792 /* ts_recent update must be made after we are sure that the packet
79793 * is in window.
79794 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
79795 index 0c4a643..e584990 100644
79796 --- a/net/ipv4/tcp_ipv4.c
79797 +++ b/net/ipv4/tcp_ipv4.c
79798 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
79799 EXPORT_SYMBOL(sysctl_tcp_low_latency);
79800
79801
79802 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79803 +extern int grsec_enable_blackhole;
79804 +#endif
79805 +
79806 #ifdef CONFIG_TCP_MD5SIG
79807 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
79808 __be32 daddr, __be32 saddr, const struct tcphdr *th);
79809 @@ -1901,6 +1905,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
79810 return 0;
79811
79812 reset:
79813 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79814 + if (!grsec_enable_blackhole)
79815 +#endif
79816 tcp_v4_send_reset(rsk, skb);
79817 discard:
79818 kfree_skb(skb);
79819 @@ -2001,12 +2008,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
79820 TCP_SKB_CB(skb)->sacked = 0;
79821
79822 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
79823 - if (!sk)
79824 + if (!sk) {
79825 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79826 + ret = 1;
79827 +#endif
79828 goto no_tcp_socket;
79829 -
79830 + }
79831 process:
79832 - if (sk->sk_state == TCP_TIME_WAIT)
79833 + if (sk->sk_state == TCP_TIME_WAIT) {
79834 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79835 + ret = 2;
79836 +#endif
79837 goto do_time_wait;
79838 + }
79839
79840 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
79841 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
79842 @@ -2057,6 +2071,10 @@ no_tcp_socket:
79843 bad_packet:
79844 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
79845 } else {
79846 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79847 + if (!grsec_enable_blackhole || (ret == 1 &&
79848 + (skb->dev->flags & IFF_LOOPBACK)))
79849 +#endif
79850 tcp_v4_send_reset(NULL, skb);
79851 }
79852
79853 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
79854 index a7302d9..e3ec754 100644
79855 --- a/net/ipv4/tcp_minisocks.c
79856 +++ b/net/ipv4/tcp_minisocks.c
79857 @@ -27,6 +27,10 @@
79858 #include <net/inet_common.h>
79859 #include <net/xfrm.h>
79860
79861 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79862 +extern int grsec_enable_blackhole;
79863 +#endif
79864 +
79865 int sysctl_tcp_syncookies __read_mostly = 1;
79866 EXPORT_SYMBOL(sysctl_tcp_syncookies);
79867
79868 @@ -742,7 +746,10 @@ embryonic_reset:
79869 * avoid becoming vulnerable to outside attack aiming at
79870 * resetting legit local connections.
79871 */
79872 - req->rsk_ops->send_reset(sk, skb);
79873 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79874 + if (!grsec_enable_blackhole)
79875 +#endif
79876 + req->rsk_ops->send_reset(sk, skb);
79877 } else if (fastopen) { /* received a valid RST pkt */
79878 reqsk_fastopen_remove(sk, req, true);
79879 tcp_reset(sk);
79880 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
79881 index 4526fe6..1a34e43 100644
79882 --- a/net/ipv4/tcp_probe.c
79883 +++ b/net/ipv4/tcp_probe.c
79884 @@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
79885 if (cnt + width >= len)
79886 break;
79887
79888 - if (copy_to_user(buf + cnt, tbuf, width))
79889 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
79890 return -EFAULT;
79891 cnt += width;
79892 }
79893 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
79894 index d47c1b4..b0584de 100644
79895 --- a/net/ipv4/tcp_timer.c
79896 +++ b/net/ipv4/tcp_timer.c
79897 @@ -22,6 +22,10 @@
79898 #include <linux/gfp.h>
79899 #include <net/tcp.h>
79900
79901 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79902 +extern int grsec_lastack_retries;
79903 +#endif
79904 +
79905 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
79906 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
79907 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
79908 @@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
79909 }
79910 }
79911
79912 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79913 + if ((sk->sk_state == TCP_LAST_ACK) &&
79914 + (grsec_lastack_retries > 0) &&
79915 + (grsec_lastack_retries < retry_until))
79916 + retry_until = grsec_lastack_retries;
79917 +#endif
79918 +
79919 if (retransmits_timed_out(sk, retry_until,
79920 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
79921 /* Has it gone just too far? */
79922 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
79923 index 79c8dbe..aceb1b6 100644
79924 --- a/net/ipv4/udp.c
79925 +++ b/net/ipv4/udp.c
79926 @@ -87,6 +87,7 @@
79927 #include <linux/types.h>
79928 #include <linux/fcntl.h>
79929 #include <linux/module.h>
79930 +#include <linux/security.h>
79931 #include <linux/socket.h>
79932 #include <linux/sockios.h>
79933 #include <linux/igmp.h>
79934 @@ -111,6 +112,10 @@
79935 #include <trace/events/skb.h>
79936 #include "udp_impl.h"
79937
79938 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79939 +extern int grsec_enable_blackhole;
79940 +#endif
79941 +
79942 struct udp_table udp_table __read_mostly;
79943 EXPORT_SYMBOL(udp_table);
79944
79945 @@ -569,6 +574,9 @@ found:
79946 return s;
79947 }
79948
79949 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
79950 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
79951 +
79952 /*
79953 * This routine is called by the ICMP module when it gets some
79954 * sort of error condition. If err < 0 then the socket should
79955 @@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
79956 dport = usin->sin_port;
79957 if (dport == 0)
79958 return -EINVAL;
79959 +
79960 + err = gr_search_udp_sendmsg(sk, usin);
79961 + if (err)
79962 + return err;
79963 } else {
79964 if (sk->sk_state != TCP_ESTABLISHED)
79965 return -EDESTADDRREQ;
79966 +
79967 + err = gr_search_udp_sendmsg(sk, NULL);
79968 + if (err)
79969 + return err;
79970 +
79971 daddr = inet->inet_daddr;
79972 dport = inet->inet_dport;
79973 /* Open fast path for connected socket.
79974 @@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
79975 udp_lib_checksum_complete(skb)) {
79976 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
79977 IS_UDPLITE(sk));
79978 - atomic_inc(&sk->sk_drops);
79979 + atomic_inc_unchecked(&sk->sk_drops);
79980 __skb_unlink(skb, rcvq);
79981 __skb_queue_tail(&list_kill, skb);
79982 }
79983 @@ -1194,6 +1211,10 @@ try_again:
79984 if (!skb)
79985 goto out;
79986
79987 + err = gr_search_udp_recvmsg(sk, skb);
79988 + if (err)
79989 + goto out_free;
79990 +
79991 ulen = skb->len - sizeof(struct udphdr);
79992 copied = len;
79993 if (copied > ulen)
79994 @@ -1227,7 +1248,7 @@ try_again:
79995 if (unlikely(err)) {
79996 trace_kfree_skb(skb, udp_recvmsg);
79997 if (!peeked) {
79998 - atomic_inc(&sk->sk_drops);
79999 + atomic_inc_unchecked(&sk->sk_drops);
80000 UDP_INC_STATS_USER(sock_net(sk),
80001 UDP_MIB_INERRORS, is_udplite);
80002 }
80003 @@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80004
80005 drop:
80006 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
80007 - atomic_inc(&sk->sk_drops);
80008 + atomic_inc_unchecked(&sk->sk_drops);
80009 kfree_skb(skb);
80010 return -1;
80011 }
80012 @@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
80013 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
80014
80015 if (!skb1) {
80016 - atomic_inc(&sk->sk_drops);
80017 + atomic_inc_unchecked(&sk->sk_drops);
80018 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
80019 IS_UDPLITE(sk));
80020 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
80021 @@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80022 goto csum_error;
80023
80024 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
80025 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80026 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80027 +#endif
80028 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
80029
80030 /*
80031 @@ -2119,7 +2143,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
80032 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
80033 0, sock_i_ino(sp),
80034 atomic_read(&sp->sk_refcnt), sp,
80035 - atomic_read(&sp->sk_drops), len);
80036 + atomic_read_unchecked(&sp->sk_drops), len);
80037 }
80038
80039 int udp4_seq_show(struct seq_file *seq, void *v)
80040 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
80041 index 0424e4e..308dd43 100644
80042 --- a/net/ipv6/addrconf.c
80043 +++ b/net/ipv6/addrconf.c
80044 @@ -2121,7 +2121,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
80045 p.iph.ihl = 5;
80046 p.iph.protocol = IPPROTO_IPV6;
80047 p.iph.ttl = 64;
80048 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
80049 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
80050
80051 if (ops->ndo_do_ioctl) {
80052 mm_segment_t oldfs = get_fs();
80053 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
80054 index d5cb3c4..b3e38d0 100644
80055 --- a/net/ipv6/ip6_gre.c
80056 +++ b/net/ipv6/ip6_gre.c
80057 @@ -1353,7 +1353,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
80058 }
80059
80060
80061 -static struct inet6_protocol ip6gre_protocol __read_mostly = {
80062 +static struct inet6_protocol ip6gre_protocol = {
80063 .handler = ip6gre_rcv,
80064 .err_handler = ip6gre_err,
80065 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
80066 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
80067 index e02faed..9780f28 100644
80068 --- a/net/ipv6/ipv6_sockglue.c
80069 +++ b/net/ipv6/ipv6_sockglue.c
80070 @@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
80071 if (sk->sk_type != SOCK_STREAM)
80072 return -ENOPROTOOPT;
80073
80074 - msg.msg_control = optval;
80075 + msg.msg_control = (void __force_kernel *)optval;
80076 msg.msg_controllen = len;
80077 msg.msg_flags = flags;
80078
80079 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
80080 index d7cb045..8c0ded6 100644
80081 --- a/net/ipv6/netfilter/ip6_tables.c
80082 +++ b/net/ipv6/netfilter/ip6_tables.c
80083 @@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
80084 #endif
80085
80086 static int get_info(struct net *net, void __user *user,
80087 - const int *len, int compat)
80088 + int len, int compat)
80089 {
80090 char name[XT_TABLE_MAXNAMELEN];
80091 struct xt_table *t;
80092 int ret;
80093
80094 - if (*len != sizeof(struct ip6t_getinfo)) {
80095 - duprintf("length %u != %zu\n", *len,
80096 + if (len != sizeof(struct ip6t_getinfo)) {
80097 + duprintf("length %u != %zu\n", len,
80098 sizeof(struct ip6t_getinfo));
80099 return -EINVAL;
80100 }
80101 @@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
80102 info.size = private->size;
80103 strcpy(info.name, name);
80104
80105 - if (copy_to_user(user, &info, *len) != 0)
80106 + if (copy_to_user(user, &info, len) != 0)
80107 ret = -EFAULT;
80108 else
80109 ret = 0;
80110 @@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
80111
80112 switch (cmd) {
80113 case IP6T_SO_GET_INFO:
80114 - ret = get_info(sock_net(sk), user, len, 1);
80115 + ret = get_info(sock_net(sk), user, *len, 1);
80116 break;
80117 case IP6T_SO_GET_ENTRIES:
80118 ret = compat_get_entries(sock_net(sk), user, len);
80119 @@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
80120
80121 switch (cmd) {
80122 case IP6T_SO_GET_INFO:
80123 - ret = get_info(sock_net(sk), user, len, 0);
80124 + ret = get_info(sock_net(sk), user, *len, 0);
80125 break;
80126
80127 case IP6T_SO_GET_ENTRIES:
80128 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
80129 index d8e95c7..81422bc 100644
80130 --- a/net/ipv6/raw.c
80131 +++ b/net/ipv6/raw.c
80132 @@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
80133 {
80134 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
80135 skb_checksum_complete(skb)) {
80136 - atomic_inc(&sk->sk_drops);
80137 + atomic_inc_unchecked(&sk->sk_drops);
80138 kfree_skb(skb);
80139 return NET_RX_DROP;
80140 }
80141 @@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
80142 struct raw6_sock *rp = raw6_sk(sk);
80143
80144 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
80145 - atomic_inc(&sk->sk_drops);
80146 + atomic_inc_unchecked(&sk->sk_drops);
80147 kfree_skb(skb);
80148 return NET_RX_DROP;
80149 }
80150 @@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
80151
80152 if (inet->hdrincl) {
80153 if (skb_checksum_complete(skb)) {
80154 - atomic_inc(&sk->sk_drops);
80155 + atomic_inc_unchecked(&sk->sk_drops);
80156 kfree_skb(skb);
80157 return NET_RX_DROP;
80158 }
80159 @@ -604,7 +604,7 @@ out:
80160 return err;
80161 }
80162
80163 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
80164 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
80165 struct flowi6 *fl6, struct dst_entry **dstp,
80166 unsigned int flags)
80167 {
80168 @@ -916,12 +916,15 @@ do_confirm:
80169 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
80170 char __user *optval, int optlen)
80171 {
80172 + struct icmp6_filter filter;
80173 +
80174 switch (optname) {
80175 case ICMPV6_FILTER:
80176 if (optlen > sizeof(struct icmp6_filter))
80177 optlen = sizeof(struct icmp6_filter);
80178 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
80179 + if (copy_from_user(&filter, optval, optlen))
80180 return -EFAULT;
80181 + raw6_sk(sk)->filter = filter;
80182 return 0;
80183 default:
80184 return -ENOPROTOOPT;
80185 @@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
80186 char __user *optval, int __user *optlen)
80187 {
80188 int len;
80189 + struct icmp6_filter filter;
80190
80191 switch (optname) {
80192 case ICMPV6_FILTER:
80193 @@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
80194 len = sizeof(struct icmp6_filter);
80195 if (put_user(len, optlen))
80196 return -EFAULT;
80197 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
80198 + filter = raw6_sk(sk)->filter;
80199 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
80200 return -EFAULT;
80201 return 0;
80202 default:
80203 @@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
80204 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
80205 0,
80206 sock_i_ino(sp),
80207 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
80208 + atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
80209 }
80210
80211 static int raw6_seq_show(struct seq_file *seq, void *v)
80212 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
80213 index 26175bf..fc3e4fb 100644
80214 --- a/net/ipv6/tcp_ipv6.c
80215 +++ b/net/ipv6/tcp_ipv6.c
80216 @@ -106,6 +106,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
80217 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
80218 }
80219
80220 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80221 +extern int grsec_enable_blackhole;
80222 +#endif
80223 +
80224 static void tcp_v6_hash(struct sock *sk)
80225 {
80226 if (sk->sk_state != TCP_CLOSE) {
80227 @@ -1524,6 +1528,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
80228 return 0;
80229
80230 reset:
80231 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80232 + if (!grsec_enable_blackhole)
80233 +#endif
80234 tcp_v6_send_reset(sk, skb);
80235 discard:
80236 if (opt_skb)
80237 @@ -1605,12 +1612,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
80238 TCP_SKB_CB(skb)->sacked = 0;
80239
80240 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
80241 - if (!sk)
80242 + if (!sk) {
80243 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80244 + ret = 1;
80245 +#endif
80246 goto no_tcp_socket;
80247 + }
80248
80249 process:
80250 - if (sk->sk_state == TCP_TIME_WAIT)
80251 + if (sk->sk_state == TCP_TIME_WAIT) {
80252 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80253 + ret = 2;
80254 +#endif
80255 goto do_time_wait;
80256 + }
80257
80258 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
80259 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
80260 @@ -1659,6 +1674,10 @@ no_tcp_socket:
80261 bad_packet:
80262 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
80263 } else {
80264 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80265 + if (!grsec_enable_blackhole || (ret == 1 &&
80266 + (skb->dev->flags & IFF_LOOPBACK)))
80267 +#endif
80268 tcp_v6_send_reset(NULL, skb);
80269 }
80270
80271 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
80272 index fc99972..69397e8 100644
80273 --- a/net/ipv6/udp.c
80274 +++ b/net/ipv6/udp.c
80275 @@ -51,6 +51,10 @@
80276 #include <trace/events/skb.h>
80277 #include "udp_impl.h"
80278
80279 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80280 +extern int grsec_enable_blackhole;
80281 +#endif
80282 +
80283 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
80284 {
80285 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
80286 @@ -395,7 +399,7 @@ try_again:
80287 if (unlikely(err)) {
80288 trace_kfree_skb(skb, udpv6_recvmsg);
80289 if (!peeked) {
80290 - atomic_inc(&sk->sk_drops);
80291 + atomic_inc_unchecked(&sk->sk_drops);
80292 if (is_udp4)
80293 UDP_INC_STATS_USER(sock_net(sk),
80294 UDP_MIB_INERRORS,
80295 @@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80296 return rc;
80297 drop:
80298 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
80299 - atomic_inc(&sk->sk_drops);
80300 + atomic_inc_unchecked(&sk->sk_drops);
80301 kfree_skb(skb);
80302 return -1;
80303 }
80304 @@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
80305 if (likely(skb1 == NULL))
80306 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
80307 if (!skb1) {
80308 - atomic_inc(&sk->sk_drops);
80309 + atomic_inc_unchecked(&sk->sk_drops);
80310 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
80311 IS_UDPLITE(sk));
80312 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
80313 @@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80314 goto discard;
80315
80316 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
80317 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80318 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80319 +#endif
80320 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
80321
80322 kfree_skb(skb);
80323 @@ -1473,7 +1480,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
80324 0,
80325 sock_i_ino(sp),
80326 atomic_read(&sp->sk_refcnt), sp,
80327 - atomic_read(&sp->sk_drops));
80328 + atomic_read_unchecked(&sp->sk_drops));
80329 }
80330
80331 int udp6_seq_show(struct seq_file *seq, void *v)
80332 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
80333 index 496ce2c..f79fac8 100644
80334 --- a/net/irda/ircomm/ircomm_tty.c
80335 +++ b/net/irda/ircomm/ircomm_tty.c
80336 @@ -311,12 +311,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80337 add_wait_queue(&port->open_wait, &wait);
80338
80339 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
80340 - __FILE__, __LINE__, tty->driver->name, port->count);
80341 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80342
80343 spin_lock_irqsave(&port->lock, flags);
80344 if (!tty_hung_up_p(filp)) {
80345 extra_count = 1;
80346 - port->count--;
80347 + atomic_dec(&port->count);
80348 }
80349 spin_unlock_irqrestore(&port->lock, flags);
80350 port->blocked_open++;
80351 @@ -352,7 +352,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80352 }
80353
80354 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
80355 - __FILE__, __LINE__, tty->driver->name, port->count);
80356 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80357
80358 schedule();
80359 }
80360 @@ -363,13 +363,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80361 if (extra_count) {
80362 /* ++ is not atomic, so this should be protected - Jean II */
80363 spin_lock_irqsave(&port->lock, flags);
80364 - port->count++;
80365 + atomic_inc(&port->count);
80366 spin_unlock_irqrestore(&port->lock, flags);
80367 }
80368 port->blocked_open--;
80369
80370 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
80371 - __FILE__, __LINE__, tty->driver->name, port->count);
80372 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80373
80374 if (!retval)
80375 port->flags |= ASYNC_NORMAL_ACTIVE;
80376 @@ -443,12 +443,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
80377
80378 /* ++ is not atomic, so this should be protected - Jean II */
80379 spin_lock_irqsave(&self->port.lock, flags);
80380 - self->port.count++;
80381 + atomic_inc(&self->port.count);
80382 spin_unlock_irqrestore(&self->port.lock, flags);
80383 tty_port_tty_set(&self->port, tty);
80384
80385 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
80386 - self->line, self->port.count);
80387 + self->line, atomic_read(&self->port.count));
80388
80389 /* Not really used by us, but lets do it anyway */
80390 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
80391 @@ -985,7 +985,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
80392 tty_kref_put(port->tty);
80393 }
80394 port->tty = NULL;
80395 - port->count = 0;
80396 + atomic_set(&port->count, 0);
80397 spin_unlock_irqrestore(&port->lock, flags);
80398
80399 wake_up_interruptible(&port->open_wait);
80400 @@ -1342,7 +1342,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
80401 seq_putc(m, '\n');
80402
80403 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
80404 - seq_printf(m, "Open count: %d\n", self->port.count);
80405 + seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
80406 seq_printf(m, "Max data size: %d\n", self->max_data_size);
80407 seq_printf(m, "Max header size: %d\n", self->max_header_size);
80408
80409 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
80410 index cd6f7a9..e63fe89 100644
80411 --- a/net/iucv/af_iucv.c
80412 +++ b/net/iucv/af_iucv.c
80413 @@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
80414
80415 write_lock_bh(&iucv_sk_list.lock);
80416
80417 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
80418 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80419 while (__iucv_get_sock_by_name(name)) {
80420 sprintf(name, "%08x",
80421 - atomic_inc_return(&iucv_sk_list.autobind_name));
80422 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80423 }
80424
80425 write_unlock_bh(&iucv_sk_list.lock);
80426 diff --git a/net/key/af_key.c b/net/key/af_key.c
80427 index 08897a3..0b812ab 100644
80428 --- a/net/key/af_key.c
80429 +++ b/net/key/af_key.c
80430 @@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
80431 static u32 get_acqseq(void)
80432 {
80433 u32 res;
80434 - static atomic_t acqseq;
80435 + static atomic_unchecked_t acqseq;
80436
80437 do {
80438 - res = atomic_inc_return(&acqseq);
80439 + res = atomic_inc_return_unchecked(&acqseq);
80440 } while (!res);
80441 return res;
80442 }
80443 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
80444 index 7371f67..9897314 100644
80445 --- a/net/mac80211/cfg.c
80446 +++ b/net/mac80211/cfg.c
80447 @@ -2594,7 +2594,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
80448 else
80449 local->probe_req_reg--;
80450
80451 - if (!local->open_count)
80452 + if (!local_read(&local->open_count))
80453 break;
80454
80455 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
80456 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
80457 index 156e583..076e28a 100644
80458 --- a/net/mac80211/ieee80211_i.h
80459 +++ b/net/mac80211/ieee80211_i.h
80460 @@ -28,6 +28,7 @@
80461 #include <net/ieee80211_radiotap.h>
80462 #include <net/cfg80211.h>
80463 #include <net/mac80211.h>
80464 +#include <asm/local.h>
80465 #include "key.h"
80466 #include "sta_info.h"
80467 #include "debug.h"
80468 @@ -848,7 +849,7 @@ struct ieee80211_local {
80469 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
80470 spinlock_t queue_stop_reason_lock;
80471
80472 - int open_count;
80473 + local_t open_count;
80474 int monitors, cooked_mntrs;
80475 /* number of interfaces with corresponding FIF_ flags */
80476 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
80477 @@ -1047,8 +1048,8 @@ struct ieee80211_local {
80478 struct work_struct dynamic_ps_enable_work;
80479 struct work_struct dynamic_ps_disable_work;
80480 struct timer_list dynamic_ps_timer;
80481 - struct notifier_block network_latency_notifier;
80482 - struct notifier_block ifa_notifier;
80483 + notifier_block_no_const network_latency_notifier;
80484 + notifier_block_no_const ifa_notifier;
80485
80486 /*
80487 * The dynamic ps timeout configured from user space via WEXT -
80488 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
80489 index 7de7717..3de8e97 100644
80490 --- a/net/mac80211/iface.c
80491 +++ b/net/mac80211/iface.c
80492 @@ -465,7 +465,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80493 break;
80494 }
80495
80496 - if (local->open_count == 0) {
80497 + if (local_read(&local->open_count) == 0) {
80498 res = drv_start(local);
80499 if (res)
80500 goto err_del_bss;
80501 @@ -508,7 +508,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80502 break;
80503 }
80504
80505 - if (local->monitors == 0 && local->open_count == 0) {
80506 + if (local->monitors == 0 && local_read(&local->open_count) == 0) {
80507 res = ieee80211_add_virtual_monitor(local);
80508 if (res)
80509 goto err_stop;
80510 @@ -616,7 +616,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80511 mutex_unlock(&local->mtx);
80512
80513 if (coming_up)
80514 - local->open_count++;
80515 + local_inc(&local->open_count);
80516
80517 if (hw_reconf_flags)
80518 ieee80211_hw_config(local, hw_reconf_flags);
80519 @@ -630,7 +630,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80520 err_del_interface:
80521 drv_remove_interface(local, sdata);
80522 err_stop:
80523 - if (!local->open_count)
80524 + if (!local_read(&local->open_count))
80525 drv_stop(local);
80526 err_del_bss:
80527 sdata->bss = NULL;
80528 @@ -762,7 +762,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80529 }
80530
80531 if (going_down)
80532 - local->open_count--;
80533 + local_dec(&local->open_count);
80534
80535 switch (sdata->vif.type) {
80536 case NL80211_IFTYPE_AP_VLAN:
80537 @@ -827,7 +827,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80538
80539 ieee80211_recalc_ps(local, -1);
80540
80541 - if (local->open_count == 0) {
80542 + if (local_read(&local->open_count) == 0) {
80543 if (local->ops->napi_poll)
80544 napi_disable(&local->napi);
80545 ieee80211_clear_tx_pending(local);
80546 @@ -859,7 +859,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80547 }
80548 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
80549
80550 - if (local->monitors == local->open_count && local->monitors > 0)
80551 + if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
80552 ieee80211_add_virtual_monitor(local);
80553 }
80554
80555 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
80556 index f57f597..e0a7c03 100644
80557 --- a/net/mac80211/main.c
80558 +++ b/net/mac80211/main.c
80559 @@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
80560 local->hw.conf.power_level = power;
80561 }
80562
80563 - if (changed && local->open_count) {
80564 + if (changed && local_read(&local->open_count)) {
80565 ret = drv_config(local, changed);
80566 /*
80567 * Goal:
80568 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
80569 index 5c572e7..ecf75ce 100644
80570 --- a/net/mac80211/pm.c
80571 +++ b/net/mac80211/pm.c
80572 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80573 struct ieee80211_sub_if_data *sdata;
80574 struct sta_info *sta;
80575
80576 - if (!local->open_count)
80577 + if (!local_read(&local->open_count))
80578 goto suspend;
80579
80580 ieee80211_scan_cancel(local);
80581 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80582 cancel_work_sync(&local->dynamic_ps_enable_work);
80583 del_timer_sync(&local->dynamic_ps_timer);
80584
80585 - local->wowlan = wowlan && local->open_count;
80586 + local->wowlan = wowlan && local_read(&local->open_count);
80587 if (local->wowlan) {
80588 int err = drv_suspend(local, wowlan);
80589 if (err < 0) {
80590 @@ -143,7 +143,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80591 drv_remove_interface(local, sdata);
80592
80593 /* stop hardware - this must stop RX */
80594 - if (local->open_count)
80595 + if (local_read(&local->open_count))
80596 ieee80211_stop_device(local);
80597
80598 suspend:
80599 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
80600 index 3313c11..bec9f17 100644
80601 --- a/net/mac80211/rate.c
80602 +++ b/net/mac80211/rate.c
80603 @@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
80604
80605 ASSERT_RTNL();
80606
80607 - if (local->open_count)
80608 + if (local_read(&local->open_count))
80609 return -EBUSY;
80610
80611 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
80612 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
80613 index c97a065..ff61928 100644
80614 --- a/net/mac80211/rc80211_pid_debugfs.c
80615 +++ b/net/mac80211/rc80211_pid_debugfs.c
80616 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
80617
80618 spin_unlock_irqrestore(&events->lock, status);
80619
80620 - if (copy_to_user(buf, pb, p))
80621 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
80622 return -EFAULT;
80623
80624 return p;
80625 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
80626 index 0151ae3..26709d3 100644
80627 --- a/net/mac80211/util.c
80628 +++ b/net/mac80211/util.c
80629 @@ -1332,7 +1332,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
80630 }
80631 #endif
80632 /* everything else happens only if HW was up & running */
80633 - if (!local->open_count)
80634 + if (!local_read(&local->open_count))
80635 goto wake_up;
80636
80637 /*
80638 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
80639 index fefa514..0755f23 100644
80640 --- a/net/netfilter/Kconfig
80641 +++ b/net/netfilter/Kconfig
80642 @@ -929,6 +929,16 @@ config NETFILTER_XT_MATCH_ESP
80643
80644 To compile it as a module, choose M here. If unsure, say N.
80645
80646 +config NETFILTER_XT_MATCH_GRADM
80647 + tristate '"gradm" match support'
80648 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
80649 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
80650 + ---help---
80651 + The gradm match allows to match on grsecurity RBAC being enabled.
80652 + It is useful when iptables rules are applied early on bootup to
80653 + prevent connections to the machine (except from a trusted host)
80654 + while the RBAC system is disabled.
80655 +
80656 config NETFILTER_XT_MATCH_HASHLIMIT
80657 tristate '"hashlimit" match support'
80658 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
80659 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
80660 index 3259697..54d5393 100644
80661 --- a/net/netfilter/Makefile
80662 +++ b/net/netfilter/Makefile
80663 @@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
80664 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
80665 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
80666 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
80667 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
80668 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
80669 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
80670 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
80671 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
80672 index 1548df9..98ad9b4 100644
80673 --- a/net/netfilter/ipvs/ip_vs_conn.c
80674 +++ b/net/netfilter/ipvs/ip_vs_conn.c
80675 @@ -557,7 +557,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
80676 /* Increase the refcnt counter of the dest */
80677 atomic_inc(&dest->refcnt);
80678
80679 - conn_flags = atomic_read(&dest->conn_flags);
80680 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
80681 if (cp->protocol != IPPROTO_UDP)
80682 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
80683 flags = cp->flags;
80684 @@ -902,7 +902,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
80685 atomic_set(&cp->refcnt, 1);
80686
80687 atomic_set(&cp->n_control, 0);
80688 - atomic_set(&cp->in_pkts, 0);
80689 + atomic_set_unchecked(&cp->in_pkts, 0);
80690
80691 atomic_inc(&ipvs->conn_count);
80692 if (flags & IP_VS_CONN_F_NO_CPORT)
80693 @@ -1183,7 +1183,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
80694
80695 /* Don't drop the entry if its number of incoming packets is not
80696 located in [0, 8] */
80697 - i = atomic_read(&cp->in_pkts);
80698 + i = atomic_read_unchecked(&cp->in_pkts);
80699 if (i > 8 || i < 0) return 0;
80700
80701 if (!todrop_rate[i]) return 0;
80702 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
80703 index 58918e2..4d177a9 100644
80704 --- a/net/netfilter/ipvs/ip_vs_core.c
80705 +++ b/net/netfilter/ipvs/ip_vs_core.c
80706 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
80707 ret = cp->packet_xmit(skb, cp, pd->pp);
80708 /* do not touch skb anymore */
80709
80710 - atomic_inc(&cp->in_pkts);
80711 + atomic_inc_unchecked(&cp->in_pkts);
80712 ip_vs_conn_put(cp);
80713 return ret;
80714 }
80715 @@ -1681,7 +1681,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
80716 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
80717 pkts = sysctl_sync_threshold(ipvs);
80718 else
80719 - pkts = atomic_add_return(1, &cp->in_pkts);
80720 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80721
80722 if (ipvs->sync_state & IP_VS_STATE_MASTER)
80723 ip_vs_sync_conn(net, cp, pkts);
80724 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
80725 index c4ee437..a774a74 100644
80726 --- a/net/netfilter/ipvs/ip_vs_ctl.c
80727 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
80728 @@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
80729 ip_vs_rs_hash(ipvs, dest);
80730 write_unlock_bh(&ipvs->rs_lock);
80731 }
80732 - atomic_set(&dest->conn_flags, conn_flags);
80733 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
80734
80735 /* bind the service */
80736 if (!dest->svc) {
80737 @@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
80738 " %-7s %-6d %-10d %-10d\n",
80739 &dest->addr.in6,
80740 ntohs(dest->port),
80741 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
80742 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
80743 atomic_read(&dest->weight),
80744 atomic_read(&dest->activeconns),
80745 atomic_read(&dest->inactconns));
80746 @@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
80747 "%-7s %-6d %-10d %-10d\n",
80748 ntohl(dest->addr.ip),
80749 ntohs(dest->port),
80750 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
80751 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
80752 atomic_read(&dest->weight),
80753 atomic_read(&dest->activeconns),
80754 atomic_read(&dest->inactconns));
80755 @@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
80756
80757 entry.addr = dest->addr.ip;
80758 entry.port = dest->port;
80759 - entry.conn_flags = atomic_read(&dest->conn_flags);
80760 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
80761 entry.weight = atomic_read(&dest->weight);
80762 entry.u_threshold = dest->u_threshold;
80763 entry.l_threshold = dest->l_threshold;
80764 @@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
80765 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
80766 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
80767 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
80768 - (atomic_read(&dest->conn_flags) &
80769 + (atomic_read_unchecked(&dest->conn_flags) &
80770 IP_VS_CONN_F_FWD_MASK)) ||
80771 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
80772 atomic_read(&dest->weight)) ||
80773 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
80774 index effa10c..9058928 100644
80775 --- a/net/netfilter/ipvs/ip_vs_sync.c
80776 +++ b/net/netfilter/ipvs/ip_vs_sync.c
80777 @@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
80778 cp = cp->control;
80779 if (cp) {
80780 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
80781 - pkts = atomic_add_return(1, &cp->in_pkts);
80782 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80783 else
80784 pkts = sysctl_sync_threshold(ipvs);
80785 ip_vs_sync_conn(net, cp->control, pkts);
80786 @@ -758,7 +758,7 @@ control:
80787 if (!cp)
80788 return;
80789 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
80790 - pkts = atomic_add_return(1, &cp->in_pkts);
80791 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80792 else
80793 pkts = sysctl_sync_threshold(ipvs);
80794 goto sloop;
80795 @@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
80796
80797 if (opt)
80798 memcpy(&cp->in_seq, opt, sizeof(*opt));
80799 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
80800 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
80801 cp->state = state;
80802 cp->old_state = cp->state;
80803 /*
80804 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
80805 index cc4c809..50f8fe5 100644
80806 --- a/net/netfilter/ipvs/ip_vs_xmit.c
80807 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
80808 @@ -1202,7 +1202,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
80809 else
80810 rc = NF_ACCEPT;
80811 /* do not touch skb anymore */
80812 - atomic_inc(&cp->in_pkts);
80813 + atomic_inc_unchecked(&cp->in_pkts);
80814 goto out;
80815 }
80816
80817 @@ -1323,7 +1323,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
80818 else
80819 rc = NF_ACCEPT;
80820 /* do not touch skb anymore */
80821 - atomic_inc(&cp->in_pkts);
80822 + atomic_inc_unchecked(&cp->in_pkts);
80823 goto out;
80824 }
80825
80826 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
80827 index 0f241be..2c9be6d 100644
80828 --- a/net/netfilter/nf_conntrack_core.c
80829 +++ b/net/netfilter/nf_conntrack_core.c
80830 @@ -1532,6 +1532,10 @@ err_extend:
80831 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
80832 #define DYING_NULLS_VAL ((1<<30)+1)
80833
80834 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80835 +static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
80836 +#endif
80837 +
80838 static int nf_conntrack_init_net(struct net *net)
80839 {
80840 int ret;
80841 @@ -1545,7 +1549,11 @@ static int nf_conntrack_init_net(struct net *net)
80842 goto err_stat;
80843 }
80844
80845 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80846 + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
80847 +#else
80848 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
80849 +#endif
80850 if (!net->ct.slabname) {
80851 ret = -ENOMEM;
80852 goto err_slabname;
80853 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
80854 index 9f199f2..719ad23 100644
80855 --- a/net/netfilter/nfnetlink_log.c
80856 +++ b/net/netfilter/nfnetlink_log.c
80857 @@ -71,7 +71,7 @@ struct nfulnl_instance {
80858 };
80859
80860 static DEFINE_SPINLOCK(instances_lock);
80861 -static atomic_t global_seq;
80862 +static atomic_unchecked_t global_seq;
80863
80864 #define INSTANCE_BUCKETS 16
80865 static struct hlist_head instance_table[INSTANCE_BUCKETS];
80866 @@ -527,7 +527,7 @@ __build_packet_message(struct nfulnl_instance *inst,
80867 /* global sequence number */
80868 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
80869 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
80870 - htonl(atomic_inc_return(&global_seq))))
80871 + htonl(atomic_inc_return_unchecked(&global_seq))))
80872 goto nla_put_failure;
80873
80874 if (data_len) {
80875 diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
80876 index bd93e51..fcbbac4 100644
80877 --- a/net/netfilter/xt_TEE.c
80878 +++ b/net/netfilter/xt_TEE.c
80879 @@ -31,7 +31,7 @@
80880 #endif
80881
80882 struct xt_tee_priv {
80883 - struct notifier_block notifier;
80884 + notifier_block_no_const notifier;
80885 struct xt_tee_tginfo *tginfo;
80886 int oif;
80887 };
80888 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
80889 new file mode 100644
80890 index 0000000..c566332
80891 --- /dev/null
80892 +++ b/net/netfilter/xt_gradm.c
80893 @@ -0,0 +1,51 @@
80894 +/*
80895 + * gradm match for netfilter
80896 + * Copyright © Zbigniew Krzystolik, 2010
80897 + *
80898 + * This program is free software; you can redistribute it and/or modify
80899 + * it under the terms of the GNU General Public License; either version
80900 + * 2 or 3 as published by the Free Software Foundation.
80901 + */
80902 +#include <linux/module.h>
80903 +#include <linux/moduleparam.h>
80904 +#include <linux/skbuff.h>
80905 +#include <linux/netfilter/x_tables.h>
80906 +#include <linux/grsecurity.h>
80907 +#include <linux/netfilter/xt_gradm.h>
80908 +
80909 +static bool
80910 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
80911 +{
80912 + const struct xt_gradm_mtinfo *info = par->matchinfo;
80913 + bool retval = false;
80914 + if (gr_acl_is_enabled())
80915 + retval = true;
80916 + return retval ^ info->invflags;
80917 +}
80918 +
80919 +static struct xt_match gradm_mt_reg __read_mostly = {
80920 + .name = "gradm",
80921 + .revision = 0,
80922 + .family = NFPROTO_UNSPEC,
80923 + .match = gradm_mt,
80924 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
80925 + .me = THIS_MODULE,
80926 +};
80927 +
80928 +static int __init gradm_mt_init(void)
80929 +{
80930 + return xt_register_match(&gradm_mt_reg);
80931 +}
80932 +
80933 +static void __exit gradm_mt_exit(void)
80934 +{
80935 + xt_unregister_match(&gradm_mt_reg);
80936 +}
80937 +
80938 +module_init(gradm_mt_init);
80939 +module_exit(gradm_mt_exit);
80940 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
80941 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
80942 +MODULE_LICENSE("GPL");
80943 +MODULE_ALIAS("ipt_gradm");
80944 +MODULE_ALIAS("ip6t_gradm");
80945 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
80946 index 4fe4fb4..87a89e5 100644
80947 --- a/net/netfilter/xt_statistic.c
80948 +++ b/net/netfilter/xt_statistic.c
80949 @@ -19,7 +19,7 @@
80950 #include <linux/module.h>
80951
80952 struct xt_statistic_priv {
80953 - atomic_t count;
80954 + atomic_unchecked_t count;
80955 } ____cacheline_aligned_in_smp;
80956
80957 MODULE_LICENSE("GPL");
80958 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
80959 break;
80960 case XT_STATISTIC_MODE_NTH:
80961 do {
80962 - oval = atomic_read(&info->master->count);
80963 + oval = atomic_read_unchecked(&info->master->count);
80964 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
80965 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
80966 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
80967 if (nval == 0)
80968 ret = !ret;
80969 break;
80970 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
80971 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
80972 if (info->master == NULL)
80973 return -ENOMEM;
80974 - atomic_set(&info->master->count, info->u.nth.count);
80975 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
80976
80977 return 0;
80978 }
80979 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
80980 index 4da797f..eb1df70 100644
80981 --- a/net/netlink/af_netlink.c
80982 +++ b/net/netlink/af_netlink.c
80983 @@ -782,7 +782,7 @@ static void netlink_overrun(struct sock *sk)
80984 sk->sk_error_report(sk);
80985 }
80986 }
80987 - atomic_inc(&sk->sk_drops);
80988 + atomic_inc_unchecked(&sk->sk_drops);
80989 }
80990
80991 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
80992 @@ -2068,7 +2068,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
80993 sk_wmem_alloc_get(s),
80994 nlk->cb,
80995 atomic_read(&s->sk_refcnt),
80996 - atomic_read(&s->sk_drops),
80997 + atomic_read_unchecked(&s->sk_drops),
80998 sock_i_ino(s)
80999 );
81000
81001 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
81002 index 7261eb8..44e8ac6 100644
81003 --- a/net/netrom/af_netrom.c
81004 +++ b/net/netrom/af_netrom.c
81005 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
81006 struct sock *sk = sock->sk;
81007 struct nr_sock *nr = nr_sk(sk);
81008
81009 + memset(sax, 0, sizeof(*sax));
81010 lock_sock(sk);
81011 if (peer != 0) {
81012 if (sk->sk_state != TCP_ESTABLISHED) {
81013 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
81014 *uaddr_len = sizeof(struct full_sockaddr_ax25);
81015 } else {
81016 sax->fsa_ax25.sax25_family = AF_NETROM;
81017 - sax->fsa_ax25.sax25_ndigis = 0;
81018 sax->fsa_ax25.sax25_call = nr->source_addr;
81019 *uaddr_len = sizeof(struct sockaddr_ax25);
81020 }
81021 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
81022 index 94060ed..9c066f3 100644
81023 --- a/net/packet/af_packet.c
81024 +++ b/net/packet/af_packet.c
81025 @@ -1578,7 +1578,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
81026
81027 spin_lock(&sk->sk_receive_queue.lock);
81028 po->stats.tp_packets++;
81029 - skb->dropcount = atomic_read(&sk->sk_drops);
81030 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
81031 __skb_queue_tail(&sk->sk_receive_queue, skb);
81032 spin_unlock(&sk->sk_receive_queue.lock);
81033 sk->sk_data_ready(sk, skb->len);
81034 @@ -1587,7 +1587,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
81035 drop_n_acct:
81036 spin_lock(&sk->sk_receive_queue.lock);
81037 po->stats.tp_drops++;
81038 - atomic_inc(&sk->sk_drops);
81039 + atomic_inc_unchecked(&sk->sk_drops);
81040 spin_unlock(&sk->sk_receive_queue.lock);
81041
81042 drop_n_restore:
81043 @@ -2537,6 +2537,7 @@ out:
81044
81045 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
81046 {
81047 + struct sock_extended_err ee;
81048 struct sock_exterr_skb *serr;
81049 struct sk_buff *skb, *skb2;
81050 int copied, err;
81051 @@ -2558,8 +2559,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
81052 sock_recv_timestamp(msg, sk, skb);
81053
81054 serr = SKB_EXT_ERR(skb);
81055 + ee = serr->ee;
81056 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
81057 - sizeof(serr->ee), &serr->ee);
81058 + sizeof ee, &ee);
81059
81060 msg->msg_flags |= MSG_ERRQUEUE;
81061 err = copied;
81062 @@ -3171,7 +3173,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
81063 case PACKET_HDRLEN:
81064 if (len > sizeof(int))
81065 len = sizeof(int);
81066 - if (copy_from_user(&val, optval, len))
81067 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
81068 return -EFAULT;
81069 switch (val) {
81070 case TPACKET_V1:
81071 @@ -3210,7 +3212,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
81072 len = lv;
81073 if (put_user(len, optlen))
81074 return -EFAULT;
81075 - if (copy_to_user(optval, data, len))
81076 + if (len > sizeof(st) || copy_to_user(optval, data, len))
81077 return -EFAULT;
81078 return 0;
81079 }
81080 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
81081 index 5a940db..f0b9c12 100644
81082 --- a/net/phonet/af_phonet.c
81083 +++ b/net/phonet/af_phonet.c
81084 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
81085 {
81086 struct phonet_protocol *pp;
81087
81088 - if (protocol >= PHONET_NPROTO)
81089 + if (protocol < 0 || protocol >= PHONET_NPROTO)
81090 return NULL;
81091
81092 rcu_read_lock();
81093 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
81094 {
81095 int err = 0;
81096
81097 - if (protocol >= PHONET_NPROTO)
81098 + if (protocol < 0 || protocol >= PHONET_NPROTO)
81099 return -EINVAL;
81100
81101 err = proto_register(pp->prot, 1);
81102 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
81103 index 576f22c..bc7a71b 100644
81104 --- a/net/phonet/pep.c
81105 +++ b/net/phonet/pep.c
81106 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
81107
81108 case PNS_PEP_CTRL_REQ:
81109 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
81110 - atomic_inc(&sk->sk_drops);
81111 + atomic_inc_unchecked(&sk->sk_drops);
81112 break;
81113 }
81114 __skb_pull(skb, 4);
81115 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
81116 }
81117
81118 if (pn->rx_credits == 0) {
81119 - atomic_inc(&sk->sk_drops);
81120 + atomic_inc_unchecked(&sk->sk_drops);
81121 err = -ENOBUFS;
81122 break;
81123 }
81124 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
81125 }
81126
81127 if (pn->rx_credits == 0) {
81128 - atomic_inc(&sk->sk_drops);
81129 + atomic_inc_unchecked(&sk->sk_drops);
81130 err = NET_RX_DROP;
81131 break;
81132 }
81133 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
81134 index b7e9827..c264c85 100644
81135 --- a/net/phonet/socket.c
81136 +++ b/net/phonet/socket.c
81137 @@ -615,7 +615,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
81138 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
81139 sock_i_ino(sk),
81140 atomic_read(&sk->sk_refcnt), sk,
81141 - atomic_read(&sk->sk_drops), &len);
81142 + atomic_read_unchecked(&sk->sk_drops), &len);
81143 }
81144 seq_printf(seq, "%*s\n", 127 - len, "");
81145 return 0;
81146 diff --git a/net/rds/cong.c b/net/rds/cong.c
81147 index e5b65ac..f3b6fb7 100644
81148 --- a/net/rds/cong.c
81149 +++ b/net/rds/cong.c
81150 @@ -78,7 +78,7 @@
81151 * finds that the saved generation number is smaller than the global generation
81152 * number, it wakes up the process.
81153 */
81154 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
81155 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
81156
81157 /*
81158 * Congestion monitoring
81159 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
81160 rdsdebug("waking map %p for %pI4\n",
81161 map, &map->m_addr);
81162 rds_stats_inc(s_cong_update_received);
81163 - atomic_inc(&rds_cong_generation);
81164 + atomic_inc_unchecked(&rds_cong_generation);
81165 if (waitqueue_active(&map->m_waitq))
81166 wake_up(&map->m_waitq);
81167 if (waitqueue_active(&rds_poll_waitq))
81168 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
81169
81170 int rds_cong_updated_since(unsigned long *recent)
81171 {
81172 - unsigned long gen = atomic_read(&rds_cong_generation);
81173 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
81174
81175 if (likely(*recent == gen))
81176 return 0;
81177 diff --git a/net/rds/ib.h b/net/rds/ib.h
81178 index 8d2b3d5..227ec5b 100644
81179 --- a/net/rds/ib.h
81180 +++ b/net/rds/ib.h
81181 @@ -128,7 +128,7 @@ struct rds_ib_connection {
81182 /* sending acks */
81183 unsigned long i_ack_flags;
81184 #ifdef KERNEL_HAS_ATOMIC64
81185 - atomic64_t i_ack_next; /* next ACK to send */
81186 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
81187 #else
81188 spinlock_t i_ack_lock; /* protect i_ack_next */
81189 u64 i_ack_next; /* next ACK to send */
81190 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
81191 index a1e1162..265e129 100644
81192 --- a/net/rds/ib_cm.c
81193 +++ b/net/rds/ib_cm.c
81194 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
81195 /* Clear the ACK state */
81196 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
81197 #ifdef KERNEL_HAS_ATOMIC64
81198 - atomic64_set(&ic->i_ack_next, 0);
81199 + atomic64_set_unchecked(&ic->i_ack_next, 0);
81200 #else
81201 ic->i_ack_next = 0;
81202 #endif
81203 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
81204 index 8d19491..05a3e65 100644
81205 --- a/net/rds/ib_recv.c
81206 +++ b/net/rds/ib_recv.c
81207 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
81208 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
81209 int ack_required)
81210 {
81211 - atomic64_set(&ic->i_ack_next, seq);
81212 + atomic64_set_unchecked(&ic->i_ack_next, seq);
81213 if (ack_required) {
81214 smp_mb__before_clear_bit();
81215 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
81216 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
81217 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
81218 smp_mb__after_clear_bit();
81219
81220 - return atomic64_read(&ic->i_ack_next);
81221 + return atomic64_read_unchecked(&ic->i_ack_next);
81222 }
81223 #endif
81224
81225 diff --git a/net/rds/iw.h b/net/rds/iw.h
81226 index 04ce3b1..48119a6 100644
81227 --- a/net/rds/iw.h
81228 +++ b/net/rds/iw.h
81229 @@ -134,7 +134,7 @@ struct rds_iw_connection {
81230 /* sending acks */
81231 unsigned long i_ack_flags;
81232 #ifdef KERNEL_HAS_ATOMIC64
81233 - atomic64_t i_ack_next; /* next ACK to send */
81234 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
81235 #else
81236 spinlock_t i_ack_lock; /* protect i_ack_next */
81237 u64 i_ack_next; /* next ACK to send */
81238 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
81239 index a91e1db..cf3053f 100644
81240 --- a/net/rds/iw_cm.c
81241 +++ b/net/rds/iw_cm.c
81242 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
81243 /* Clear the ACK state */
81244 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
81245 #ifdef KERNEL_HAS_ATOMIC64
81246 - atomic64_set(&ic->i_ack_next, 0);
81247 + atomic64_set_unchecked(&ic->i_ack_next, 0);
81248 #else
81249 ic->i_ack_next = 0;
81250 #endif
81251 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
81252 index 4503335..db566b4 100644
81253 --- a/net/rds/iw_recv.c
81254 +++ b/net/rds/iw_recv.c
81255 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
81256 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
81257 int ack_required)
81258 {
81259 - atomic64_set(&ic->i_ack_next, seq);
81260 + atomic64_set_unchecked(&ic->i_ack_next, seq);
81261 if (ack_required) {
81262 smp_mb__before_clear_bit();
81263 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
81264 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
81265 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
81266 smp_mb__after_clear_bit();
81267
81268 - return atomic64_read(&ic->i_ack_next);
81269 + return atomic64_read_unchecked(&ic->i_ack_next);
81270 }
81271 #endif
81272
81273 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
81274 index edac9ef..16bcb98 100644
81275 --- a/net/rds/tcp.c
81276 +++ b/net/rds/tcp.c
81277 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
81278 int val = 1;
81279
81280 set_fs(KERNEL_DS);
81281 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
81282 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
81283 sizeof(val));
81284 set_fs(oldfs);
81285 }
81286 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
81287 index 81cf5a4..b5826ff 100644
81288 --- a/net/rds/tcp_send.c
81289 +++ b/net/rds/tcp_send.c
81290 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
81291
81292 oldfs = get_fs();
81293 set_fs(KERNEL_DS);
81294 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
81295 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
81296 sizeof(val));
81297 set_fs(oldfs);
81298 }
81299 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
81300 index 05996d0..5a1dfe0 100644
81301 --- a/net/rxrpc/af_rxrpc.c
81302 +++ b/net/rxrpc/af_rxrpc.c
81303 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
81304 __be32 rxrpc_epoch;
81305
81306 /* current debugging ID */
81307 -atomic_t rxrpc_debug_id;
81308 +atomic_unchecked_t rxrpc_debug_id;
81309
81310 /* count of skbs currently in use */
81311 atomic_t rxrpc_n_skbs;
81312 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
81313 index e4d9cbc..b229649 100644
81314 --- a/net/rxrpc/ar-ack.c
81315 +++ b/net/rxrpc/ar-ack.c
81316 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
81317
81318 _enter("{%d,%d,%d,%d},",
81319 call->acks_hard, call->acks_unacked,
81320 - atomic_read(&call->sequence),
81321 + atomic_read_unchecked(&call->sequence),
81322 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
81323
81324 stop = 0;
81325 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
81326
81327 /* each Tx packet has a new serial number */
81328 sp->hdr.serial =
81329 - htonl(atomic_inc_return(&call->conn->serial));
81330 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
81331
81332 hdr = (struct rxrpc_header *) txb->head;
81333 hdr->serial = sp->hdr.serial;
81334 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
81335 */
81336 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
81337 {
81338 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
81339 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
81340 }
81341
81342 /*
81343 @@ -629,7 +629,7 @@ process_further:
81344
81345 latest = ntohl(sp->hdr.serial);
81346 hard = ntohl(ack.firstPacket);
81347 - tx = atomic_read(&call->sequence);
81348 + tx = atomic_read_unchecked(&call->sequence);
81349
81350 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
81351 latest,
81352 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
81353 goto maybe_reschedule;
81354
81355 send_ACK_with_skew:
81356 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
81357 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
81358 ntohl(ack.serial));
81359 send_ACK:
81360 mtu = call->conn->trans->peer->if_mtu;
81361 @@ -1173,7 +1173,7 @@ send_ACK:
81362 ackinfo.rxMTU = htonl(5692);
81363 ackinfo.jumbo_max = htonl(4);
81364
81365 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
81366 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
81367 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
81368 ntohl(hdr.serial),
81369 ntohs(ack.maxSkew),
81370 @@ -1191,7 +1191,7 @@ send_ACK:
81371 send_message:
81372 _debug("send message");
81373
81374 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
81375 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
81376 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
81377 send_message_2:
81378
81379 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
81380 index a3bbb36..3341fb9 100644
81381 --- a/net/rxrpc/ar-call.c
81382 +++ b/net/rxrpc/ar-call.c
81383 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
81384 spin_lock_init(&call->lock);
81385 rwlock_init(&call->state_lock);
81386 atomic_set(&call->usage, 1);
81387 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
81388 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
81389 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
81390
81391 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
81392 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
81393 index 4106ca9..a338d7a 100644
81394 --- a/net/rxrpc/ar-connection.c
81395 +++ b/net/rxrpc/ar-connection.c
81396 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
81397 rwlock_init(&conn->lock);
81398 spin_lock_init(&conn->state_lock);
81399 atomic_set(&conn->usage, 1);
81400 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
81401 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
81402 conn->avail_calls = RXRPC_MAXCALLS;
81403 conn->size_align = 4;
81404 conn->header_size = sizeof(struct rxrpc_header);
81405 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
81406 index e7ed43a..6afa140 100644
81407 --- a/net/rxrpc/ar-connevent.c
81408 +++ b/net/rxrpc/ar-connevent.c
81409 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
81410
81411 len = iov[0].iov_len + iov[1].iov_len;
81412
81413 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
81414 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
81415 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
81416
81417 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
81418 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
81419 index 529572f..c758ca7 100644
81420 --- a/net/rxrpc/ar-input.c
81421 +++ b/net/rxrpc/ar-input.c
81422 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
81423 /* track the latest serial number on this connection for ACK packet
81424 * information */
81425 serial = ntohl(sp->hdr.serial);
81426 - hi_serial = atomic_read(&call->conn->hi_serial);
81427 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
81428 while (serial > hi_serial)
81429 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
81430 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
81431 serial);
81432
81433 /* request ACK generation for any ACK or DATA packet that requests
81434 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
81435 index a693aca..81e7293 100644
81436 --- a/net/rxrpc/ar-internal.h
81437 +++ b/net/rxrpc/ar-internal.h
81438 @@ -272,8 +272,8 @@ struct rxrpc_connection {
81439 int error; /* error code for local abort */
81440 int debug_id; /* debug ID for printks */
81441 unsigned int call_counter; /* call ID counter */
81442 - atomic_t serial; /* packet serial number counter */
81443 - atomic_t hi_serial; /* highest serial number received */
81444 + atomic_unchecked_t serial; /* packet serial number counter */
81445 + atomic_unchecked_t hi_serial; /* highest serial number received */
81446 u8 avail_calls; /* number of calls available */
81447 u8 size_align; /* data size alignment (for security) */
81448 u8 header_size; /* rxrpc + security header size */
81449 @@ -346,7 +346,7 @@ struct rxrpc_call {
81450 spinlock_t lock;
81451 rwlock_t state_lock; /* lock for state transition */
81452 atomic_t usage;
81453 - atomic_t sequence; /* Tx data packet sequence counter */
81454 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
81455 u32 abort_code; /* local/remote abort code */
81456 enum { /* current state of call */
81457 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
81458 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
81459 */
81460 extern atomic_t rxrpc_n_skbs;
81461 extern __be32 rxrpc_epoch;
81462 -extern atomic_t rxrpc_debug_id;
81463 +extern atomic_unchecked_t rxrpc_debug_id;
81464 extern struct workqueue_struct *rxrpc_workqueue;
81465
81466 /*
81467 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
81468 index 87f7135..74d3703 100644
81469 --- a/net/rxrpc/ar-local.c
81470 +++ b/net/rxrpc/ar-local.c
81471 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
81472 spin_lock_init(&local->lock);
81473 rwlock_init(&local->services_lock);
81474 atomic_set(&local->usage, 1);
81475 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
81476 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
81477 memcpy(&local->srx, srx, sizeof(*srx));
81478 }
81479
81480 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
81481 index e1ac183..b43e10e 100644
81482 --- a/net/rxrpc/ar-output.c
81483 +++ b/net/rxrpc/ar-output.c
81484 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
81485 sp->hdr.cid = call->cid;
81486 sp->hdr.callNumber = call->call_id;
81487 sp->hdr.seq =
81488 - htonl(atomic_inc_return(&call->sequence));
81489 + htonl(atomic_inc_return_unchecked(&call->sequence));
81490 sp->hdr.serial =
81491 - htonl(atomic_inc_return(&conn->serial));
81492 + htonl(atomic_inc_return_unchecked(&conn->serial));
81493 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
81494 sp->hdr.userStatus = 0;
81495 sp->hdr.securityIndex = conn->security_ix;
81496 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
81497 index bebaa43..2644591 100644
81498 --- a/net/rxrpc/ar-peer.c
81499 +++ b/net/rxrpc/ar-peer.c
81500 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
81501 INIT_LIST_HEAD(&peer->error_targets);
81502 spin_lock_init(&peer->lock);
81503 atomic_set(&peer->usage, 1);
81504 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
81505 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
81506 memcpy(&peer->srx, srx, sizeof(*srx));
81507
81508 rxrpc_assess_MTU_size(peer);
81509 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
81510 index 38047f7..9f48511 100644
81511 --- a/net/rxrpc/ar-proc.c
81512 +++ b/net/rxrpc/ar-proc.c
81513 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
81514 atomic_read(&conn->usage),
81515 rxrpc_conn_states[conn->state],
81516 key_serial(conn->key),
81517 - atomic_read(&conn->serial),
81518 - atomic_read(&conn->hi_serial));
81519 + atomic_read_unchecked(&conn->serial),
81520 + atomic_read_unchecked(&conn->hi_serial));
81521
81522 return 0;
81523 }
81524 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
81525 index 92df566..87ec1bf 100644
81526 --- a/net/rxrpc/ar-transport.c
81527 +++ b/net/rxrpc/ar-transport.c
81528 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
81529 spin_lock_init(&trans->client_lock);
81530 rwlock_init(&trans->conn_lock);
81531 atomic_set(&trans->usage, 1);
81532 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
81533 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
81534
81535 if (peer->srx.transport.family == AF_INET) {
81536 switch (peer->srx.transport_type) {
81537 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
81538 index f226709..0e735a8 100644
81539 --- a/net/rxrpc/rxkad.c
81540 +++ b/net/rxrpc/rxkad.c
81541 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
81542
81543 len = iov[0].iov_len + iov[1].iov_len;
81544
81545 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
81546 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
81547 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
81548
81549 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
81550 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
81551
81552 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
81553
81554 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
81555 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
81556 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
81557
81558 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
81559 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
81560 index ea14cb4..f27b19d 100644
81561 --- a/net/sctp/ipv6.c
81562 +++ b/net/sctp/ipv6.c
81563 @@ -1037,7 +1037,9 @@ void sctp_v6_pf_init(void)
81564
81565 void sctp_v6_pf_exit(void)
81566 {
81567 - list_del(&sctp_af_inet6.list);
81568 + pax_open_kernel();
81569 + list_del((struct list_head *)&sctp_af_inet6.list);
81570 + pax_close_kernel();
81571 }
81572
81573 /* Initialize IPv6 support and register with socket layer. */
81574 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
81575 index 9966e7b..540c575 100644
81576 --- a/net/sctp/proc.c
81577 +++ b/net/sctp/proc.c
81578 @@ -328,7 +328,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
81579 seq_printf(seq,
81580 "%8pK %8pK %-3d %-3d %-2d %-4d "
81581 "%4d %8d %8d %7d %5lu %-5d %5d ",
81582 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
81583 + assoc, sk,
81584 + sctp_sk(sk)->type, sk->sk_state,
81585 assoc->state, hash,
81586 assoc->assoc_id,
81587 assoc->sndbuf_used,
81588 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
81589 index 2d51842..ef112fb 100644
81590 --- a/net/sctp/protocol.c
81591 +++ b/net/sctp/protocol.c
81592 @@ -834,8 +834,10 @@ int sctp_register_af(struct sctp_af *af)
81593 return 0;
81594 }
81595
81596 - INIT_LIST_HEAD(&af->list);
81597 - list_add_tail(&af->list, &sctp_address_families);
81598 + pax_open_kernel();
81599 + INIT_LIST_HEAD((struct list_head *)&af->list);
81600 + list_add_tail((struct list_head *)&af->list, &sctp_address_families);
81601 + pax_close_kernel();
81602 return 1;
81603 }
81604
81605 @@ -1122,7 +1124,9 @@ static void sctp_v4_pf_init(void)
81606
81607 static void sctp_v4_pf_exit(void)
81608 {
81609 - list_del(&sctp_af_inet.list);
81610 + pax_open_kernel();
81611 + list_del((struct list_head *)&sctp_af_inet.list);
81612 + pax_close_kernel();
81613 }
81614
81615 static int sctp_v4_protosw_init(void)
81616 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
81617 index 406d957..543c737 100644
81618 --- a/net/sctp/socket.c
81619 +++ b/net/sctp/socket.c
81620 @@ -4661,6 +4661,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
81621 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
81622 if (space_left < addrlen)
81623 return -ENOMEM;
81624 + if (addrlen > sizeof(temp) || addrlen < 0)
81625 + return -EFAULT;
81626 if (copy_to_user(to, &temp, addrlen))
81627 return -EFAULT;
81628 to += addrlen;
81629 diff --git a/net/socket.c b/net/socket.c
81630 index d92c490..b4bc863 100644
81631 --- a/net/socket.c
81632 +++ b/net/socket.c
81633 @@ -89,6 +89,7 @@
81634 #include <linux/magic.h>
81635 #include <linux/slab.h>
81636 #include <linux/xattr.h>
81637 +#include <linux/in.h>
81638
81639 #include <asm/uaccess.h>
81640 #include <asm/unistd.h>
81641 @@ -106,6 +107,8 @@
81642 #include <linux/sockios.h>
81643 #include <linux/atalk.h>
81644
81645 +#include <linux/grsock.h>
81646 +
81647 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
81648 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
81649 unsigned long nr_segs, loff_t pos);
81650 @@ -322,7 +325,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
81651 &sockfs_dentry_operations, SOCKFS_MAGIC);
81652 }
81653
81654 -static struct vfsmount *sock_mnt __read_mostly;
81655 +struct vfsmount *sock_mnt __read_mostly;
81656
81657 static struct file_system_type sock_fs_type = {
81658 .name = "sockfs",
81659 @@ -1276,6 +1279,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
81660 return -EAFNOSUPPORT;
81661 if (type < 0 || type >= SOCK_MAX)
81662 return -EINVAL;
81663 + if (protocol < 0)
81664 + return -EINVAL;
81665
81666 /* Compatibility.
81667
81668 @@ -1407,6 +1412,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
81669 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
81670 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
81671
81672 + if(!gr_search_socket(family, type, protocol)) {
81673 + retval = -EACCES;
81674 + goto out;
81675 + }
81676 +
81677 + if (gr_handle_sock_all(family, type, protocol)) {
81678 + retval = -EACCES;
81679 + goto out;
81680 + }
81681 +
81682 retval = sock_create(family, type, protocol, &sock);
81683 if (retval < 0)
81684 goto out;
81685 @@ -1534,6 +1549,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
81686 if (sock) {
81687 err = move_addr_to_kernel(umyaddr, addrlen, &address);
81688 if (err >= 0) {
81689 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
81690 + err = -EACCES;
81691 + goto error;
81692 + }
81693 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
81694 + if (err)
81695 + goto error;
81696 +
81697 err = security_socket_bind(sock,
81698 (struct sockaddr *)&address,
81699 addrlen);
81700 @@ -1542,6 +1565,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
81701 (struct sockaddr *)
81702 &address, addrlen);
81703 }
81704 +error:
81705 fput_light(sock->file, fput_needed);
81706 }
81707 return err;
81708 @@ -1565,10 +1589,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
81709 if ((unsigned int)backlog > somaxconn)
81710 backlog = somaxconn;
81711
81712 + if (gr_handle_sock_server_other(sock->sk)) {
81713 + err = -EPERM;
81714 + goto error;
81715 + }
81716 +
81717 + err = gr_search_listen(sock);
81718 + if (err)
81719 + goto error;
81720 +
81721 err = security_socket_listen(sock, backlog);
81722 if (!err)
81723 err = sock->ops->listen(sock, backlog);
81724
81725 +error:
81726 fput_light(sock->file, fput_needed);
81727 }
81728 return err;
81729 @@ -1612,6 +1646,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
81730 newsock->type = sock->type;
81731 newsock->ops = sock->ops;
81732
81733 + if (gr_handle_sock_server_other(sock->sk)) {
81734 + err = -EPERM;
81735 + sock_release(newsock);
81736 + goto out_put;
81737 + }
81738 +
81739 + err = gr_search_accept(sock);
81740 + if (err) {
81741 + sock_release(newsock);
81742 + goto out_put;
81743 + }
81744 +
81745 /*
81746 * We don't need try_module_get here, as the listening socket (sock)
81747 * has the protocol module (sock->ops->owner) held.
81748 @@ -1657,6 +1703,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
81749 fd_install(newfd, newfile);
81750 err = newfd;
81751
81752 + gr_attach_curr_ip(newsock->sk);
81753 +
81754 out_put:
81755 fput_light(sock->file, fput_needed);
81756 out:
81757 @@ -1689,6 +1737,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
81758 int, addrlen)
81759 {
81760 struct socket *sock;
81761 + struct sockaddr *sck;
81762 struct sockaddr_storage address;
81763 int err, fput_needed;
81764
81765 @@ -1699,6 +1748,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
81766 if (err < 0)
81767 goto out_put;
81768
81769 + sck = (struct sockaddr *)&address;
81770 +
81771 + if (gr_handle_sock_client(sck)) {
81772 + err = -EACCES;
81773 + goto out_put;
81774 + }
81775 +
81776 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
81777 + if (err)
81778 + goto out_put;
81779 +
81780 err =
81781 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
81782 if (err)
81783 @@ -2053,7 +2113,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
81784 * checking falls down on this.
81785 */
81786 if (copy_from_user(ctl_buf,
81787 - (void __user __force *)msg_sys->msg_control,
81788 + (void __force_user *)msg_sys->msg_control,
81789 ctl_len))
81790 goto out_freectl;
81791 msg_sys->msg_control = ctl_buf;
81792 @@ -2221,7 +2281,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
81793 * kernel msghdr to use the kernel address space)
81794 */
81795
81796 - uaddr = (__force void __user *)msg_sys->msg_name;
81797 + uaddr = (void __force_user *)msg_sys->msg_name;
81798 uaddr_len = COMPAT_NAMELEN(msg);
81799 if (MSG_CMSG_COMPAT & flags) {
81800 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
81801 @@ -2844,7 +2904,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
81802 }
81803
81804 ifr = compat_alloc_user_space(buf_size);
81805 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
81806 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
81807
81808 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
81809 return -EFAULT;
81810 @@ -2868,12 +2928,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
81811 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
81812
81813 if (copy_in_user(rxnfc, compat_rxnfc,
81814 - (void *)(&rxnfc->fs.m_ext + 1) -
81815 - (void *)rxnfc) ||
81816 + (void __user *)(&rxnfc->fs.m_ext + 1) -
81817 + (void __user *)rxnfc) ||
81818 copy_in_user(&rxnfc->fs.ring_cookie,
81819 &compat_rxnfc->fs.ring_cookie,
81820 - (void *)(&rxnfc->fs.location + 1) -
81821 - (void *)&rxnfc->fs.ring_cookie) ||
81822 + (void __user *)(&rxnfc->fs.location + 1) -
81823 + (void __user *)&rxnfc->fs.ring_cookie) ||
81824 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
81825 sizeof(rxnfc->rule_cnt)))
81826 return -EFAULT;
81827 @@ -2885,12 +2945,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
81828
81829 if (convert_out) {
81830 if (copy_in_user(compat_rxnfc, rxnfc,
81831 - (const void *)(&rxnfc->fs.m_ext + 1) -
81832 - (const void *)rxnfc) ||
81833 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
81834 + (const void __user *)rxnfc) ||
81835 copy_in_user(&compat_rxnfc->fs.ring_cookie,
81836 &rxnfc->fs.ring_cookie,
81837 - (const void *)(&rxnfc->fs.location + 1) -
81838 - (const void *)&rxnfc->fs.ring_cookie) ||
81839 + (const void __user *)(&rxnfc->fs.location + 1) -
81840 + (const void __user *)&rxnfc->fs.ring_cookie) ||
81841 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
81842 sizeof(rxnfc->rule_cnt)))
81843 return -EFAULT;
81844 @@ -2960,7 +3020,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
81845 old_fs = get_fs();
81846 set_fs(KERNEL_DS);
81847 err = dev_ioctl(net, cmd,
81848 - (struct ifreq __user __force *) &kifr);
81849 + (struct ifreq __force_user *) &kifr);
81850 set_fs(old_fs);
81851
81852 return err;
81853 @@ -3069,7 +3129,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
81854
81855 old_fs = get_fs();
81856 set_fs(KERNEL_DS);
81857 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
81858 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
81859 set_fs(old_fs);
81860
81861 if (cmd == SIOCGIFMAP && !err) {
81862 @@ -3174,7 +3234,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
81863 ret |= __get_user(rtdev, &(ur4->rt_dev));
81864 if (rtdev) {
81865 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
81866 - r4.rt_dev = (char __user __force *)devname;
81867 + r4.rt_dev = (char __force_user *)devname;
81868 devname[15] = 0;
81869 } else
81870 r4.rt_dev = NULL;
81871 @@ -3400,8 +3460,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
81872 int __user *uoptlen;
81873 int err;
81874
81875 - uoptval = (char __user __force *) optval;
81876 - uoptlen = (int __user __force *) optlen;
81877 + uoptval = (char __force_user *) optval;
81878 + uoptlen = (int __force_user *) optlen;
81879
81880 set_fs(KERNEL_DS);
81881 if (level == SOL_SOCKET)
81882 @@ -3421,7 +3481,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
81883 char __user *uoptval;
81884 int err;
81885
81886 - uoptval = (char __user __force *) optval;
81887 + uoptval = (char __force_user *) optval;
81888
81889 set_fs(KERNEL_DS);
81890 if (level == SOL_SOCKET)
81891 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
81892 index 6357fcb..244c7db 100644
81893 --- a/net/sunrpc/sched.c
81894 +++ b/net/sunrpc/sched.c
81895 @@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
81896 #ifdef RPC_DEBUG
81897 static void rpc_task_set_debuginfo(struct rpc_task *task)
81898 {
81899 - static atomic_t rpc_pid;
81900 + static atomic_unchecked_t rpc_pid;
81901
81902 - task->tk_pid = atomic_inc_return(&rpc_pid);
81903 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
81904 }
81905 #else
81906 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
81907 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
81908 index 8343737..677025e 100644
81909 --- a/net/sunrpc/xprtrdma/svc_rdma.c
81910 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
81911 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
81912 static unsigned int min_max_inline = 4096;
81913 static unsigned int max_max_inline = 65536;
81914
81915 -atomic_t rdma_stat_recv;
81916 -atomic_t rdma_stat_read;
81917 -atomic_t rdma_stat_write;
81918 -atomic_t rdma_stat_sq_starve;
81919 -atomic_t rdma_stat_rq_starve;
81920 -atomic_t rdma_stat_rq_poll;
81921 -atomic_t rdma_stat_rq_prod;
81922 -atomic_t rdma_stat_sq_poll;
81923 -atomic_t rdma_stat_sq_prod;
81924 +atomic_unchecked_t rdma_stat_recv;
81925 +atomic_unchecked_t rdma_stat_read;
81926 +atomic_unchecked_t rdma_stat_write;
81927 +atomic_unchecked_t rdma_stat_sq_starve;
81928 +atomic_unchecked_t rdma_stat_rq_starve;
81929 +atomic_unchecked_t rdma_stat_rq_poll;
81930 +atomic_unchecked_t rdma_stat_rq_prod;
81931 +atomic_unchecked_t rdma_stat_sq_poll;
81932 +atomic_unchecked_t rdma_stat_sq_prod;
81933
81934 /* Temporary NFS request map and context caches */
81935 struct kmem_cache *svc_rdma_map_cachep;
81936 @@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
81937 len -= *ppos;
81938 if (len > *lenp)
81939 len = *lenp;
81940 - if (len && copy_to_user(buffer, str_buf, len))
81941 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
81942 return -EFAULT;
81943 *lenp = len;
81944 *ppos += len;
81945 @@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
81946 {
81947 .procname = "rdma_stat_read",
81948 .data = &rdma_stat_read,
81949 - .maxlen = sizeof(atomic_t),
81950 + .maxlen = sizeof(atomic_unchecked_t),
81951 .mode = 0644,
81952 .proc_handler = read_reset_stat,
81953 },
81954 {
81955 .procname = "rdma_stat_recv",
81956 .data = &rdma_stat_recv,
81957 - .maxlen = sizeof(atomic_t),
81958 + .maxlen = sizeof(atomic_unchecked_t),
81959 .mode = 0644,
81960 .proc_handler = read_reset_stat,
81961 },
81962 {
81963 .procname = "rdma_stat_write",
81964 .data = &rdma_stat_write,
81965 - .maxlen = sizeof(atomic_t),
81966 + .maxlen = sizeof(atomic_unchecked_t),
81967 .mode = 0644,
81968 .proc_handler = read_reset_stat,
81969 },
81970 {
81971 .procname = "rdma_stat_sq_starve",
81972 .data = &rdma_stat_sq_starve,
81973 - .maxlen = sizeof(atomic_t),
81974 + .maxlen = sizeof(atomic_unchecked_t),
81975 .mode = 0644,
81976 .proc_handler = read_reset_stat,
81977 },
81978 {
81979 .procname = "rdma_stat_rq_starve",
81980 .data = &rdma_stat_rq_starve,
81981 - .maxlen = sizeof(atomic_t),
81982 + .maxlen = sizeof(atomic_unchecked_t),
81983 .mode = 0644,
81984 .proc_handler = read_reset_stat,
81985 },
81986 {
81987 .procname = "rdma_stat_rq_poll",
81988 .data = &rdma_stat_rq_poll,
81989 - .maxlen = sizeof(atomic_t),
81990 + .maxlen = sizeof(atomic_unchecked_t),
81991 .mode = 0644,
81992 .proc_handler = read_reset_stat,
81993 },
81994 {
81995 .procname = "rdma_stat_rq_prod",
81996 .data = &rdma_stat_rq_prod,
81997 - .maxlen = sizeof(atomic_t),
81998 + .maxlen = sizeof(atomic_unchecked_t),
81999 .mode = 0644,
82000 .proc_handler = read_reset_stat,
82001 },
82002 {
82003 .procname = "rdma_stat_sq_poll",
82004 .data = &rdma_stat_sq_poll,
82005 - .maxlen = sizeof(atomic_t),
82006 + .maxlen = sizeof(atomic_unchecked_t),
82007 .mode = 0644,
82008 .proc_handler = read_reset_stat,
82009 },
82010 {
82011 .procname = "rdma_stat_sq_prod",
82012 .data = &rdma_stat_sq_prod,
82013 - .maxlen = sizeof(atomic_t),
82014 + .maxlen = sizeof(atomic_unchecked_t),
82015 .mode = 0644,
82016 .proc_handler = read_reset_stat,
82017 },
82018 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
82019 index 41cb63b..c4a1489 100644
82020 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
82021 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
82022 @@ -501,7 +501,7 @@ next_sge:
82023 svc_rdma_put_context(ctxt, 0);
82024 goto out;
82025 }
82026 - atomic_inc(&rdma_stat_read);
82027 + atomic_inc_unchecked(&rdma_stat_read);
82028
82029 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
82030 chl_map->ch[ch_no].count -= read_wr.num_sge;
82031 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
82032 dto_q);
82033 list_del_init(&ctxt->dto_q);
82034 } else {
82035 - atomic_inc(&rdma_stat_rq_starve);
82036 + atomic_inc_unchecked(&rdma_stat_rq_starve);
82037 clear_bit(XPT_DATA, &xprt->xpt_flags);
82038 ctxt = NULL;
82039 }
82040 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
82041 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
82042 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
82043 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
82044 - atomic_inc(&rdma_stat_recv);
82045 + atomic_inc_unchecked(&rdma_stat_recv);
82046
82047 /* Build up the XDR from the receive buffers. */
82048 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
82049 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
82050 index 42eb7ba..c887c45 100644
82051 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
82052 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
82053 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
82054 write_wr.wr.rdma.remote_addr = to;
82055
82056 /* Post It */
82057 - atomic_inc(&rdma_stat_write);
82058 + atomic_inc_unchecked(&rdma_stat_write);
82059 if (svc_rdma_send(xprt, &write_wr))
82060 goto err;
82061 return 0;
82062 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
82063 index 62e4f9b..dd3f2d7 100644
82064 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
82065 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
82066 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
82067 return;
82068
82069 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
82070 - atomic_inc(&rdma_stat_rq_poll);
82071 + atomic_inc_unchecked(&rdma_stat_rq_poll);
82072
82073 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
82074 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
82075 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
82076 }
82077
82078 if (ctxt)
82079 - atomic_inc(&rdma_stat_rq_prod);
82080 + atomic_inc_unchecked(&rdma_stat_rq_prod);
82081
82082 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
82083 /*
82084 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
82085 return;
82086
82087 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
82088 - atomic_inc(&rdma_stat_sq_poll);
82089 + atomic_inc_unchecked(&rdma_stat_sq_poll);
82090 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
82091 if (wc.status != IB_WC_SUCCESS)
82092 /* Close the transport */
82093 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
82094 }
82095
82096 if (ctxt)
82097 - atomic_inc(&rdma_stat_sq_prod);
82098 + atomic_inc_unchecked(&rdma_stat_sq_prod);
82099 }
82100
82101 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
82102 @@ -1262,7 +1262,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
82103 spin_lock_bh(&xprt->sc_lock);
82104 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
82105 spin_unlock_bh(&xprt->sc_lock);
82106 - atomic_inc(&rdma_stat_sq_starve);
82107 + atomic_inc_unchecked(&rdma_stat_sq_starve);
82108
82109 /* See if we can opportunistically reap SQ WR to make room */
82110 sq_cq_reap(xprt);
82111 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
82112 index e3a6e37..be2ea77 100644
82113 --- a/net/sysctl_net.c
82114 +++ b/net/sysctl_net.c
82115 @@ -43,7 +43,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
82116 struct ctl_table *table)
82117 {
82118 /* Allow network administrator to have same access as root. */
82119 - if (capable(CAP_NET_ADMIN)) {
82120 + if (capable_nolog(CAP_NET_ADMIN)) {
82121 int mode = (table->mode >> 6) & 7;
82122 return (mode << 6) | (mode << 3) | mode;
82123 }
82124 diff --git a/net/tipc/link.c b/net/tipc/link.c
82125 index a79c755..eca357d 100644
82126 --- a/net/tipc/link.c
82127 +++ b/net/tipc/link.c
82128 @@ -1169,7 +1169,7 @@ static int link_send_sections_long(struct tipc_port *sender,
82129 struct tipc_msg fragm_hdr;
82130 struct sk_buff *buf, *buf_chain, *prev;
82131 u32 fragm_crs, fragm_rest, hsz, sect_rest;
82132 - const unchar *sect_crs;
82133 + const unchar __user *sect_crs;
82134 int curr_sect;
82135 u32 fragm_no;
82136
82137 @@ -1210,7 +1210,7 @@ again:
82138
82139 if (!sect_rest) {
82140 sect_rest = msg_sect[++curr_sect].iov_len;
82141 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
82142 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
82143 }
82144
82145 if (sect_rest < fragm_rest)
82146 @@ -1229,7 +1229,7 @@ error:
82147 }
82148 } else
82149 skb_copy_to_linear_data_offset(buf, fragm_crs,
82150 - sect_crs, sz);
82151 + (const void __force_kernel *)sect_crs, sz);
82152 sect_crs += sz;
82153 sect_rest -= sz;
82154 fragm_crs += sz;
82155 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
82156 index f2db8a8..9245aa4 100644
82157 --- a/net/tipc/msg.c
82158 +++ b/net/tipc/msg.c
82159 @@ -98,7 +98,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
82160 msg_sect[cnt].iov_len);
82161 else
82162 skb_copy_to_linear_data_offset(*buf, pos,
82163 - msg_sect[cnt].iov_base,
82164 + (const void __force_kernel *)msg_sect[cnt].iov_base,
82165 msg_sect[cnt].iov_len);
82166 pos += msg_sect[cnt].iov_len;
82167 }
82168 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
82169 index 0f7d0d0..00f89bf 100644
82170 --- a/net/tipc/subscr.c
82171 +++ b/net/tipc/subscr.c
82172 @@ -96,7 +96,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
82173 {
82174 struct iovec msg_sect;
82175
82176 - msg_sect.iov_base = (void *)&sub->evt;
82177 + msg_sect.iov_base = (void __force_user *)&sub->evt;
82178 msg_sect.iov_len = sizeof(struct tipc_event);
82179
82180 sub->evt.event = htohl(event, sub->swap);
82181 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
82182 index 5b5c876..3127bf7 100644
82183 --- a/net/unix/af_unix.c
82184 +++ b/net/unix/af_unix.c
82185 @@ -786,6 +786,12 @@ static struct sock *unix_find_other(struct net *net,
82186 err = -ECONNREFUSED;
82187 if (!S_ISSOCK(inode->i_mode))
82188 goto put_fail;
82189 +
82190 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
82191 + err = -EACCES;
82192 + goto put_fail;
82193 + }
82194 +
82195 u = unix_find_socket_byinode(inode);
82196 if (!u)
82197 goto put_fail;
82198 @@ -806,6 +812,13 @@ static struct sock *unix_find_other(struct net *net,
82199 if (u) {
82200 struct dentry *dentry;
82201 dentry = unix_sk(u)->path.dentry;
82202 +
82203 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
82204 + err = -EPERM;
82205 + sock_put(u);
82206 + goto fail;
82207 + }
82208 +
82209 if (dentry)
82210 touch_atime(&unix_sk(u)->path);
82211 } else
82212 @@ -839,12 +852,18 @@ static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
82213 */
82214 err = security_path_mknod(&path, dentry, mode, 0);
82215 if (!err) {
82216 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
82217 + err = -EACCES;
82218 + goto out;
82219 + }
82220 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
82221 if (!err) {
82222 res->mnt = mntget(path.mnt);
82223 res->dentry = dget(dentry);
82224 + gr_handle_create(dentry, path.mnt);
82225 }
82226 }
82227 +out:
82228 done_path_create(&path, dentry);
82229 return err;
82230 }
82231 diff --git a/net/wireless/core.h b/net/wireless/core.h
82232 index a343be4..0c6e081 100644
82233 --- a/net/wireless/core.h
82234 +++ b/net/wireless/core.h
82235 @@ -28,7 +28,7 @@ struct cfg80211_registered_device {
82236 struct mutex mtx;
82237
82238 /* rfkill support */
82239 - struct rfkill_ops rfkill_ops;
82240 + rfkill_ops_no_const rfkill_ops;
82241 struct rfkill *rfkill;
82242 struct work_struct rfkill_sync;
82243
82244 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
82245 index c8717c1..08539f5 100644
82246 --- a/net/wireless/wext-core.c
82247 +++ b/net/wireless/wext-core.c
82248 @@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
82249 */
82250
82251 /* Support for very large requests */
82252 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
82253 - (user_length > descr->max_tokens)) {
82254 + if (user_length > descr->max_tokens) {
82255 /* Allow userspace to GET more than max so
82256 * we can support any size GET requests.
82257 * There is still a limit : -ENOMEM.
82258 @@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
82259 }
82260 }
82261
82262 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
82263 - /*
82264 - * If this is a GET, but not NOMAX, it means that the extra
82265 - * data is not bounded by userspace, but by max_tokens. Thus
82266 - * set the length to max_tokens. This matches the extra data
82267 - * allocation.
82268 - * The driver should fill it with the number of tokens it
82269 - * provided, and it may check iwp->length rather than having
82270 - * knowledge of max_tokens. If the driver doesn't change the
82271 - * iwp->length, this ioctl just copies back max_token tokens
82272 - * filled with zeroes. Hopefully the driver isn't claiming
82273 - * them to be valid data.
82274 - */
82275 - iwp->length = descr->max_tokens;
82276 - }
82277 -
82278 err = handler(dev, info, (union iwreq_data *) iwp, extra);
82279
82280 iwp->length += essid_compat;
82281 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
82282 index 41eabc4..8d4e6d6 100644
82283 --- a/net/xfrm/xfrm_policy.c
82284 +++ b/net/xfrm/xfrm_policy.c
82285 @@ -317,7 +317,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
82286 {
82287 policy->walk.dead = 1;
82288
82289 - atomic_inc(&policy->genid);
82290 + atomic_inc_unchecked(&policy->genid);
82291
82292 if (del_timer(&policy->timer))
82293 xfrm_pol_put(policy);
82294 @@ -601,7 +601,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
82295 hlist_add_head(&policy->bydst, chain);
82296 xfrm_pol_hold(policy);
82297 net->xfrm.policy_count[dir]++;
82298 - atomic_inc(&flow_cache_genid);
82299 + atomic_inc_unchecked(&flow_cache_genid);
82300 rt_genid_bump(net);
82301 if (delpol)
82302 __xfrm_policy_unlink(delpol, dir);
82303 @@ -1550,7 +1550,7 @@ free_dst:
82304 goto out;
82305 }
82306
82307 -static int inline
82308 +static inline int
82309 xfrm_dst_alloc_copy(void **target, const void *src, int size)
82310 {
82311 if (!*target) {
82312 @@ -1562,7 +1562,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
82313 return 0;
82314 }
82315
82316 -static int inline
82317 +static inline int
82318 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
82319 {
82320 #ifdef CONFIG_XFRM_SUB_POLICY
82321 @@ -1574,7 +1574,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
82322 #endif
82323 }
82324
82325 -static int inline
82326 +static inline int
82327 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
82328 {
82329 #ifdef CONFIG_XFRM_SUB_POLICY
82330 @@ -1668,7 +1668,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
82331
82332 xdst->num_pols = num_pols;
82333 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
82334 - xdst->policy_genid = atomic_read(&pols[0]->genid);
82335 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
82336
82337 return xdst;
82338 }
82339 @@ -2369,7 +2369,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
82340 if (xdst->xfrm_genid != dst->xfrm->genid)
82341 return 0;
82342 if (xdst->num_pols > 0 &&
82343 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
82344 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
82345 return 0;
82346
82347 mtu = dst_mtu(dst->child);
82348 @@ -2896,7 +2896,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
82349 sizeof(pol->xfrm_vec[i].saddr));
82350 pol->xfrm_vec[i].encap_family = mp->new_family;
82351 /* flush bundles */
82352 - atomic_inc(&pol->genid);
82353 + atomic_inc_unchecked(&pol->genid);
82354 }
82355 }
82356
82357 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
82358 index 3459692..eefb515 100644
82359 --- a/net/xfrm/xfrm_state.c
82360 +++ b/net/xfrm/xfrm_state.c
82361 @@ -278,7 +278,9 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
82362 if (!try_module_get(afinfo->owner))
82363 goto out;
82364
82365 - mode->afinfo = afinfo;
82366 + pax_open_kernel();
82367 + *(void **)&mode->afinfo = afinfo;
82368 + pax_close_kernel();
82369 modemap[mode->encap] = mode;
82370 err = 0;
82371
82372 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
82373 index 0e801c3..5c8ad3b 100644
82374 --- a/scripts/Makefile.build
82375 +++ b/scripts/Makefile.build
82376 @@ -111,7 +111,7 @@ endif
82377 endif
82378
82379 # Do not include host rules unless needed
82380 -ifneq ($(hostprogs-y)$(hostprogs-m),)
82381 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
82382 include scripts/Makefile.host
82383 endif
82384
82385 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
82386 index 686cb0d..9d653bf 100644
82387 --- a/scripts/Makefile.clean
82388 +++ b/scripts/Makefile.clean
82389 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
82390 __clean-files := $(extra-y) $(always) \
82391 $(targets) $(clean-files) \
82392 $(host-progs) \
82393 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
82394 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
82395 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
82396
82397 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
82398
82399 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
82400 index 1ac414f..38575f7 100644
82401 --- a/scripts/Makefile.host
82402 +++ b/scripts/Makefile.host
82403 @@ -31,6 +31,8 @@
82404 # Note: Shared libraries consisting of C++ files are not supported
82405
82406 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
82407 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
82408 +__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
82409
82410 # C code
82411 # Executables compiled from a single .c file
82412 @@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
82413 # Shared libaries (only .c supported)
82414 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
82415 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
82416 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
82417 +host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
82418 # Remove .so files from "xxx-objs"
82419 host-cobjs := $(filter-out %.so,$(host-cobjs))
82420 +host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
82421
82422 -#Object (.o) files used by the shared libaries
82423 +# Object (.o) files used by the shared libaries
82424 host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
82425 +host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
82426
82427 # output directory for programs/.o files
82428 # hostprogs-y := tools/build may have been specified. Retrieve directory
82429 @@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
82430 host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
82431 host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
82432 host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
82433 +host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
82434 host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
82435 +host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
82436 host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
82437
82438 obj-dirs += $(host-objdirs)
82439 @@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
82440 $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
82441 $(call if_changed_dep,host-cshobjs)
82442
82443 +# Compile .c file, create position independent .o file
82444 +# host-cxxshobjs -> .o
82445 +quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
82446 + cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
82447 +$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
82448 + $(call if_changed_dep,host-cxxshobjs)
82449 +
82450 # Link a shared library, based on position independent .o files
82451 # *.o -> .so shared library (host-cshlib)
82452 quiet_cmd_host-cshlib = HOSTLLD -shared $@
82453 @@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
82454 $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
82455 $(call if_changed,host-cshlib)
82456
82457 +# Link a shared library, based on position independent .o files
82458 +# *.o -> .so shared library (host-cxxshlib)
82459 +quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
82460 + cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
82461 + $(addprefix $(obj)/,$($(@F:.so=-objs))) \
82462 + $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
82463 +$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
82464 + $(call if_changed,host-cxxshlib)
82465 +
82466 targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
82467 - $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
82468 + $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
82469
82470 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
82471 index cb1f50c..cef2a7c 100644
82472 --- a/scripts/basic/fixdep.c
82473 +++ b/scripts/basic/fixdep.c
82474 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
82475 /*
82476 * Lookup a value in the configuration string.
82477 */
82478 -static int is_defined_config(const char *name, int len, unsigned int hash)
82479 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
82480 {
82481 struct item *aux;
82482
82483 @@ -211,10 +211,10 @@ static void clear_config(void)
82484 /*
82485 * Record the use of a CONFIG_* word.
82486 */
82487 -static void use_config(const char *m, int slen)
82488 +static void use_config(const char *m, unsigned int slen)
82489 {
82490 unsigned int hash = strhash(m, slen);
82491 - int c, i;
82492 + unsigned int c, i;
82493
82494 if (is_defined_config(m, slen, hash))
82495 return;
82496 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
82497
82498 static void parse_config_file(const char *map, size_t len)
82499 {
82500 - const int *end = (const int *) (map + len);
82501 + const unsigned int *end = (const unsigned int *) (map + len);
82502 /* start at +1, so that p can never be < map */
82503 - const int *m = (const int *) map + 1;
82504 + const unsigned int *m = (const unsigned int *) map + 1;
82505 const char *p, *q;
82506
82507 for (; m < end; m++) {
82508 @@ -406,7 +406,7 @@ static void print_deps(void)
82509 static void traps(void)
82510 {
82511 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
82512 - int *p = (int *)test;
82513 + unsigned int *p = (unsigned int *)test;
82514
82515 if (*p != INT_CONF) {
82516 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
82517 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
82518 new file mode 100644
82519 index 0000000..008ac1a
82520 --- /dev/null
82521 +++ b/scripts/gcc-plugin.sh
82522 @@ -0,0 +1,17 @@
82523 +#!/bin/bash
82524 +plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
82525 +#include "gcc-plugin.h"
82526 +#include "tree.h"
82527 +#include "tm.h"
82528 +#include "rtl.h"
82529 +#ifdef ENABLE_BUILD_WITH_CXX
82530 +#warning $2
82531 +#else
82532 +#warning $1
82533 +#endif
82534 +EOF`
82535 +if [ $? -eq 0 ]
82536 +then
82537 + [[ "$plugincc" =~ "$1" ]] && echo "$1"
82538 + [[ "$plugincc" =~ "$2" ]] && echo "$2"
82539 +fi
82540 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
82541 index b3d907e..a4782ab 100644
82542 --- a/scripts/link-vmlinux.sh
82543 +++ b/scripts/link-vmlinux.sh
82544 @@ -152,7 +152,7 @@ else
82545 fi;
82546
82547 # final build of init/
82548 -${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
82549 +${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
82550
82551 kallsymso=""
82552 kallsyms_vmlinux=""
82553 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
82554 index df4fc23..0ea719d 100644
82555 --- a/scripts/mod/file2alias.c
82556 +++ b/scripts/mod/file2alias.c
82557 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
82558 unsigned long size, unsigned long id_size,
82559 void *symval)
82560 {
82561 - int i;
82562 + unsigned int i;
82563
82564 if (size % id_size || size < id_size) {
82565 if (cross_build != 0)
82566 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
82567 /* USB is special because the bcdDevice can be matched against a numeric range */
82568 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
82569 static void do_usb_entry(struct usb_device_id *id,
82570 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
82571 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
82572 unsigned char range_lo, unsigned char range_hi,
82573 unsigned char max, struct module *mod)
82574 {
82575 @@ -262,7 +262,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
82576 {
82577 unsigned int devlo, devhi;
82578 unsigned char chi, clo, max;
82579 - int ndigits;
82580 + unsigned int ndigits;
82581
82582 id->match_flags = TO_NATIVE(id->match_flags);
82583 id->idVendor = TO_NATIVE(id->idVendor);
82584 @@ -507,7 +507,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
82585 for (i = 0; i < count; i++) {
82586 const char *id = (char *)devs[i].id;
82587 char acpi_id[sizeof(devs[0].id)];
82588 - int j;
82589 + unsigned int j;
82590
82591 buf_printf(&mod->dev_table_buf,
82592 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
82593 @@ -537,7 +537,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
82594
82595 for (j = 0; j < PNP_MAX_DEVICES; j++) {
82596 const char *id = (char *)card->devs[j].id;
82597 - int i2, j2;
82598 + unsigned int i2, j2;
82599 int dup = 0;
82600
82601 if (!id[0])
82602 @@ -563,7 +563,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
82603 /* add an individual alias for every device entry */
82604 if (!dup) {
82605 char acpi_id[sizeof(card->devs[0].id)];
82606 - int k;
82607 + unsigned int k;
82608
82609 buf_printf(&mod->dev_table_buf,
82610 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
82611 @@ -888,7 +888,7 @@ static void dmi_ascii_filter(char *d, const char *s)
82612 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
82613 char *alias)
82614 {
82615 - int i, j;
82616 + unsigned int i, j;
82617
82618 sprintf(alias, "dmi*");
82619
82620 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
82621 index 0d93856..e828363 100644
82622 --- a/scripts/mod/modpost.c
82623 +++ b/scripts/mod/modpost.c
82624 @@ -933,6 +933,7 @@ enum mismatch {
82625 ANY_INIT_TO_ANY_EXIT,
82626 ANY_EXIT_TO_ANY_INIT,
82627 EXPORT_TO_INIT_EXIT,
82628 + DATA_TO_TEXT
82629 };
82630
82631 struct sectioncheck {
82632 @@ -1047,6 +1048,12 @@ const struct sectioncheck sectioncheck[] = {
82633 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
82634 .mismatch = EXPORT_TO_INIT_EXIT,
82635 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
82636 +},
82637 +/* Do not reference code from writable data */
82638 +{
82639 + .fromsec = { DATA_SECTIONS, NULL },
82640 + .tosec = { TEXT_SECTIONS, NULL },
82641 + .mismatch = DATA_TO_TEXT
82642 }
82643 };
82644
82645 @@ -1169,10 +1176,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
82646 continue;
82647 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
82648 continue;
82649 - if (sym->st_value == addr)
82650 - return sym;
82651 /* Find a symbol nearby - addr are maybe negative */
82652 d = sym->st_value - addr;
82653 + if (d == 0)
82654 + return sym;
82655 if (d < 0)
82656 d = addr - sym->st_value;
82657 if (d < distance) {
82658 @@ -1451,6 +1458,14 @@ static void report_sec_mismatch(const char *modname,
82659 tosym, prl_to, prl_to, tosym);
82660 free(prl_to);
82661 break;
82662 + case DATA_TO_TEXT:
82663 +#if 0
82664 + fprintf(stderr,
82665 + "The %s %s:%s references\n"
82666 + "the %s %s:%s%s\n",
82667 + from, fromsec, fromsym, to, tosec, tosym, to_p);
82668 +#endif
82669 + break;
82670 }
82671 fprintf(stderr, "\n");
82672 }
82673 @@ -1685,7 +1700,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
82674 static void check_sec_ref(struct module *mod, const char *modname,
82675 struct elf_info *elf)
82676 {
82677 - int i;
82678 + unsigned int i;
82679 Elf_Shdr *sechdrs = elf->sechdrs;
82680
82681 /* Walk through all sections */
82682 @@ -1783,7 +1798,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
82683 va_end(ap);
82684 }
82685
82686 -void buf_write(struct buffer *buf, const char *s, int len)
82687 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
82688 {
82689 if (buf->size - buf->pos < len) {
82690 buf->size += len + SZ;
82691 @@ -2001,7 +2016,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
82692 if (fstat(fileno(file), &st) < 0)
82693 goto close_write;
82694
82695 - if (st.st_size != b->pos)
82696 + if (st.st_size != (off_t)b->pos)
82697 goto close_write;
82698
82699 tmp = NOFAIL(malloc(b->pos));
82700 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
82701 index 51207e4..f7d603d 100644
82702 --- a/scripts/mod/modpost.h
82703 +++ b/scripts/mod/modpost.h
82704 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
82705
82706 struct buffer {
82707 char *p;
82708 - int pos;
82709 - int size;
82710 + unsigned int pos;
82711 + unsigned int size;
82712 };
82713
82714 void __attribute__((format(printf, 2, 3)))
82715 buf_printf(struct buffer *buf, const char *fmt, ...);
82716
82717 void
82718 -buf_write(struct buffer *buf, const char *s, int len);
82719 +buf_write(struct buffer *buf, const char *s, unsigned int len);
82720
82721 struct module {
82722 struct module *next;
82723 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
82724 index 9dfcd6d..099068e 100644
82725 --- a/scripts/mod/sumversion.c
82726 +++ b/scripts/mod/sumversion.c
82727 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
82728 goto out;
82729 }
82730
82731 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
82732 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
82733 warn("writing sum in %s failed: %s\n",
82734 filename, strerror(errno));
82735 goto out;
82736 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
82737 index 5c11312..72742b5 100644
82738 --- a/scripts/pnmtologo.c
82739 +++ b/scripts/pnmtologo.c
82740 @@ -237,14 +237,14 @@ static void write_header(void)
82741 fprintf(out, " * Linux logo %s\n", logoname);
82742 fputs(" */\n\n", out);
82743 fputs("#include <linux/linux_logo.h>\n\n", out);
82744 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
82745 + fprintf(out, "static unsigned char %s_data[] = {\n",
82746 logoname);
82747 }
82748
82749 static void write_footer(void)
82750 {
82751 fputs("\n};\n\n", out);
82752 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
82753 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
82754 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
82755 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
82756 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
82757 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
82758 fputs("\n};\n\n", out);
82759
82760 /* write logo clut */
82761 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
82762 + fprintf(out, "static unsigned char %s_clut[] = {\n",
82763 logoname);
82764 write_hex_cnt = 0;
82765 for (i = 0; i < logo_clutsize; i++) {
82766 diff --git a/security/Kconfig b/security/Kconfig
82767 index e9c6ac7..a9bcf23 100644
82768 --- a/security/Kconfig
82769 +++ b/security/Kconfig
82770 @@ -4,6 +4,896 @@
82771
82772 menu "Security options"
82773
82774 +menu "Grsecurity"
82775 +
82776 + config ARCH_TRACK_EXEC_LIMIT
82777 + bool
82778 +
82779 + config PAX_KERNEXEC_PLUGIN
82780 + bool
82781 +
82782 + config PAX_PER_CPU_PGD
82783 + bool
82784 +
82785 + config TASK_SIZE_MAX_SHIFT
82786 + int
82787 + depends on X86_64
82788 + default 47 if !PAX_PER_CPU_PGD
82789 + default 42 if PAX_PER_CPU_PGD
82790 +
82791 + config PAX_ENABLE_PAE
82792 + bool
82793 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
82794 +
82795 + config PAX_USERCOPY_SLABS
82796 + bool
82797 +
82798 +config GRKERNSEC
82799 + bool "Grsecurity"
82800 + select CRYPTO
82801 + select CRYPTO_SHA256
82802 + select PROC_FS
82803 + select STOP_MACHINE
82804 + help
82805 + If you say Y here, you will be able to configure many features
82806 + that will enhance the security of your system. It is highly
82807 + recommended that you say Y here and read through the help
82808 + for each option so that you fully understand the features and
82809 + can evaluate their usefulness for your machine.
82810 +
82811 +choice
82812 + prompt "Configuration Method"
82813 + depends on GRKERNSEC
82814 + default GRKERNSEC_CONFIG_CUSTOM
82815 + help
82816 +
82817 +config GRKERNSEC_CONFIG_AUTO
82818 + bool "Automatic"
82819 + help
82820 + If you choose this configuration method, you'll be able to answer a small
82821 + number of simple questions about how you plan to use this kernel.
82822 + The settings of grsecurity and PaX will be automatically configured for
82823 + the highest commonly-used settings within the provided constraints.
82824 +
82825 + If you require additional configuration, custom changes can still be made
82826 + from the "custom configuration" menu.
82827 +
82828 +config GRKERNSEC_CONFIG_CUSTOM
82829 + bool "Custom"
82830 + help
82831 + If you choose this configuration method, you'll be able to configure all
82832 + grsecurity and PaX settings manually. Via this method, no options are
82833 + automatically enabled.
82834 +
82835 +endchoice
82836 +
82837 +choice
82838 + prompt "Usage Type"
82839 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
82840 + default GRKERNSEC_CONFIG_SERVER
82841 + help
82842 +
82843 +config GRKERNSEC_CONFIG_SERVER
82844 + bool "Server"
82845 + help
82846 + Choose this option if you plan to use this kernel on a server.
82847 +
82848 +config GRKERNSEC_CONFIG_DESKTOP
82849 + bool "Desktop"
82850 + help
82851 + Choose this option if you plan to use this kernel on a desktop.
82852 +
82853 +endchoice
82854 +
82855 +choice
82856 + prompt "Virtualization Type"
82857 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
82858 + default GRKERNSEC_CONFIG_VIRT_NONE
82859 + help
82860 +
82861 +config GRKERNSEC_CONFIG_VIRT_NONE
82862 + bool "None"
82863 + help
82864 + Choose this option if this kernel will be run on bare metal.
82865 +
82866 +config GRKERNSEC_CONFIG_VIRT_GUEST
82867 + bool "Guest"
82868 + help
82869 + Choose this option if this kernel will be run as a VM guest.
82870 +
82871 +config GRKERNSEC_CONFIG_VIRT_HOST
82872 + bool "Host"
82873 + help
82874 + Choose this option if this kernel will be run as a VM host.
82875 +
82876 +endchoice
82877 +
82878 +choice
82879 + prompt "Virtualization Hardware"
82880 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
82881 + help
82882 +
82883 +config GRKERNSEC_CONFIG_VIRT_EPT
82884 + bool "EPT/RVI Processor Support"
82885 + depends on X86
82886 + help
82887 + Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
82888 + hardware virtualization. This allows for additional kernel hardening protections
82889 + to operate without additional performance impact.
82890 +
82891 + To see if your Intel processor supports EPT, see:
82892 + http://ark.intel.com/Products/VirtualizationTechnology
82893 + (Most Core i3/5/7 support EPT)
82894 +
82895 + To see if your AMD processor supports RVI, see:
82896 + http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
82897 +
82898 +config GRKERNSEC_CONFIG_VIRT_SOFT
82899 + bool "First-gen/No Hardware Virtualization"
82900 + help
82901 + Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
82902 + support hardware virtualization or doesn't support the EPT/RVI extensions.
82903 +
82904 +endchoice
82905 +
82906 +choice
82907 + prompt "Virtualization Software"
82908 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
82909 + help
82910 +
82911 +config GRKERNSEC_CONFIG_VIRT_XEN
82912 + bool "Xen"
82913 + help
82914 + Choose this option if this kernel is running as a Xen guest or host.
82915 +
82916 +config GRKERNSEC_CONFIG_VIRT_VMWARE
82917 + bool "VMWare"
82918 + help
82919 + Choose this option if this kernel is running as a VMWare guest or host.
82920 +
82921 +config GRKERNSEC_CONFIG_VIRT_KVM
82922 + bool "KVM"
82923 + help
82924 + Choose this option if this kernel is running as a KVM guest or host.
82925 +
82926 +config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
82927 + bool "VirtualBox"
82928 + help
82929 + Choose this option if this kernel is running as a VirtualBox guest or host.
82930 +
82931 +endchoice
82932 +
82933 +choice
82934 + prompt "Required Priorities"
82935 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
82936 + default GRKERNSEC_CONFIG_PRIORITY_PERF
82937 + help
82938 +
82939 +config GRKERNSEC_CONFIG_PRIORITY_PERF
82940 + bool "Performance"
82941 + help
82942 + Choose this option if performance is of highest priority for this deployment
82943 + of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing,
82944 + and freed memory sanitizing will be disabled.
82945 +
82946 +config GRKERNSEC_CONFIG_PRIORITY_SECURITY
82947 + bool "Security"
82948 + help
82949 + Choose this option if security is of highest priority for this deployment of
82950 + grsecurity. UDEREF, kernel stack clearing, and freed memory sanitizing will
82951 + be enabled for this kernel. In a worst-case scenario, these features can
82952 + introduce a 20% performance hit (UDEREF on x64 contributing half of this hit).
82953 +
82954 +endchoice
82955 +
82956 +menu "Default Special Groups"
82957 +depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
82958 +
82959 +config GRKERNSEC_PROC_GID
82960 + int "GID exempted from /proc restrictions"
82961 + default 1001
82962 + help
82963 + Setting this GID determines which group will be exempted from
82964 + grsecurity's /proc restrictions, allowing users of the specified
82965 + group to view network statistics and the existence of other users'
82966 + processes on the system. This GID may also be chosen at boot time
82967 + via "grsec_proc_gid=" on the kernel commandline.
82968 +
82969 +config GRKERNSEC_TPE_UNTRUSTED_GID
82970 + int "GID for TPE-untrusted users"
82971 + depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
82972 + default 1005
82973 + help
82974 + Setting this GID determines which group untrusted users should
82975 + be added to. These users will be placed under grsecurity's Trusted Path
82976 + Execution mechanism, preventing them from executing their own binaries.
82977 + The users will only be able to execute binaries in directories owned and
82978 + writable only by the root user. If the sysctl option is enabled, a sysctl
82979 + option with name "tpe_gid" is created.
82980 +
82981 +config GRKERNSEC_TPE_TRUSTED_GID
82982 + int "GID for TPE-trusted users"
82983 + depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
82984 + default 1005
82985 + help
82986 + Setting this GID determines what group TPE restrictions will be
82987 + *disabled* for. If the sysctl option is enabled, a sysctl option
82988 + with name "tpe_gid" is created.
82989 +
82990 +config GRKERNSEC_SYMLINKOWN_GID
82991 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
82992 + depends on GRKERNSEC_CONFIG_SERVER
82993 + default 1006
82994 + help
82995 + Setting this GID determines what group kernel-enforced
82996 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
82997 + is enabled, a sysctl option with name "symlinkown_gid" is created.
82998 +
82999 +
83000 +endmenu
83001 +
83002 +menu "Customize Configuration"
83003 +depends on GRKERNSEC
83004 +
83005 +menu "PaX"
83006 +
83007 +config PAX
83008 + bool "Enable various PaX features"
83009 + default y if GRKERNSEC_CONFIG_AUTO
83010 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
83011 + help
83012 + This allows you to enable various PaX features. PaX adds
83013 + intrusion prevention mechanisms to the kernel that reduce
83014 + the risks posed by exploitable memory corruption bugs.
83015 +
83016 +menu "PaX Control"
83017 + depends on PAX
83018 +
83019 +config PAX_SOFTMODE
83020 + bool 'Support soft mode'
83021 + help
83022 + Enabling this option will allow you to run PaX in soft mode, that
83023 + is, PaX features will not be enforced by default, only on executables
83024 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
83025 + support as they are the only way to mark executables for soft mode use.
83026 +
83027 + Soft mode can be activated by using the "pax_softmode=1" kernel command
83028 + line option on boot. Furthermore you can control various PaX features
83029 + at runtime via the entries in /proc/sys/kernel/pax.
83030 +
83031 +config PAX_EI_PAX
83032 + bool 'Use legacy ELF header marking'
83033 + default y if GRKERNSEC_CONFIG_AUTO
83034 + help
83035 + Enabling this option will allow you to control PaX features on
83036 + a per executable basis via the 'chpax' utility available at
83037 + http://pax.grsecurity.net/. The control flags will be read from
83038 + an otherwise reserved part of the ELF header. This marking has
83039 + numerous drawbacks (no support for soft-mode, toolchain does not
83040 + know about the non-standard use of the ELF header) therefore it
83041 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
83042 + support.
83043 +
83044 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
83045 + support as well, they will override the legacy EI_PAX marks.
83046 +
83047 + If you enable none of the marking options then all applications
83048 + will run with PaX enabled on them by default.
83049 +
83050 +config PAX_PT_PAX_FLAGS
83051 + bool 'Use ELF program header marking'
83052 + default y if GRKERNSEC_CONFIG_AUTO
83053 + help
83054 + Enabling this option will allow you to control PaX features on
83055 + a per executable basis via the 'paxctl' utility available at
83056 + http://pax.grsecurity.net/. The control flags will be read from
83057 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
83058 + has the benefits of supporting both soft mode and being fully
83059 + integrated into the toolchain (the binutils patch is available
83060 + from http://pax.grsecurity.net).
83061 +
83062 + Note that if you enable the legacy EI_PAX marking support as well,
83063 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
83064 +
83065 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
83066 + must make sure that the marks are the same if a binary has both marks.
83067 +
83068 + If you enable none of the marking options then all applications
83069 + will run with PaX enabled on them by default.
83070 +
83071 +config PAX_XATTR_PAX_FLAGS
83072 + bool 'Use filesystem extended attributes marking'
83073 + default y if GRKERNSEC_CONFIG_AUTO
83074 + select CIFS_XATTR if CIFS
83075 + select EXT2_FS_XATTR if EXT2_FS
83076 + select EXT3_FS_XATTR if EXT3_FS
83077 + select EXT4_FS_XATTR if EXT4_FS
83078 + select JFFS2_FS_XATTR if JFFS2_FS
83079 + select REISERFS_FS_XATTR if REISERFS_FS
83080 + select SQUASHFS_XATTR if SQUASHFS
83081 + select TMPFS_XATTR if TMPFS
83082 + select UBIFS_FS_XATTR if UBIFS_FS
83083 + help
83084 + Enabling this option will allow you to control PaX features on
83085 + a per executable basis via the 'setfattr' utility. The control
83086 + flags will be read from the user.pax.flags extended attribute of
83087 + the file. This marking has the benefit of supporting binary-only
83088 + applications that self-check themselves (e.g., skype) and would
83089 + not tolerate chpax/paxctl changes. The main drawback is that
83090 + extended attributes are not supported by some filesystems (e.g.,
83091 + isofs, udf, vfat) so copying files through such filesystems will
83092 + lose the extended attributes and these PaX markings.
83093 +
83094 + Note that if you enable the legacy EI_PAX marking support as well,
83095 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
83096 +
83097 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
83098 + must make sure that the marks are the same if a binary has both marks.
83099 +
83100 + If you enable none of the marking options then all applications
83101 + will run with PaX enabled on them by default.
83102 +
83103 +choice
83104 + prompt 'MAC system integration'
83105 + default PAX_HAVE_ACL_FLAGS
83106 + help
83107 + Mandatory Access Control systems have the option of controlling
83108 + PaX flags on a per executable basis, choose the method supported
83109 + by your particular system.
83110 +
83111 + - "none": if your MAC system does not interact with PaX,
83112 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
83113 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
83114 +
83115 + NOTE: this option is for developers/integrators only.
83116 +
83117 + config PAX_NO_ACL_FLAGS
83118 + bool 'none'
83119 +
83120 + config PAX_HAVE_ACL_FLAGS
83121 + bool 'direct'
83122 +
83123 + config PAX_HOOK_ACL_FLAGS
83124 + bool 'hook'
83125 +endchoice
83126 +
83127 +endmenu
83128 +
83129 +menu "Non-executable pages"
83130 + depends on PAX
83131 +
83132 +config PAX_NOEXEC
83133 + bool "Enforce non-executable pages"
83134 + default y if GRKERNSEC_CONFIG_AUTO
83135 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
83136 + help
83137 + By design some architectures do not allow for protecting memory
83138 + pages against execution or even if they do, Linux does not make
83139 + use of this feature. In practice this means that if a page is
83140 + readable (such as the stack or heap) it is also executable.
83141 +
83142 + There is a well known exploit technique that makes use of this
83143 + fact and a common programming mistake where an attacker can
83144 + introduce code of his choice somewhere in the attacked program's
83145 + memory (typically the stack or the heap) and then execute it.
83146 +
83147 + If the attacked program was running with different (typically
83148 + higher) privileges than that of the attacker, then he can elevate
83149 + his own privilege level (e.g. get a root shell, write to files for
83150 + which he does not have write access to, etc).
83151 +
83152 + Enabling this option will let you choose from various features
83153 + that prevent the injection and execution of 'foreign' code in
83154 + a program.
83155 +
83156 + This will also break programs that rely on the old behaviour and
83157 + expect that dynamically allocated memory via the malloc() family
83158 + of functions is executable (which it is not). Notable examples
83159 + are the XFree86 4.x server, the java runtime and wine.
83160 +
83161 +config PAX_PAGEEXEC
83162 + bool "Paging based non-executable pages"
83163 + default y if GRKERNSEC_CONFIG_AUTO
83164 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
83165 + select S390_SWITCH_AMODE if S390
83166 + select S390_EXEC_PROTECT if S390
83167 + select ARCH_TRACK_EXEC_LIMIT if X86_32
83168 + help
83169 + This implementation is based on the paging feature of the CPU.
83170 + On i386 without hardware non-executable bit support there is a
83171 + variable but usually low performance impact, however on Intel's
83172 + P4 core based CPUs it is very high so you should not enable this
83173 + for kernels meant to be used on such CPUs.
83174 +
83175 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
83176 + with hardware non-executable bit support there is no performance
83177 + impact, on ppc the impact is negligible.
83178 +
83179 + Note that several architectures require various emulations due to
83180 + badly designed userland ABIs, this will cause a performance impact
83181 + but will disappear as soon as userland is fixed. For example, ppc
83182 + userland MUST have been built with secure-plt by a recent toolchain.
83183 +
83184 +config PAX_SEGMEXEC
83185 + bool "Segmentation based non-executable pages"
83186 + default y if GRKERNSEC_CONFIG_AUTO
83187 + depends on PAX_NOEXEC && X86_32
83188 + help
83189 + This implementation is based on the segmentation feature of the
83190 + CPU and has a very small performance impact, however applications
83191 + will be limited to a 1.5 GB address space instead of the normal
83192 + 3 GB.
83193 +
83194 +config PAX_EMUTRAMP
83195 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
83196 + default y if PARISC
83197 + help
83198 + There are some programs and libraries that for one reason or
83199 + another attempt to execute special small code snippets from
83200 + non-executable memory pages. Most notable examples are the
83201 + signal handler return code generated by the kernel itself and
83202 + the GCC trampolines.
83203 +
83204 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
83205 + such programs will no longer work under your kernel.
83206 +
83207 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
83208 + utilities to enable trampoline emulation for the affected programs
83209 + yet still have the protection provided by the non-executable pages.
83210 +
83211 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
83212 + your system will not even boot.
83213 +
83214 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
83215 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
83216 + for the affected files.
83217 +
83218 + NOTE: enabling this feature *may* open up a loophole in the
83219 + protection provided by non-executable pages that an attacker
83220 + could abuse. Therefore the best solution is to not have any
83221 + files on your system that would require this option. This can
83222 + be achieved by not using libc5 (which relies on the kernel
83223 + signal handler return code) and not using or rewriting programs
83224 + that make use of the nested function implementation of GCC.
83225 + Skilled users can just fix GCC itself so that it implements
83226 + nested function calls in a way that does not interfere with PaX.
83227 +
83228 +config PAX_EMUSIGRT
83229 + bool "Automatically emulate sigreturn trampolines"
83230 + depends on PAX_EMUTRAMP && PARISC
83231 + default y
83232 + help
83233 + Enabling this option will have the kernel automatically detect
83234 + and emulate signal return trampolines executing on the stack
83235 + that would otherwise lead to task termination.
83236 +
83237 + This solution is intended as a temporary one for users with
83238 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
83239 + Modula-3 runtime, etc) or executables linked to such, basically
83240 + everything that does not specify its own SA_RESTORER function in
83241 + normal executable memory like glibc 2.1+ does.
83242 +
83243 + On parisc you MUST enable this option, otherwise your system will
83244 + not even boot.
83245 +
83246 + NOTE: this feature cannot be disabled on a per executable basis
83247 + and since it *does* open up a loophole in the protection provided
83248 + by non-executable pages, the best solution is to not have any
83249 + files on your system that would require this option.
83250 +
83251 +config PAX_MPROTECT
83252 + bool "Restrict mprotect()"
83253 + default y if GRKERNSEC_CONFIG_AUTO
83254 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
83255 + help
83256 + Enabling this option will prevent programs from
83257 + - changing the executable status of memory pages that were
83258 + not originally created as executable,
83259 + - making read-only executable pages writable again,
83260 + - creating executable pages from anonymous memory,
83261 + - making read-only-after-relocations (RELRO) data pages writable again.
83262 +
83263 + You should say Y here to complete the protection provided by
83264 + the enforcement of non-executable pages.
83265 +
83266 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
83267 + this feature on a per file basis.
83268 +
83269 +config PAX_MPROTECT_COMPAT
83270 + bool "Use legacy/compat protection demoting (read help)"
83271 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
83272 + depends on PAX_MPROTECT
83273 + help
83274 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
83275 + by sending the proper error code to the application. For some broken
83276 + userland, this can cause problems with Python or other applications. The
83277 + current implementation however allows for applications like clamav to
83278 + detect if JIT compilation/execution is allowed and to fall back gracefully
83279 + to an interpreter-based mode if it does not. While we encourage everyone
83280 + to use the current implementation as-is and push upstream to fix broken
83281 + userland (note that the RWX logging option can assist with this), in some
83282 + environments this may not be possible. Having to disable MPROTECT
83283 + completely on certain binaries reduces the security benefit of PaX,
83284 + so this option is provided for those environments to revert to the old
83285 + behavior.
83286 +
83287 +config PAX_ELFRELOCS
83288 + bool "Allow ELF text relocations (read help)"
83289 + depends on PAX_MPROTECT
83290 + default n
83291 + help
83292 + Non-executable pages and mprotect() restrictions are effective
83293 + in preventing the introduction of new executable code into an
83294 + attacked task's address space. There remain only two venues
83295 + for this kind of attack: if the attacker can execute already
83296 + existing code in the attacked task then he can either have it
83297 + create and mmap() a file containing his code or have it mmap()
83298 + an already existing ELF library that does not have position
83299 + independent code in it and use mprotect() on it to make it
83300 + writable and copy his code there. While protecting against
83301 + the former approach is beyond PaX, the latter can be prevented
83302 + by having only PIC ELF libraries on one's system (which do not
83303 + need to relocate their code). If you are sure this is your case,
83304 + as is the case with all modern Linux distributions, then leave
83305 + this option disabled. You should say 'n' here.
83306 +
83307 +config PAX_ETEXECRELOCS
83308 + bool "Allow ELF ET_EXEC text relocations"
83309 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
83310 + select PAX_ELFRELOCS
83311 + default y
83312 + help
83313 + On some architectures there are incorrectly created applications
83314 + that require text relocations and would not work without enabling
83315 + this option. If you are an alpha, ia64 or parisc user, you should
83316 + enable this option and disable it once you have made sure that
83317 + none of your applications need it.
83318 +
83319 +config PAX_EMUPLT
83320 + bool "Automatically emulate ELF PLT"
83321 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
83322 + default y
83323 + help
83324 + Enabling this option will have the kernel automatically detect
83325 + and emulate the Procedure Linkage Table entries in ELF files.
83326 + On some architectures such entries are in writable memory, and
83327 + become non-executable leading to task termination. Therefore
83328 + it is mandatory that you enable this option on alpha, parisc,
83329 + sparc and sparc64, otherwise your system would not even boot.
83330 +
83331 + NOTE: this feature *does* open up a loophole in the protection
83332 + provided by the non-executable pages, therefore the proper
83333 + solution is to modify the toolchain to produce a PLT that does
83334 + not need to be writable.
83335 +
83336 +config PAX_DLRESOLVE
83337 + bool 'Emulate old glibc resolver stub'
83338 + depends on PAX_EMUPLT && SPARC
83339 + default n
83340 + help
83341 + This option is needed if userland has an old glibc (before 2.4)
83342 + that puts a 'save' instruction into the runtime generated resolver
83343 + stub that needs special emulation.
83344 +
83345 +config PAX_KERNEXEC
83346 + bool "Enforce non-executable kernel pages"
83347 + default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
83348 + depends on (X86 || ARM_LPAE) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
83349 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
83350 + select PAX_KERNEXEC_PLUGIN if X86_64
83351 + help
83352 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
83353 + that is, enabling this option will make it harder to inject
83354 + and execute 'foreign' code in kernel memory itself.
83355 +
83356 +choice
83357 + prompt "Return Address Instrumentation Method"
83358 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
83359 + depends on PAX_KERNEXEC_PLUGIN
83360 + help
83361 + Select the method used to instrument function pointer dereferences.
83362 + Note that binary modules cannot be instrumented by this approach.
83363 +
83364 + Note that the implementation requires a gcc with plugin support,
83365 + i.e., gcc 4.5 or newer. You may need to install the supporting
83366 + headers explicitly in addition to the normal gcc package.
83367 +
83368 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
83369 + bool "bts"
83370 + help
83371 + This method is compatible with binary only modules but has
83372 + a higher runtime overhead.
83373 +
83374 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
83375 + bool "or"
83376 + depends on !PARAVIRT
83377 + help
83378 + This method is incompatible with binary only modules but has
83379 + a lower runtime overhead.
83380 +endchoice
83381 +
83382 +config PAX_KERNEXEC_PLUGIN_METHOD
83383 + string
83384 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
83385 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
83386 + default ""
83387 +
83388 +config PAX_KERNEXEC_MODULE_TEXT
83389 + int "Minimum amount of memory reserved for module code"
83390 + default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
83391 + default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
83392 + depends on PAX_KERNEXEC && X86_32 && MODULES
83393 + help
83394 + Due to implementation details the kernel must reserve a fixed
83395 + amount of memory for module code at compile time that cannot be
83396 + changed at runtime. Here you can specify the minimum amount
83397 + in MB that will be reserved. Due to the same implementation
83398 + details this size will always be rounded up to the next 2/4 MB
83399 + boundary (depends on PAE) so the actually available memory for
83400 + module code will usually be more than this minimum.
83401 +
83402 + The default 4 MB should be enough for most users but if you have
83403 + an excessive number of modules (e.g., most distribution configs
83404 + compile many drivers as modules) or use huge modules such as
83405 + nvidia's kernel driver, you will need to adjust this amount.
83406 + A good rule of thumb is to look at your currently loaded kernel
83407 + modules and add up their sizes.
83408 +
83409 +endmenu
83410 +
83411 +menu "Address Space Layout Randomization"
83412 + depends on PAX
83413 +
83414 +config PAX_ASLR
83415 + bool "Address Space Layout Randomization"
83416 + default y if GRKERNSEC_CONFIG_AUTO
83417 + help
83418 + Many if not most exploit techniques rely on the knowledge of
83419 + certain addresses in the attacked program. The following options
83420 + will allow the kernel to apply a certain amount of randomization
83421 + to specific parts of the program thereby forcing an attacker to
83422 + guess them in most cases. Any failed guess will most likely crash
83423 + the attacked program which allows the kernel to detect such attempts
83424 + and react on them. PaX itself provides no reaction mechanisms,
83425 + instead it is strongly encouraged that you make use of Nergal's
83426 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
83427 + (http://www.grsecurity.net/) built-in crash detection features or
83428 + develop one yourself.
83429 +
83430 + By saying Y here you can choose to randomize the following areas:
83431 + - top of the task's kernel stack
83432 + - top of the task's userland stack
83433 + - base address for mmap() requests that do not specify one
83434 + (this includes all libraries)
83435 + - base address of the main executable
83436 +
83437 + It is strongly recommended to say Y here as address space layout
83438 + randomization has negligible impact on performance yet it provides
83439 + a very effective protection.
83440 +
83441 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
83442 + this feature on a per file basis.
83443 +
83444 +config PAX_RANDKSTACK
83445 + bool "Randomize kernel stack base"
83446 + default y if GRKERNSEC_CONFIG_AUTO
83447 + depends on X86_TSC && X86
83448 + help
83449 + By saying Y here the kernel will randomize every task's kernel
83450 + stack on every system call. This will not only force an attacker
83451 + to guess it but also prevent him from making use of possible
83452 + leaked information about it.
83453 +
83454 + Since the kernel stack is a rather scarce resource, randomization
83455 + may cause unexpected stack overflows, therefore you should very
83456 + carefully test your system. Note that once enabled in the kernel
83457 + configuration, this feature cannot be disabled on a per file basis.
83458 +
83459 +config PAX_RANDUSTACK
83460 + bool "Randomize user stack base"
83461 + default y if GRKERNSEC_CONFIG_AUTO
83462 + depends on PAX_ASLR
83463 + help
83464 + By saying Y here the kernel will randomize every task's userland
83465 + stack. The randomization is done in two steps where the second
83466 + one may apply a big amount of shift to the top of the stack and
83467 + cause problems for programs that want to use lots of memory (more
83468 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
83469 + For this reason the second step can be controlled by 'chpax' or
83470 + 'paxctl' on a per file basis.
83471 +
83472 +config PAX_RANDMMAP
83473 + bool "Randomize mmap() base"
83474 + default y if GRKERNSEC_CONFIG_AUTO
83475 + depends on PAX_ASLR
83476 + help
83477 + By saying Y here the kernel will use a randomized base address for
83478 + mmap() requests that do not specify one themselves. As a result
83479 + all dynamically loaded libraries will appear at random addresses
83480 + and therefore be harder to exploit by a technique where an attacker
83481 + attempts to execute library code for his purposes (e.g. spawn a
83482 + shell from an exploited program that is running at an elevated
83483 + privilege level).
83484 +
83485 + Furthermore, if a program is relinked as a dynamic ELF file, its
83486 + base address will be randomized as well, completing the full
83487 + randomization of the address space layout. Attacking such programs
83488 + becomes a guess game. You can find an example of doing this at
83489 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
83490 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
83491 +
83492 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
83493 + feature on a per file basis.
83494 +
83495 +endmenu
83496 +
83497 +menu "Miscellaneous hardening features"
83498 +
83499 +config PAX_MEMORY_SANITIZE
83500 + bool "Sanitize all freed memory"
83501 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
83502 + depends on !HIBERNATION
83503 + help
83504 + By saying Y here the kernel will erase memory pages as soon as they
83505 + are freed. This in turn reduces the lifetime of data stored in the
83506 + pages, making it less likely that sensitive information such as
83507 + passwords, cryptographic secrets, etc stay in memory for too long.
83508 +
83509 + This is especially useful for programs whose runtime is short, long
83510 + lived processes and the kernel itself benefit from this as long as
83511 + they operate on whole memory pages and ensure timely freeing of pages
83512 + that may hold sensitive information.
83513 +
83514 + The tradeoff is performance impact, on a single CPU system kernel
83515 + compilation sees a 3% slowdown, other systems and workloads may vary
83516 + and you are advised to test this feature on your expected workload
83517 + before deploying it.
83518 +
83519 + Note that this feature does not protect data stored in live pages,
83520 + e.g., process memory swapped to disk may stay there for a long time.
83521 +
83522 +config PAX_MEMORY_STACKLEAK
83523 + bool "Sanitize kernel stack"
83524 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
83525 + depends on X86
83526 + help
83527 + By saying Y here the kernel will erase the kernel stack before it
83528 + returns from a system call. This in turn reduces the information
83529 + that a kernel stack leak bug can reveal.
83530 +
83531 + Note that such a bug can still leak information that was put on
83532 + the stack by the current system call (the one eventually triggering
83533 + the bug) but traces of earlier system calls on the kernel stack
83534 + cannot leak anymore.
83535 +
83536 + The tradeoff is performance impact: on a single CPU system kernel
83537 + compilation sees a 1% slowdown, other systems and workloads may vary
83538 + and you are advised to test this feature on your expected workload
83539 + before deploying it.
83540 +
83541 + Note that the full feature requires a gcc with plugin support,
83542 + i.e., gcc 4.5 or newer. You may need to install the supporting
83543 + headers explicitly in addition to the normal gcc package. Using
83544 + older gcc versions means that functions with large enough stack
83545 + frames may leave uninitialized memory behind that may be exposed
83546 + to a later syscall leaking the stack.
83547 +
83548 +config PAX_MEMORY_UDEREF
83549 + bool "Prevent invalid userland pointer dereference"
83550 + default y if GRKERNSEC_CONFIG_AUTO && (X86_32 || (X86_64 && GRKERNSEC_CONFIG_PRIORITY_SECURITY)) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
83551 + depends on X86 && !UML_X86 && !XEN
83552 + select PAX_PER_CPU_PGD if X86_64
83553 + help
83554 + By saying Y here the kernel will be prevented from dereferencing
83555 + userland pointers in contexts where the kernel expects only kernel
83556 + pointers. This is both a useful runtime debugging feature and a
83557 + security measure that prevents exploiting a class of kernel bugs.
83558 +
83559 + The tradeoff is that some virtualization solutions may experience
83560 + a huge slowdown and therefore you should not enable this feature
83561 + for kernels meant to run in such environments. Whether a given VM
83562 + solution is affected or not is best determined by simply trying it
83563 + out, the performance impact will be obvious right on boot as this
83564 + mechanism engages from very early on. A good rule of thumb is that
83565 + VMs running on CPUs without hardware virtualization support (i.e.,
83566 + the majority of IA-32 CPUs) will likely experience the slowdown.
83567 +
83568 +config PAX_REFCOUNT
83569 + bool "Prevent various kernel object reference counter overflows"
83570 + default y if GRKERNSEC_CONFIG_AUTO
83571 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
83572 + help
83573 + By saying Y here the kernel will detect and prevent overflowing
83574 + various (but not all) kinds of object reference counters. Such
83575 + overflows can normally occur due to bugs only and are often, if
83576 + not always, exploitable.
83577 +
83578 + The tradeoff is that data structures protected by an overflowed
83579 + refcount will never be freed and therefore will leak memory. Note
83580 + that this leak also happens even without this protection but in
83581 + that case the overflow can eventually trigger the freeing of the
83582 + data structure while it is still being used elsewhere, resulting
83583 + in the exploitable situation that this feature prevents.
83584 +
83585 + Since this has a negligible performance impact, you should enable
83586 + this feature.
83587 +
83588 +config PAX_USERCOPY
83589 + bool "Harden heap object copies between kernel and userland"
83590 + default y if GRKERNSEC_CONFIG_AUTO
83591 + depends on ARM || IA64 || PPC || SPARC || X86
83592 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
83593 + select PAX_USERCOPY_SLABS
83594 + help
83595 + By saying Y here the kernel will enforce the size of heap objects
83596 + when they are copied in either direction between the kernel and
83597 + userland, even if only a part of the heap object is copied.
83598 +
83599 + Specifically, this checking prevents information leaking from the
83600 + kernel heap during kernel to userland copies (if the kernel heap
83601 + object is otherwise fully initialized) and prevents kernel heap
83602 + overflows during userland to kernel copies.
83603 +
83604 + Note that the current implementation provides the strictest bounds
83605 + checks for the SLUB allocator.
83606 +
83607 + Enabling this option also enables per-slab cache protection against
83608 + data in a given cache being copied into/out of via userland
83609 + accessors. Though the whitelist of regions will be reduced over
83610 + time, it notably protects important data structures like task structs.
83611 +
83612 + If frame pointers are enabled on x86, this option will also restrict
83613 + copies into and out of the kernel stack to local variables within a
83614 + single frame.
83615 +
83616 + Since this has a negligible performance impact, you should enable
83617 + this feature.
83618 +
83619 +config PAX_SIZE_OVERFLOW
83620 + bool "Prevent various integer overflows in function size parameters"
83621 + default y if GRKERNSEC_CONFIG_AUTO
83622 + depends on X86
83623 + help
83624 + By saying Y here the kernel recomputes expressions of function
83625 + arguments marked by a size_overflow attribute with double integer
83626 + precision (DImode/TImode for 32/64 bit integer types).
83627 +
83628 + The recomputed argument is checked against TYPE_MAX and an event
83629 + is logged on overflow and the triggering process is killed.
83630 +
83631 + Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
83632 +
83633 + Note that the implementation requires a gcc with plugin support,
83634 + i.e., gcc 4.5 or newer. You may need to install the supporting
83635 + headers explicitly in addition to the normal gcc package.
83636 +
83637 +config PAX_LATENT_ENTROPY
83638 + bool "Generate some entropy during boot"
83639 + default y if GRKERNSEC_CONFIG_AUTO
83640 + help
83641 + By saying Y here the kernel will instrument early boot code to
83642 + extract some entropy from both original and artificially created
83643 + program state. This will help especially embedded systems where
83644 + there is little 'natural' source of entropy normally. The cost
83645 + is some slowdown of the boot process.
83646 +
83647 + Note that the implementation requires a gcc with plugin support,
83648 + i.e., gcc 4.5 or newer. You may need to install the supporting
83649 + headers explicitly in addition to the normal gcc package.
83650 +
83651 + Note that entropy extracted this way is not cryptographically
83652 + secure!
83653 +
83654 +endmenu
83655 +
83656 +endmenu
83657 +
83658 +source grsecurity/Kconfig
83659 +
83660 +endmenu
83661 +
83662 +endmenu
83663 +
83664 source security/keys/Kconfig
83665
83666 config SECURITY_DMESG_RESTRICT
83667 @@ -103,7 +993,7 @@ config INTEL_TXT
83668 config LSM_MMAP_MIN_ADDR
83669 int "Low address space for LSM to protect from user allocation"
83670 depends on SECURITY && SECURITY_SELINUX
83671 - default 32768 if ARM
83672 + default 32768 if ALPHA || ARM || PARISC || SPARC32
83673 default 65536
83674 help
83675 This is the portion of low virtual memory which should be protected
83676 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
83677 index 8c2a7f6..b133ac9 100644
83678 --- a/security/apparmor/lsm.c
83679 +++ b/security/apparmor/lsm.c
83680 @@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
83681 return error;
83682 }
83683
83684 -static struct security_operations apparmor_ops = {
83685 +static struct security_operations apparmor_ops __read_only = {
83686 .name = "apparmor",
83687
83688 .ptrace_access_check = apparmor_ptrace_access_check,
83689 diff --git a/security/commoncap.c b/security/commoncap.c
83690 index 6dbae46..d5611fd 100644
83691 --- a/security/commoncap.c
83692 +++ b/security/commoncap.c
83693 @@ -415,6 +415,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
83694 return 0;
83695 }
83696
83697 +/* returns:
83698 + 1 for suid privilege
83699 + 2 for sgid privilege
83700 + 3 for fscap privilege
83701 +*/
83702 +int is_privileged_binary(const struct dentry *dentry)
83703 +{
83704 + struct cpu_vfs_cap_data capdata;
83705 + struct inode *inode = dentry->d_inode;
83706 +
83707 + if (!inode || S_ISDIR(inode->i_mode))
83708 + return 0;
83709 +
83710 + if (inode->i_mode & S_ISUID)
83711 + return 1;
83712 + if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
83713 + return 2;
83714 +
83715 + if (!get_vfs_caps_from_disk(dentry, &capdata)) {
83716 + if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
83717 + return 3;
83718 + }
83719 +
83720 + return 0;
83721 +}
83722 +
83723 /*
83724 * Attempt to get the on-exec apply capability sets for an executable file from
83725 * its xattrs and, if present, apply them to the proposed credentials being
83726 @@ -583,6 +609,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
83727 const struct cred *cred = current_cred();
83728 kuid_t root_uid = make_kuid(cred->user_ns, 0);
83729
83730 + if (gr_acl_enable_at_secure())
83731 + return 1;
83732 +
83733 if (!uid_eq(cred->uid, root_uid)) {
83734 if (bprm->cap_effective)
83735 return 1;
83736 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
83737 index 6ee8826..6350060 100644
83738 --- a/security/integrity/ima/ima.h
83739 +++ b/security/integrity/ima/ima.h
83740 @@ -96,8 +96,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
83741 extern spinlock_t ima_queue_lock;
83742
83743 struct ima_h_table {
83744 - atomic_long_t len; /* number of stored measurements in the list */
83745 - atomic_long_t violations;
83746 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
83747 + atomic_long_unchecked_t violations;
83748 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
83749 };
83750 extern struct ima_h_table ima_htable;
83751 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
83752 index b356884..fd9676e 100644
83753 --- a/security/integrity/ima/ima_api.c
83754 +++ b/security/integrity/ima/ima_api.c
83755 @@ -79,7 +79,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
83756 int result;
83757
83758 /* can overflow, only indicator */
83759 - atomic_long_inc(&ima_htable.violations);
83760 + atomic_long_inc_unchecked(&ima_htable.violations);
83761
83762 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
83763 if (!entry) {
83764 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
83765 index 38477c9..87a60c7 100644
83766 --- a/security/integrity/ima/ima_fs.c
83767 +++ b/security/integrity/ima/ima_fs.c
83768 @@ -28,12 +28,12 @@
83769 static int valid_policy = 1;
83770 #define TMPBUFLEN 12
83771 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
83772 - loff_t *ppos, atomic_long_t *val)
83773 + loff_t *ppos, atomic_long_unchecked_t *val)
83774 {
83775 char tmpbuf[TMPBUFLEN];
83776 ssize_t len;
83777
83778 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
83779 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
83780 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
83781 }
83782
83783 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
83784 index 55a6271..ad829c3 100644
83785 --- a/security/integrity/ima/ima_queue.c
83786 +++ b/security/integrity/ima/ima_queue.c
83787 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
83788 INIT_LIST_HEAD(&qe->later);
83789 list_add_tail_rcu(&qe->later, &ima_measurements);
83790
83791 - atomic_long_inc(&ima_htable.len);
83792 + atomic_long_inc_unchecked(&ima_htable.len);
83793 key = ima_hash_key(entry->digest);
83794 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
83795 return 0;
83796 diff --git a/security/keys/compat.c b/security/keys/compat.c
83797 index 1c26176..64a1ba2 100644
83798 --- a/security/keys/compat.c
83799 +++ b/security/keys/compat.c
83800 @@ -44,7 +44,7 @@ static long compat_keyctl_instantiate_key_iov(
83801 if (ret == 0)
83802 goto no_payload_free;
83803
83804 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
83805 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
83806
83807 if (iov != iovstack)
83808 kfree(iov);
83809 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
83810 index 5d34b4e..2456674 100644
83811 --- a/security/keys/keyctl.c
83812 +++ b/security/keys/keyctl.c
83813 @@ -986,7 +986,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
83814 /*
83815 * Copy the iovec data from userspace
83816 */
83817 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
83818 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
83819 unsigned ioc)
83820 {
83821 for (; ioc > 0; ioc--) {
83822 @@ -1008,7 +1008,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
83823 * If successful, 0 will be returned.
83824 */
83825 long keyctl_instantiate_key_common(key_serial_t id,
83826 - const struct iovec *payload_iov,
83827 + const struct iovec __user *payload_iov,
83828 unsigned ioc,
83829 size_t plen,
83830 key_serial_t ringid)
83831 @@ -1103,7 +1103,7 @@ long keyctl_instantiate_key(key_serial_t id,
83832 [0].iov_len = plen
83833 };
83834
83835 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
83836 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
83837 }
83838
83839 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
83840 @@ -1136,7 +1136,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
83841 if (ret == 0)
83842 goto no_payload_free;
83843
83844 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
83845 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
83846
83847 if (iov != iovstack)
83848 kfree(iov);
83849 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
83850 index 6e42df1..aba52bd 100644
83851 --- a/security/keys/keyring.c
83852 +++ b/security/keys/keyring.c
83853 @@ -227,16 +227,16 @@ static long keyring_read(const struct key *keyring,
83854 ret = -EFAULT;
83855
83856 for (loop = 0; loop < klist->nkeys; loop++) {
83857 + key_serial_t serial;
83858 key = rcu_deref_link_locked(klist, loop,
83859 keyring);
83860 + serial = key->serial;
83861
83862 tmp = sizeof(key_serial_t);
83863 if (tmp > buflen)
83864 tmp = buflen;
83865
83866 - if (copy_to_user(buffer,
83867 - &key->serial,
83868 - tmp) != 0)
83869 + if (copy_to_user(buffer, &serial, tmp))
83870 goto error;
83871
83872 buflen -= tmp;
83873 diff --git a/security/min_addr.c b/security/min_addr.c
83874 index f728728..6457a0c 100644
83875 --- a/security/min_addr.c
83876 +++ b/security/min_addr.c
83877 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
83878 */
83879 static void update_mmap_min_addr(void)
83880 {
83881 +#ifndef SPARC
83882 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
83883 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
83884 mmap_min_addr = dac_mmap_min_addr;
83885 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
83886 #else
83887 mmap_min_addr = dac_mmap_min_addr;
83888 #endif
83889 +#endif
83890 }
83891
83892 /*
83893 diff --git a/security/security.c b/security/security.c
83894 index 8dcd4ae..1124de7 100644
83895 --- a/security/security.c
83896 +++ b/security/security.c
83897 @@ -20,6 +20,7 @@
83898 #include <linux/ima.h>
83899 #include <linux/evm.h>
83900 #include <linux/fsnotify.h>
83901 +#include <linux/mm.h>
83902 #include <linux/mman.h>
83903 #include <linux/mount.h>
83904 #include <linux/personality.h>
83905 @@ -32,8 +33,8 @@
83906 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
83907 CONFIG_DEFAULT_SECURITY;
83908
83909 -static struct security_operations *security_ops;
83910 -static struct security_operations default_security_ops = {
83911 +static struct security_operations *security_ops __read_only;
83912 +static struct security_operations default_security_ops __read_only = {
83913 .name = "default",
83914 };
83915
83916 @@ -74,7 +75,9 @@ int __init security_init(void)
83917
83918 void reset_security_ops(void)
83919 {
83920 + pax_open_kernel();
83921 security_ops = &default_security_ops;
83922 + pax_close_kernel();
83923 }
83924
83925 /* Save user chosen LSM */
83926 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
83927 index 61a5336..27215d8 100644
83928 --- a/security/selinux/hooks.c
83929 +++ b/security/selinux/hooks.c
83930 @@ -95,8 +95,6 @@
83931
83932 #define NUM_SEL_MNT_OPTS 5
83933
83934 -extern struct security_operations *security_ops;
83935 -
83936 /* SECMARK reference count */
83937 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
83938
83939 @@ -5476,7 +5474,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
83940
83941 #endif
83942
83943 -static struct security_operations selinux_ops = {
83944 +static struct security_operations selinux_ops __read_only = {
83945 .name = "selinux",
83946
83947 .ptrace_access_check = selinux_ptrace_access_check,
83948 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
83949 index 65f67cb..3f141ef 100644
83950 --- a/security/selinux/include/xfrm.h
83951 +++ b/security/selinux/include/xfrm.h
83952 @@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
83953
83954 static inline void selinux_xfrm_notify_policyload(void)
83955 {
83956 - atomic_inc(&flow_cache_genid);
83957 + atomic_inc_unchecked(&flow_cache_genid);
83958 rt_genid_bump(&init_net);
83959 }
83960 #else
83961 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
83962 index 38be92c..21f49ee 100644
83963 --- a/security/smack/smack_lsm.c
83964 +++ b/security/smack/smack_lsm.c
83965 @@ -3398,7 +3398,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
83966 return 0;
83967 }
83968
83969 -struct security_operations smack_ops = {
83970 +struct security_operations smack_ops __read_only = {
83971 .name = "smack",
83972
83973 .ptrace_access_check = smack_ptrace_access_check,
83974 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
83975 index a2ee362..5754f34 100644
83976 --- a/security/tomoyo/tomoyo.c
83977 +++ b/security/tomoyo/tomoyo.c
83978 @@ -503,7 +503,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
83979 * tomoyo_security_ops is a "struct security_operations" which is used for
83980 * registering TOMOYO.
83981 */
83982 -static struct security_operations tomoyo_security_ops = {
83983 +static struct security_operations tomoyo_security_ops __read_only = {
83984 .name = "tomoyo",
83985 .cred_alloc_blank = tomoyo_cred_alloc_blank,
83986 .cred_prepare = tomoyo_cred_prepare,
83987 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
83988 index 20ef514..4182bed 100644
83989 --- a/security/yama/Kconfig
83990 +++ b/security/yama/Kconfig
83991 @@ -1,6 +1,6 @@
83992 config SECURITY_YAMA
83993 bool "Yama support"
83994 - depends on SECURITY
83995 + depends on SECURITY && !GRKERNSEC
83996 select SECURITYFS
83997 select SECURITY_PATH
83998 default n
83999 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
84000 index 4cedc69..e59d8a3 100644
84001 --- a/sound/aoa/codecs/onyx.c
84002 +++ b/sound/aoa/codecs/onyx.c
84003 @@ -54,7 +54,7 @@ struct onyx {
84004 spdif_locked:1,
84005 analog_locked:1,
84006 original_mute:2;
84007 - int open_count;
84008 + local_t open_count;
84009 struct codec_info *codec_info;
84010
84011 /* mutex serializes concurrent access to the device
84012 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
84013 struct onyx *onyx = cii->codec_data;
84014
84015 mutex_lock(&onyx->mutex);
84016 - onyx->open_count++;
84017 + local_inc(&onyx->open_count);
84018 mutex_unlock(&onyx->mutex);
84019
84020 return 0;
84021 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
84022 struct onyx *onyx = cii->codec_data;
84023
84024 mutex_lock(&onyx->mutex);
84025 - onyx->open_count--;
84026 - if (!onyx->open_count)
84027 + if (local_dec_and_test(&onyx->open_count))
84028 onyx->spdif_locked = onyx->analog_locked = 0;
84029 mutex_unlock(&onyx->mutex);
84030
84031 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
84032 index ffd2025..df062c9 100644
84033 --- a/sound/aoa/codecs/onyx.h
84034 +++ b/sound/aoa/codecs/onyx.h
84035 @@ -11,6 +11,7 @@
84036 #include <linux/i2c.h>
84037 #include <asm/pmac_low_i2c.h>
84038 #include <asm/prom.h>
84039 +#include <asm/local.h>
84040
84041 /* PCM3052 register definitions */
84042
84043 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
84044 index 4c1cc51..16040040 100644
84045 --- a/sound/core/oss/pcm_oss.c
84046 +++ b/sound/core/oss/pcm_oss.c
84047 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
84048 if (in_kernel) {
84049 mm_segment_t fs;
84050 fs = snd_enter_user();
84051 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
84052 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
84053 snd_leave_user(fs);
84054 } else {
84055 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
84056 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
84057 }
84058 if (ret != -EPIPE && ret != -ESTRPIPE)
84059 break;
84060 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
84061 if (in_kernel) {
84062 mm_segment_t fs;
84063 fs = snd_enter_user();
84064 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
84065 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
84066 snd_leave_user(fs);
84067 } else {
84068 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
84069 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
84070 }
84071 if (ret == -EPIPE) {
84072 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
84073 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
84074 struct snd_pcm_plugin_channel *channels;
84075 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
84076 if (!in_kernel) {
84077 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
84078 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
84079 return -EFAULT;
84080 buf = runtime->oss.buffer;
84081 }
84082 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
84083 }
84084 } else {
84085 tmp = snd_pcm_oss_write2(substream,
84086 - (const char __force *)buf,
84087 + (const char __force_kernel *)buf,
84088 runtime->oss.period_bytes, 0);
84089 if (tmp <= 0)
84090 goto err;
84091 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
84092 struct snd_pcm_runtime *runtime = substream->runtime;
84093 snd_pcm_sframes_t frames, frames1;
84094 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
84095 - char __user *final_dst = (char __force __user *)buf;
84096 + char __user *final_dst = (char __force_user *)buf;
84097 if (runtime->oss.plugin_first) {
84098 struct snd_pcm_plugin_channel *channels;
84099 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
84100 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
84101 xfer += tmp;
84102 runtime->oss.buffer_used -= tmp;
84103 } else {
84104 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
84105 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
84106 runtime->oss.period_bytes, 0);
84107 if (tmp <= 0)
84108 goto err;
84109 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
84110 size1);
84111 size1 /= runtime->channels; /* frames */
84112 fs = snd_enter_user();
84113 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
84114 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
84115 snd_leave_user(fs);
84116 }
84117 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
84118 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
84119 index 91cdf943..4085161 100644
84120 --- a/sound/core/pcm_compat.c
84121 +++ b/sound/core/pcm_compat.c
84122 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
84123 int err;
84124
84125 fs = snd_enter_user();
84126 - err = snd_pcm_delay(substream, &delay);
84127 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
84128 snd_leave_user(fs);
84129 if (err < 0)
84130 return err;
84131 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
84132 index f9ddecf..e27404d 100644
84133 --- a/sound/core/pcm_native.c
84134 +++ b/sound/core/pcm_native.c
84135 @@ -2804,11 +2804,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
84136 switch (substream->stream) {
84137 case SNDRV_PCM_STREAM_PLAYBACK:
84138 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
84139 - (void __user *)arg);
84140 + (void __force_user *)arg);
84141 break;
84142 case SNDRV_PCM_STREAM_CAPTURE:
84143 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
84144 - (void __user *)arg);
84145 + (void __force_user *)arg);
84146 break;
84147 default:
84148 result = -EINVAL;
84149 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
84150 index 60e8fc1..786abcb 100644
84151 --- a/sound/core/seq/seq_device.c
84152 +++ b/sound/core/seq/seq_device.c
84153 @@ -64,7 +64,7 @@ struct ops_list {
84154 int argsize; /* argument size */
84155
84156 /* operators */
84157 - struct snd_seq_dev_ops ops;
84158 + struct snd_seq_dev_ops *ops;
84159
84160 /* registred devices */
84161 struct list_head dev_list; /* list of devices */
84162 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
84163
84164 mutex_lock(&ops->reg_mutex);
84165 /* copy driver operators */
84166 - ops->ops = *entry;
84167 + ops->ops = entry;
84168 ops->driver |= DRIVER_LOADED;
84169 ops->argsize = argsize;
84170
84171 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
84172 dev->name, ops->id, ops->argsize, dev->argsize);
84173 return -EINVAL;
84174 }
84175 - if (ops->ops.init_device(dev) >= 0) {
84176 + if (ops->ops->init_device(dev) >= 0) {
84177 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
84178 ops->num_init_devices++;
84179 } else {
84180 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
84181 dev->name, ops->id, ops->argsize, dev->argsize);
84182 return -EINVAL;
84183 }
84184 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
84185 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
84186 dev->status = SNDRV_SEQ_DEVICE_FREE;
84187 dev->driver_data = NULL;
84188 ops->num_init_devices--;
84189 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
84190 index 2d5514b..3afae9c 100644
84191 --- a/sound/drivers/mts64.c
84192 +++ b/sound/drivers/mts64.c
84193 @@ -29,6 +29,7 @@
84194 #include <sound/initval.h>
84195 #include <sound/rawmidi.h>
84196 #include <sound/control.h>
84197 +#include <asm/local.h>
84198
84199 #define CARD_NAME "Miditerminal 4140"
84200 #define DRIVER_NAME "MTS64"
84201 @@ -67,7 +68,7 @@ struct mts64 {
84202 struct pardevice *pardev;
84203 int pardev_claimed;
84204
84205 - int open_count;
84206 + local_t open_count;
84207 int current_midi_output_port;
84208 int current_midi_input_port;
84209 u8 mode[MTS64_NUM_INPUT_PORTS];
84210 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
84211 {
84212 struct mts64 *mts = substream->rmidi->private_data;
84213
84214 - if (mts->open_count == 0) {
84215 + if (local_read(&mts->open_count) == 0) {
84216 /* We don't need a spinlock here, because this is just called
84217 if the device has not been opened before.
84218 So there aren't any IRQs from the device */
84219 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
84220
84221 msleep(50);
84222 }
84223 - ++(mts->open_count);
84224 + local_inc(&mts->open_count);
84225
84226 return 0;
84227 }
84228 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
84229 struct mts64 *mts = substream->rmidi->private_data;
84230 unsigned long flags;
84231
84232 - --(mts->open_count);
84233 - if (mts->open_count == 0) {
84234 + if (local_dec_return(&mts->open_count) == 0) {
84235 /* We need the spinlock_irqsave here because we can still
84236 have IRQs at this point */
84237 spin_lock_irqsave(&mts->lock, flags);
84238 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
84239
84240 msleep(500);
84241
84242 - } else if (mts->open_count < 0)
84243 - mts->open_count = 0;
84244 + } else if (local_read(&mts->open_count) < 0)
84245 + local_set(&mts->open_count, 0);
84246
84247 return 0;
84248 }
84249 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
84250 index b953fb4..1999c01 100644
84251 --- a/sound/drivers/opl4/opl4_lib.c
84252 +++ b/sound/drivers/opl4/opl4_lib.c
84253 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
84254 MODULE_DESCRIPTION("OPL4 driver");
84255 MODULE_LICENSE("GPL");
84256
84257 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
84258 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
84259 {
84260 int timeout = 10;
84261 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
84262 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
84263 index 8364855..59f2e2b 100644
84264 --- a/sound/drivers/portman2x4.c
84265 +++ b/sound/drivers/portman2x4.c
84266 @@ -48,6 +48,7 @@
84267 #include <sound/initval.h>
84268 #include <sound/rawmidi.h>
84269 #include <sound/control.h>
84270 +#include <asm/local.h>
84271
84272 #define CARD_NAME "Portman 2x4"
84273 #define DRIVER_NAME "portman"
84274 @@ -85,7 +86,7 @@ struct portman {
84275 struct pardevice *pardev;
84276 int pardev_claimed;
84277
84278 - int open_count;
84279 + local_t open_count;
84280 int mode[PORTMAN_NUM_INPUT_PORTS];
84281 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
84282 };
84283 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
84284 index ea995af..f1bfa37 100644
84285 --- a/sound/firewire/amdtp.c
84286 +++ b/sound/firewire/amdtp.c
84287 @@ -389,7 +389,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
84288 ptr = s->pcm_buffer_pointer + data_blocks;
84289 if (ptr >= pcm->runtime->buffer_size)
84290 ptr -= pcm->runtime->buffer_size;
84291 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
84292 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
84293
84294 s->pcm_period_pointer += data_blocks;
84295 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
84296 @@ -557,7 +557,7 @@ EXPORT_SYMBOL(amdtp_out_stream_pcm_pointer);
84297 */
84298 void amdtp_out_stream_update(struct amdtp_out_stream *s)
84299 {
84300 - ACCESS_ONCE(s->source_node_id_field) =
84301 + ACCESS_ONCE_RW(s->source_node_id_field) =
84302 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
84303 }
84304 EXPORT_SYMBOL(amdtp_out_stream_update);
84305 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
84306 index b680c5e..061b7a0 100644
84307 --- a/sound/firewire/amdtp.h
84308 +++ b/sound/firewire/amdtp.h
84309 @@ -139,7 +139,7 @@ static inline bool amdtp_out_streaming_error(struct amdtp_out_stream *s)
84310 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
84311 struct snd_pcm_substream *pcm)
84312 {
84313 - ACCESS_ONCE(s->pcm) = pcm;
84314 + ACCESS_ONCE_RW(s->pcm) = pcm;
84315 }
84316
84317 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
84318 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
84319 index d428ffe..751ef78 100644
84320 --- a/sound/firewire/isight.c
84321 +++ b/sound/firewire/isight.c
84322 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
84323 ptr += count;
84324 if (ptr >= runtime->buffer_size)
84325 ptr -= runtime->buffer_size;
84326 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
84327 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
84328
84329 isight->period_counter += count;
84330 if (isight->period_counter >= runtime->period_size) {
84331 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
84332 if (err < 0)
84333 return err;
84334
84335 - ACCESS_ONCE(isight->pcm_active) = true;
84336 + ACCESS_ONCE_RW(isight->pcm_active) = true;
84337
84338 return 0;
84339 }
84340 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
84341 {
84342 struct isight *isight = substream->private_data;
84343
84344 - ACCESS_ONCE(isight->pcm_active) = false;
84345 + ACCESS_ONCE_RW(isight->pcm_active) = false;
84346
84347 mutex_lock(&isight->mutex);
84348 isight_stop_streaming(isight);
84349 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
84350
84351 switch (cmd) {
84352 case SNDRV_PCM_TRIGGER_START:
84353 - ACCESS_ONCE(isight->pcm_running) = true;
84354 + ACCESS_ONCE_RW(isight->pcm_running) = true;
84355 break;
84356 case SNDRV_PCM_TRIGGER_STOP:
84357 - ACCESS_ONCE(isight->pcm_running) = false;
84358 + ACCESS_ONCE_RW(isight->pcm_running) = false;
84359 break;
84360 default:
84361 return -EINVAL;
84362 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
84363 index 7bd5e33..1fcab12 100644
84364 --- a/sound/isa/cmi8330.c
84365 +++ b/sound/isa/cmi8330.c
84366 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
84367
84368 struct snd_pcm *pcm;
84369 struct snd_cmi8330_stream {
84370 - struct snd_pcm_ops ops;
84371 + snd_pcm_ops_no_const ops;
84372 snd_pcm_open_callback_t open;
84373 void *private_data; /* sb or wss */
84374 } streams[2];
84375 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
84376 index b2b3c01..e1c1e1f 100644
84377 --- a/sound/oss/sb_audio.c
84378 +++ b/sound/oss/sb_audio.c
84379 @@ -903,7 +903,7 @@ sb16_copy_from_user(int dev,
84380 buf16 = (signed short *)(localbuf + localoffs);
84381 while (c)
84382 {
84383 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
84384 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
84385 if (copy_from_user(lbuf8,
84386 userbuf+useroffs + p,
84387 locallen))
84388 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
84389 index 7d8803a..559f8d0 100644
84390 --- a/sound/oss/swarm_cs4297a.c
84391 +++ b/sound/oss/swarm_cs4297a.c
84392 @@ -2621,7 +2621,6 @@ static int __init cs4297a_init(void)
84393 {
84394 struct cs4297a_state *s;
84395 u32 pwr, id;
84396 - mm_segment_t fs;
84397 int rval;
84398 #ifndef CONFIG_BCM_CS4297A_CSWARM
84399 u64 cfg;
84400 @@ -2711,22 +2710,23 @@ static int __init cs4297a_init(void)
84401 if (!rval) {
84402 char *sb1250_duart_present;
84403
84404 +#if 0
84405 + mm_segment_t fs;
84406 fs = get_fs();
84407 set_fs(KERNEL_DS);
84408 -#if 0
84409 val = SOUND_MASK_LINE;
84410 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
84411 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
84412 val = initvol[i].vol;
84413 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
84414 }
84415 + set_fs(fs);
84416 // cs4297a_write_ac97(s, 0x18, 0x0808);
84417 #else
84418 // cs4297a_write_ac97(s, 0x5e, 0x180);
84419 cs4297a_write_ac97(s, 0x02, 0x0808);
84420 cs4297a_write_ac97(s, 0x18, 0x0808);
84421 #endif
84422 - set_fs(fs);
84423
84424 list_add(&s->list, &cs4297a_devs);
84425
84426 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
84427 index 4f4e545..9b75d10 100644
84428 --- a/sound/pci/hda/hda_codec.h
84429 +++ b/sound/pci/hda/hda_codec.h
84430 @@ -618,7 +618,7 @@ struct hda_bus_ops {
84431 /* notify power-up/down from codec to controller */
84432 void (*pm_notify)(struct hda_bus *bus, bool power_up);
84433 #endif
84434 -};
84435 +} __no_const;
84436
84437 /* template to pass to the bus constructor */
84438 struct hda_bus_template {
84439 @@ -716,6 +716,7 @@ struct hda_codec_ops {
84440 #endif
84441 void (*reboot_notify)(struct hda_codec *codec);
84442 };
84443 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
84444
84445 /* record for amp information cache */
84446 struct hda_cache_head {
84447 @@ -746,7 +747,7 @@ struct hda_pcm_ops {
84448 struct snd_pcm_substream *substream);
84449 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
84450 struct snd_pcm_substream *substream);
84451 -};
84452 +} __no_const;
84453
84454 /* PCM information for each substream */
84455 struct hda_pcm_stream {
84456 @@ -805,7 +806,7 @@ struct hda_codec {
84457 const char *modelname; /* model name for preset */
84458
84459 /* set by patch */
84460 - struct hda_codec_ops patch_ops;
84461 + hda_codec_ops_no_const patch_ops;
84462
84463 /* PCM to create, set by patch_ops.build_pcms callback */
84464 unsigned int num_pcms;
84465 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
84466 index f9d870e..c80188d 100644
84467 --- a/sound/pci/hda/hda_intel.c
84468 +++ b/sound/pci/hda/hda_intel.c
84469 @@ -512,7 +512,7 @@ struct azx {
84470 struct work_struct irq_pending_work;
84471
84472 /* reboot notifier (for mysterious hangup problem at power-down) */
84473 - struct notifier_block reboot_notifier;
84474 + notifier_block_no_const reboot_notifier;
84475
84476 /* card list (for power_save trigger) */
84477 struct list_head list;
84478 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
84479 index d0e7d87..49ec1bb 100644
84480 --- a/sound/pci/ice1712/ice1712.h
84481 +++ b/sound/pci/ice1712/ice1712.h
84482 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
84483 unsigned int mask_flags; /* total mask bits */
84484 struct snd_akm4xxx_ops {
84485 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
84486 - } ops;
84487 + } __no_const ops;
84488 };
84489
84490 struct snd_ice1712_spdif {
84491 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
84492 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
84493 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
84494 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
84495 - } ops;
84496 + } __no_const ops;
84497 };
84498
84499
84500 diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
84501 index 4631a23..001ae57 100644
84502 --- a/sound/pci/ymfpci/ymfpci.h
84503 +++ b/sound/pci/ymfpci/ymfpci.h
84504 @@ -358,7 +358,7 @@ struct snd_ymfpci {
84505 spinlock_t reg_lock;
84506 spinlock_t voice_lock;
84507 wait_queue_head_t interrupt_sleep;
84508 - atomic_t interrupt_sleep_count;
84509 + atomic_unchecked_t interrupt_sleep_count;
84510 struct snd_info_entry *proc_entry;
84511 const struct firmware *dsp_microcode;
84512 const struct firmware *controller_microcode;
84513 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
84514 index 3a6f03f..bc5c86c 100644
84515 --- a/sound/pci/ymfpci/ymfpci_main.c
84516 +++ b/sound/pci/ymfpci/ymfpci_main.c
84517 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
84518 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
84519 break;
84520 }
84521 - if (atomic_read(&chip->interrupt_sleep_count)) {
84522 - atomic_set(&chip->interrupt_sleep_count, 0);
84523 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
84524 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
84525 wake_up(&chip->interrupt_sleep);
84526 }
84527 __end:
84528 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
84529 continue;
84530 init_waitqueue_entry(&wait, current);
84531 add_wait_queue(&chip->interrupt_sleep, &wait);
84532 - atomic_inc(&chip->interrupt_sleep_count);
84533 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
84534 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
84535 remove_wait_queue(&chip->interrupt_sleep, &wait);
84536 }
84537 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
84538 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
84539 spin_unlock(&chip->reg_lock);
84540
84541 - if (atomic_read(&chip->interrupt_sleep_count)) {
84542 - atomic_set(&chip->interrupt_sleep_count, 0);
84543 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
84544 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
84545 wake_up(&chip->interrupt_sleep);
84546 }
84547 }
84548 @@ -2420,7 +2420,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
84549 spin_lock_init(&chip->reg_lock);
84550 spin_lock_init(&chip->voice_lock);
84551 init_waitqueue_head(&chip->interrupt_sleep);
84552 - atomic_set(&chip->interrupt_sleep_count, 0);
84553 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
84554 chip->card = card;
84555 chip->pci = pci;
84556 chip->irq = -1;
84557 diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
84558 index 5708a97..e8b503d 100644
84559 --- a/sound/soc/codecs/tlv320aic3x.c
84560 +++ b/sound/soc/codecs/tlv320aic3x.c
84561 @@ -65,7 +65,7 @@ static LIST_HEAD(reset_list);
84562 struct aic3x_priv;
84563
84564 struct aic3x_disable_nb {
84565 - struct notifier_block nb;
84566 + notifier_block_no_const nb;
84567 struct aic3x_priv *aic3x;
84568 };
84569
84570 diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
84571 index c7c0034..3102641 100644
84572 --- a/sound/soc/codecs/wm8770.c
84573 +++ b/sound/soc/codecs/wm8770.c
84574 @@ -49,7 +49,7 @@ static const u16 wm8770_reg_defs[WM8770_CACHEREGNUM] = {
84575 struct wm8770_priv {
84576 enum snd_soc_control_type control_type;
84577 struct regulator_bulk_data supplies[WM8770_NUM_SUPPLIES];
84578 - struct notifier_block disable_nb[WM8770_NUM_SUPPLIES];
84579 + notifier_block_no_const disable_nb[WM8770_NUM_SUPPLIES];
84580 struct snd_soc_codec *codec;
84581 int sysclk;
84582 };
84583 diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
84584 index c088020..5b99147 100644
84585 --- a/sound/soc/codecs/wm8804.c
84586 +++ b/sound/soc/codecs/wm8804.c
84587 @@ -62,7 +62,7 @@ static const struct reg_default wm8804_reg_defaults[] = {
84588 struct wm8804_priv {
84589 struct regmap *regmap;
84590 struct regulator_bulk_data supplies[WM8804_NUM_SUPPLIES];
84591 - struct notifier_block disable_nb[WM8804_NUM_SUPPLIES];
84592 + notifier_block_no_const disable_nb[WM8804_NUM_SUPPLIES];
84593 };
84594
84595 static int txsrc_get(struct snd_kcontrol *kcontrol,
84596 diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
84597 index ce67200..7f6104f 100644
84598 --- a/sound/soc/codecs/wm8962.c
84599 +++ b/sound/soc/codecs/wm8962.c
84600 @@ -71,7 +71,7 @@ struct wm8962_priv {
84601 struct snd_soc_jack *jack;
84602
84603 struct regulator_bulk_data supplies[WM8962_NUM_SUPPLIES];
84604 - struct notifier_block disable_nb[WM8962_NUM_SUPPLIES];
84605 + notifier_block_no_const disable_nb[WM8962_NUM_SUPPLIES];
84606
84607 #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
84608 struct input_dev *beep;
84609 diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
84610 index 28c89b0..806f4fe 100644
84611 --- a/sound/soc/codecs/wm8995.c
84612 +++ b/sound/soc/codecs/wm8995.c
84613 @@ -384,7 +384,7 @@ struct wm8995_priv {
84614 int aifclk[2];
84615 struct fll_config fll[2], fll_suspend[2];
84616 struct regulator_bulk_data supplies[WM8995_NUM_SUPPLIES];
84617 - struct notifier_block disable_nb[WM8995_NUM_SUPPLIES];
84618 + notifier_block_no_const disable_nb[WM8995_NUM_SUPPLIES];
84619 struct snd_soc_codec *codec;
84620 };
84621
84622 diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
84623 index 6dcb02c..f7de227 100644
84624 --- a/sound/soc/codecs/wm8996.c
84625 +++ b/sound/soc/codecs/wm8996.c
84626 @@ -72,7 +72,7 @@ struct wm8996_priv {
84627 u16 hpout_pending;
84628
84629 struct regulator_bulk_data supplies[WM8996_NUM_SUPPLIES];
84630 - struct notifier_block disable_nb[WM8996_NUM_SUPPLIES];
84631 + notifier_block_no_const disable_nb[WM8996_NUM_SUPPLIES];
84632 int bg_ena;
84633
84634 struct wm8996_pdata pdata;
84635 diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
84636 new file mode 100644
84637 index 0000000..50f2f2f
84638 --- /dev/null
84639 +++ b/tools/gcc/.gitignore
84640 @@ -0,0 +1 @@
84641 +size_overflow_hash.h
84642 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
84643 new file mode 100644
84644 index 0000000..1d09b7e
84645 --- /dev/null
84646 +++ b/tools/gcc/Makefile
84647 @@ -0,0 +1,43 @@
84648 +#CC := gcc
84649 +#PLUGIN_SOURCE_FILES := pax_plugin.c
84650 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
84651 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
84652 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
84653 +
84654 +ifeq ($(PLUGINCC),$(HOSTCC))
84655 +HOSTLIBS := hostlibs
84656 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
84657 +else
84658 +HOSTLIBS := hostcxxlibs
84659 +HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
84660 +endif
84661 +
84662 +$(HOSTLIBS)-y := constify_plugin.so
84663 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
84664 +$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
84665 +$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
84666 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
84667 +$(HOSTLIBS)-y += colorize_plugin.so
84668 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
84669 +$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
84670 +
84671 +always := $($(HOSTLIBS)-y)
84672 +
84673 +constify_plugin-objs := constify_plugin.o
84674 +stackleak_plugin-objs := stackleak_plugin.o
84675 +kallocstat_plugin-objs := kallocstat_plugin.o
84676 +kernexec_plugin-objs := kernexec_plugin.o
84677 +checker_plugin-objs := checker_plugin.o
84678 +colorize_plugin-objs := colorize_plugin.o
84679 +size_overflow_plugin-objs := size_overflow_plugin.o
84680 +latent_entropy_plugin-objs := latent_entropy_plugin.o
84681 +
84682 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
84683 +
84684 +quiet_cmd_build_size_overflow_hash = GENHASH $@
84685 + cmd_build_size_overflow_hash = \
84686 + $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
84687 +$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
84688 + $(call if_changed,build_size_overflow_hash)
84689 +
84690 +targets += size_overflow_hash.h
84691 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
84692 new file mode 100644
84693 index 0000000..d41b5af
84694 --- /dev/null
84695 +++ b/tools/gcc/checker_plugin.c
84696 @@ -0,0 +1,171 @@
84697 +/*
84698 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
84699 + * Licensed under the GPL v2
84700 + *
84701 + * Note: the choice of the license means that the compilation process is
84702 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
84703 + * but for the kernel it doesn't matter since it doesn't link against
84704 + * any of the gcc libraries
84705 + *
84706 + * gcc plugin to implement various sparse (source code checker) features
84707 + *
84708 + * TODO:
84709 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
84710 + *
84711 + * BUGS:
84712 + * - none known
84713 + */
84714 +#include "gcc-plugin.h"
84715 +#include "config.h"
84716 +#include "system.h"
84717 +#include "coretypes.h"
84718 +#include "tree.h"
84719 +#include "tree-pass.h"
84720 +#include "flags.h"
84721 +#include "intl.h"
84722 +#include "toplev.h"
84723 +#include "plugin.h"
84724 +//#include "expr.h" where are you...
84725 +#include "diagnostic.h"
84726 +#include "plugin-version.h"
84727 +#include "tm.h"
84728 +#include "function.h"
84729 +#include "basic-block.h"
84730 +#include "gimple.h"
84731 +#include "rtl.h"
84732 +#include "emit-rtl.h"
84733 +#include "tree-flow.h"
84734 +#include "target.h"
84735 +
84736 +extern void c_register_addr_space (const char *str, addr_space_t as);
84737 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
84738 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
84739 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
84740 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
84741 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
84742 +
84743 +extern void print_gimple_stmt(FILE *, gimple, int, int);
84744 +extern rtx emit_move_insn(rtx x, rtx y);
84745 +
84746 +int plugin_is_GPL_compatible;
84747 +
84748 +static struct plugin_info checker_plugin_info = {
84749 + .version = "201111150100",
84750 +};
84751 +
84752 +#define ADDR_SPACE_KERNEL 0
84753 +#define ADDR_SPACE_FORCE_KERNEL 1
84754 +#define ADDR_SPACE_USER 2
84755 +#define ADDR_SPACE_FORCE_USER 3
84756 +#define ADDR_SPACE_IOMEM 0
84757 +#define ADDR_SPACE_FORCE_IOMEM 0
84758 +#define ADDR_SPACE_PERCPU 0
84759 +#define ADDR_SPACE_FORCE_PERCPU 0
84760 +#define ADDR_SPACE_RCU 0
84761 +#define ADDR_SPACE_FORCE_RCU 0
84762 +
84763 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
84764 +{
84765 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
84766 +}
84767 +
84768 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
84769 +{
84770 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
84771 +}
84772 +
84773 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
84774 +{
84775 + return default_addr_space_valid_pointer_mode(mode, as);
84776 +}
84777 +
84778 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
84779 +{
84780 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
84781 +}
84782 +
84783 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
84784 +{
84785 + return default_addr_space_legitimize_address(x, oldx, mode, as);
84786 +}
84787 +
84788 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
84789 +{
84790 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
84791 + return true;
84792 +
84793 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
84794 + return true;
84795 +
84796 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
84797 + return true;
84798 +
84799 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
84800 + return true;
84801 +
84802 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
84803 + return true;
84804 +
84805 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
84806 + return true;
84807 +
84808 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
84809 + return true;
84810 +
84811 + return subset == superset;
84812 +}
84813 +
84814 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
84815 +{
84816 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
84817 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
84818 +
84819 + return op;
84820 +}
84821 +
84822 +static void register_checker_address_spaces(void *event_data, void *data)
84823 +{
84824 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
84825 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
84826 + c_register_addr_space("__user", ADDR_SPACE_USER);
84827 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
84828 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
84829 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
84830 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
84831 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
84832 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
84833 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
84834 +
84835 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
84836 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
84837 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
84838 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
84839 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
84840 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
84841 + targetm.addr_space.convert = checker_addr_space_convert;
84842 +}
84843 +
84844 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
84845 +{
84846 + const char * const plugin_name = plugin_info->base_name;
84847 + const int argc = plugin_info->argc;
84848 + const struct plugin_argument * const argv = plugin_info->argv;
84849 + int i;
84850 +
84851 + if (!plugin_default_version_check(version, &gcc_version)) {
84852 + error(G_("incompatible gcc/plugin versions"));
84853 + return 1;
84854 + }
84855 +
84856 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
84857 +
84858 + for (i = 0; i < argc; ++i)
84859 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
84860 +
84861 + if (TARGET_64BIT == 0)
84862 + return 0;
84863 +
84864 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
84865 +
84866 + return 0;
84867 +}
84868 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
84869 new file mode 100644
84870 index 0000000..846aeb0
84871 --- /dev/null
84872 +++ b/tools/gcc/colorize_plugin.c
84873 @@ -0,0 +1,148 @@
84874 +/*
84875 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
84876 + * Licensed under the GPL v2
84877 + *
84878 + * Note: the choice of the license means that the compilation process is
84879 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
84880 + * but for the kernel it doesn't matter since it doesn't link against
84881 + * any of the gcc libraries
84882 + *
84883 + * gcc plugin to colorize diagnostic output
84884 + *
84885 + */
84886 +
84887 +#include "gcc-plugin.h"
84888 +#include "config.h"
84889 +#include "system.h"
84890 +#include "coretypes.h"
84891 +#include "tree.h"
84892 +#include "tree-pass.h"
84893 +#include "flags.h"
84894 +#include "intl.h"
84895 +#include "toplev.h"
84896 +#include "plugin.h"
84897 +#include "diagnostic.h"
84898 +#include "plugin-version.h"
84899 +#include "tm.h"
84900 +
84901 +int plugin_is_GPL_compatible;
84902 +
84903 +static struct plugin_info colorize_plugin_info = {
84904 + .version = "201203092200",
84905 + .help = NULL,
84906 +};
84907 +
84908 +#define GREEN "\033[32m\033[2m"
84909 +#define LIGHTGREEN "\033[32m\033[1m"
84910 +#define YELLOW "\033[33m\033[2m"
84911 +#define LIGHTYELLOW "\033[33m\033[1m"
84912 +#define RED "\033[31m\033[2m"
84913 +#define LIGHTRED "\033[31m\033[1m"
84914 +#define BLUE "\033[34m\033[2m"
84915 +#define LIGHTBLUE "\033[34m\033[1m"
84916 +#define BRIGHT "\033[m\033[1m"
84917 +#define NORMAL "\033[m"
84918 +
84919 +static diagnostic_starter_fn old_starter;
84920 +static diagnostic_finalizer_fn old_finalizer;
84921 +
84922 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
84923 +{
84924 + const char *color;
84925 + char *newprefix;
84926 +
84927 + switch (diagnostic->kind) {
84928 + case DK_NOTE:
84929 + color = LIGHTBLUE;
84930 + break;
84931 +
84932 + case DK_PEDWARN:
84933 + case DK_WARNING:
84934 + color = LIGHTYELLOW;
84935 + break;
84936 +
84937 + case DK_ERROR:
84938 + case DK_FATAL:
84939 + case DK_ICE:
84940 + case DK_PERMERROR:
84941 + case DK_SORRY:
84942 + color = LIGHTRED;
84943 + break;
84944 +
84945 + default:
84946 + color = NORMAL;
84947 + }
84948 +
84949 + old_starter(context, diagnostic);
84950 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
84951 + return;
84952 + pp_destroy_prefix(context->printer);
84953 + pp_set_prefix(context->printer, newprefix);
84954 +}
84955 +
84956 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
84957 +{
84958 + old_finalizer(context, diagnostic);
84959 +}
84960 +
84961 +static void colorize_arm(void)
84962 +{
84963 + old_starter = diagnostic_starter(global_dc);
84964 + old_finalizer = diagnostic_finalizer(global_dc);
84965 +
84966 + diagnostic_starter(global_dc) = start_colorize;
84967 + diagnostic_finalizer(global_dc) = finalize_colorize;
84968 +}
84969 +
84970 +static unsigned int execute_colorize_rearm(void)
84971 +{
84972 + if (diagnostic_starter(global_dc) == start_colorize)
84973 + return 0;
84974 +
84975 + colorize_arm();
84976 + return 0;
84977 +}
84978 +
84979 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
84980 + .pass = {
84981 + .type = SIMPLE_IPA_PASS,
84982 + .name = "colorize_rearm",
84983 + .gate = NULL,
84984 + .execute = execute_colorize_rearm,
84985 + .sub = NULL,
84986 + .next = NULL,
84987 + .static_pass_number = 0,
84988 + .tv_id = TV_NONE,
84989 + .properties_required = 0,
84990 + .properties_provided = 0,
84991 + .properties_destroyed = 0,
84992 + .todo_flags_start = 0,
84993 + .todo_flags_finish = 0
84994 + }
84995 +};
84996 +
84997 +static void colorize_start_unit(void *gcc_data, void *user_data)
84998 +{
84999 + colorize_arm();
85000 +}
85001 +
85002 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
85003 +{
85004 + const char * const plugin_name = plugin_info->base_name;
85005 + struct register_pass_info colorize_rearm_pass_info = {
85006 + .pass = &pass_ipa_colorize_rearm.pass,
85007 + .reference_pass_name = "*free_lang_data",
85008 + .ref_pass_instance_number = 1,
85009 + .pos_op = PASS_POS_INSERT_AFTER
85010 + };
85011 +
85012 + if (!plugin_default_version_check(version, &gcc_version)) {
85013 + error(G_("incompatible gcc/plugin versions"));
85014 + return 1;
85015 + }
85016 +
85017 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
85018 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
85019 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
85020 + return 0;
85021 +}
85022 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
85023 new file mode 100644
85024 index 0000000..92ed719
85025 --- /dev/null
85026 +++ b/tools/gcc/constify_plugin.c
85027 @@ -0,0 +1,331 @@
85028 +/*
85029 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
85030 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
85031 + * Licensed under the GPL v2, or (at your option) v3
85032 + *
85033 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
85034 + *
85035 + * Homepage:
85036 + * http://www.grsecurity.net/~ephox/const_plugin/
85037 + *
85038 + * Usage:
85039 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
85040 + * $ gcc -fplugin=constify_plugin.so test.c -O2
85041 + */
85042 +
85043 +#include "gcc-plugin.h"
85044 +#include "config.h"
85045 +#include "system.h"
85046 +#include "coretypes.h"
85047 +#include "tree.h"
85048 +#include "tree-pass.h"
85049 +#include "flags.h"
85050 +#include "intl.h"
85051 +#include "toplev.h"
85052 +#include "plugin.h"
85053 +#include "diagnostic.h"
85054 +#include "plugin-version.h"
85055 +#include "tm.h"
85056 +#include "function.h"
85057 +#include "basic-block.h"
85058 +#include "gimple.h"
85059 +#include "rtl.h"
85060 +#include "emit-rtl.h"
85061 +#include "tree-flow.h"
85062 +
85063 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
85064 +
85065 +int plugin_is_GPL_compatible;
85066 +
85067 +static struct plugin_info const_plugin_info = {
85068 + .version = "201205300030",
85069 + .help = "no-constify\tturn off constification\n",
85070 +};
85071 +
85072 +static void deconstify_tree(tree node);
85073 +
85074 +static void deconstify_type(tree type)
85075 +{
85076 + tree field;
85077 +
85078 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
85079 + tree type = TREE_TYPE(field);
85080 +
85081 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
85082 + continue;
85083 + if (!TYPE_READONLY(type))
85084 + continue;
85085 +
85086 + deconstify_tree(field);
85087 + }
85088 + TYPE_READONLY(type) = 0;
85089 + C_TYPE_FIELDS_READONLY(type) = 0;
85090 +}
85091 +
85092 +static void deconstify_tree(tree node)
85093 +{
85094 + tree old_type, new_type, field;
85095 +
85096 + old_type = TREE_TYPE(node);
85097 +
85098 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
85099 +
85100 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
85101 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
85102 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
85103 + DECL_FIELD_CONTEXT(field) = new_type;
85104 +
85105 + deconstify_type(new_type);
85106 +
85107 + TREE_READONLY(node) = 0;
85108 + TREE_TYPE(node) = new_type;
85109 +}
85110 +
85111 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
85112 +{
85113 + tree type;
85114 +
85115 + *no_add_attrs = true;
85116 + if (TREE_CODE(*node) == FUNCTION_DECL) {
85117 + error("%qE attribute does not apply to functions", name);
85118 + return NULL_TREE;
85119 + }
85120 +
85121 + if (TREE_CODE(*node) == VAR_DECL) {
85122 + error("%qE attribute does not apply to variables", name);
85123 + return NULL_TREE;
85124 + }
85125 +
85126 + if (TYPE_P(*node)) {
85127 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
85128 + *no_add_attrs = false;
85129 + else
85130 + error("%qE attribute applies to struct and union types only", name);
85131 + return NULL_TREE;
85132 + }
85133 +
85134 + type = TREE_TYPE(*node);
85135 +
85136 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
85137 + error("%qE attribute applies to struct and union types only", name);
85138 + return NULL_TREE;
85139 + }
85140 +
85141 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
85142 + error("%qE attribute is already applied to the type", name);
85143 + return NULL_TREE;
85144 + }
85145 +
85146 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
85147 + error("%qE attribute used on type that is not constified", name);
85148 + return NULL_TREE;
85149 + }
85150 +
85151 + if (TREE_CODE(*node) == TYPE_DECL) {
85152 + deconstify_tree(*node);
85153 + return NULL_TREE;
85154 + }
85155 +
85156 + return NULL_TREE;
85157 +}
85158 +
85159 +static void constify_type(tree type)
85160 +{
85161 + TYPE_READONLY(type) = 1;
85162 + C_TYPE_FIELDS_READONLY(type) = 1;
85163 +}
85164 +
85165 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
85166 +{
85167 + *no_add_attrs = true;
85168 + if (!TYPE_P(*node)) {
85169 + error("%qE attribute applies to types only", name);
85170 + return NULL_TREE;
85171 + }
85172 +
85173 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
85174 + error("%qE attribute applies to struct and union types only", name);
85175 + return NULL_TREE;
85176 + }
85177 +
85178 + *no_add_attrs = false;
85179 + constify_type(*node);
85180 + return NULL_TREE;
85181 +}
85182 +
85183 +static struct attribute_spec no_const_attr = {
85184 + .name = "no_const",
85185 + .min_length = 0,
85186 + .max_length = 0,
85187 + .decl_required = false,
85188 + .type_required = false,
85189 + .function_type_required = false,
85190 + .handler = handle_no_const_attribute,
85191 +#if BUILDING_GCC_VERSION >= 4007
85192 + .affects_type_identity = true
85193 +#endif
85194 +};
85195 +
85196 +static struct attribute_spec do_const_attr = {
85197 + .name = "do_const",
85198 + .min_length = 0,
85199 + .max_length = 0,
85200 + .decl_required = false,
85201 + .type_required = false,
85202 + .function_type_required = false,
85203 + .handler = handle_do_const_attribute,
85204 +#if BUILDING_GCC_VERSION >= 4007
85205 + .affects_type_identity = true
85206 +#endif
85207 +};
85208 +
85209 +static void register_attributes(void *event_data, void *data)
85210 +{
85211 + register_attribute(&no_const_attr);
85212 + register_attribute(&do_const_attr);
85213 +}
85214 +
85215 +static bool is_fptr(tree field)
85216 +{
85217 + tree ptr = TREE_TYPE(field);
85218 +
85219 + if (TREE_CODE(ptr) != POINTER_TYPE)
85220 + return false;
85221 +
85222 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
85223 +}
85224 +
85225 +static bool walk_struct(tree node)
85226 +{
85227 + tree field;
85228 +
85229 + if (TYPE_FIELDS(node) == NULL_TREE)
85230 + return false;
85231 +
85232 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
85233 + gcc_assert(!TYPE_READONLY(node));
85234 + deconstify_type(node);
85235 + return false;
85236 + }
85237 +
85238 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
85239 + tree type = TREE_TYPE(field);
85240 + enum tree_code code = TREE_CODE(type);
85241 +
85242 + if (node == type)
85243 + return false;
85244 + if (code == RECORD_TYPE || code == UNION_TYPE) {
85245 + if (!(walk_struct(type)))
85246 + return false;
85247 + } else if (!is_fptr(field) && !TREE_READONLY(field))
85248 + return false;
85249 + }
85250 + return true;
85251 +}
85252 +
85253 +static void finish_type(void *event_data, void *data)
85254 +{
85255 + tree type = (tree)event_data;
85256 +
85257 + if (type == NULL_TREE || type == error_mark_node)
85258 + return;
85259 +
85260 + if (TYPE_READONLY(type))
85261 + return;
85262 +
85263 + if (walk_struct(type))
85264 + constify_type(type);
85265 +}
85266 +
85267 +static unsigned int check_local_variables(void);
85268 +
85269 +struct gimple_opt_pass pass_local_variable = {
85270 + {
85271 + .type = GIMPLE_PASS,
85272 + .name = "check_local_variables",
85273 + .gate = NULL,
85274 + .execute = check_local_variables,
85275 + .sub = NULL,
85276 + .next = NULL,
85277 + .static_pass_number = 0,
85278 + .tv_id = TV_NONE,
85279 + .properties_required = 0,
85280 + .properties_provided = 0,
85281 + .properties_destroyed = 0,
85282 + .todo_flags_start = 0,
85283 + .todo_flags_finish = 0
85284 + }
85285 +};
85286 +
85287 +static unsigned int check_local_variables(void)
85288 +{
85289 + tree var;
85290 + referenced_var_iterator rvi;
85291 +
85292 +#if BUILDING_GCC_VERSION == 4005
85293 + FOR_EACH_REFERENCED_VAR(var, rvi) {
85294 +#else
85295 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
85296 +#endif
85297 + tree type = TREE_TYPE(var);
85298 +
85299 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
85300 + continue;
85301 +
85302 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
85303 + continue;
85304 +
85305 + if (!TYPE_READONLY(type))
85306 + continue;
85307 +
85308 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
85309 +// continue;
85310 +
85311 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
85312 +// continue;
85313 +
85314 + if (walk_struct(type)) {
85315 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
85316 + return 1;
85317 + }
85318 + }
85319 + return 0;
85320 +}
85321 +
85322 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
85323 +{
85324 + const char * const plugin_name = plugin_info->base_name;
85325 + const int argc = plugin_info->argc;
85326 + const struct plugin_argument * const argv = plugin_info->argv;
85327 + int i;
85328 + bool constify = true;
85329 +
85330 + struct register_pass_info local_variable_pass_info = {
85331 + .pass = &pass_local_variable.pass,
85332 + .reference_pass_name = "*referenced_vars",
85333 + .ref_pass_instance_number = 1,
85334 + .pos_op = PASS_POS_INSERT_AFTER
85335 + };
85336 +
85337 + if (!plugin_default_version_check(version, &gcc_version)) {
85338 + error(G_("incompatible gcc/plugin versions"));
85339 + return 1;
85340 + }
85341 +
85342 + for (i = 0; i < argc; ++i) {
85343 + if (!(strcmp(argv[i].key, "no-constify"))) {
85344 + constify = false;
85345 + continue;
85346 + }
85347 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
85348 + }
85349 +
85350 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
85351 + if (constify) {
85352 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
85353 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
85354 + }
85355 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
85356 +
85357 + return 0;
85358 +}
85359 diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
85360 new file mode 100644
85361 index 0000000..e518932
85362 --- /dev/null
85363 +++ b/tools/gcc/generate_size_overflow_hash.sh
85364 @@ -0,0 +1,94 @@
85365 +#!/bin/bash
85366 +
85367 +# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
85368 +
85369 +header1="size_overflow_hash.h"
85370 +database="size_overflow_hash.data"
85371 +n=65536
85372 +
85373 +usage() {
85374 +cat <<EOF
85375 +usage: $0 options
85376 +OPTIONS:
85377 + -h|--help help
85378 + -o header file
85379 + -d database file
85380 + -n hash array size
85381 +EOF
85382 + return 0
85383 +}
85384 +
85385 +while true
85386 +do
85387 + case "$1" in
85388 + -h|--help) usage && exit 0;;
85389 + -n) n=$2; shift 2;;
85390 + -o) header1="$2"; shift 2;;
85391 + -d) database="$2"; shift 2;;
85392 + --) shift 1; break ;;
85393 + *) break ;;
85394 + esac
85395 +done
85396 +
85397 +create_defines() {
85398 + for i in `seq 0 31`
85399 + do
85400 + echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
85401 + done
85402 + echo >> "$header1"
85403 +}
85404 +
85405 +create_structs() {
85406 + rm -f "$header1"
85407 +
85408 + create_defines
85409 +
85410 + cat "$database" | while read data
85411 + do
85412 + data_array=($data)
85413 + struct_hash_name="${data_array[0]}"
85414 + funcn="${data_array[1]}"
85415 + params="${data_array[2]}"
85416 + next="${data_array[4]}"
85417 +
85418 + echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
85419 +
85420 + echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
85421 + echo -en "\t.param\t= " >> "$header1"
85422 + line=
85423 + for param_num in ${params//-/ };
85424 + do
85425 + line="${line}PARAM"$param_num"|"
85426 + done
85427 +
85428 + echo -e "${line%?},\n};\n" >> "$header1"
85429 + done
85430 +}
85431 +
85432 +create_headers() {
85433 + echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1"
85434 +}
85435 +
85436 +create_array_elements() {
85437 + index=0
85438 + grep -v "nohasharray" $database | sort -n -k 4 | while read data
85439 + do
85440 + data_array=($data)
85441 + i="${data_array[3]}"
85442 + hash="${data_array[0]}"
85443 + while [[ $index -lt $i ]]
85444 + do
85445 + echo -e "\t["$index"]\t= NULL," >> "$header1"
85446 + index=$(($index + 1))
85447 + done
85448 + index=$(($index + 1))
85449 + echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
85450 + done
85451 + echo '};' >> $header1
85452 +}
85453 +
85454 +create_structs
85455 +create_headers
85456 +create_array_elements
85457 +
85458 +exit 0
85459 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
85460 new file mode 100644
85461 index 0000000..a86e422
85462 --- /dev/null
85463 +++ b/tools/gcc/kallocstat_plugin.c
85464 @@ -0,0 +1,167 @@
85465 +/*
85466 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
85467 + * Licensed under the GPL v2
85468 + *
85469 + * Note: the choice of the license means that the compilation process is
85470 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
85471 + * but for the kernel it doesn't matter since it doesn't link against
85472 + * any of the gcc libraries
85473 + *
85474 + * gcc plugin to find the distribution of k*alloc sizes
85475 + *
85476 + * TODO:
85477 + *
85478 + * BUGS:
85479 + * - none known
85480 + */
85481 +#include "gcc-plugin.h"
85482 +#include "config.h"
85483 +#include "system.h"
85484 +#include "coretypes.h"
85485 +#include "tree.h"
85486 +#include "tree-pass.h"
85487 +#include "flags.h"
85488 +#include "intl.h"
85489 +#include "toplev.h"
85490 +#include "plugin.h"
85491 +//#include "expr.h" where are you...
85492 +#include "diagnostic.h"
85493 +#include "plugin-version.h"
85494 +#include "tm.h"
85495 +#include "function.h"
85496 +#include "basic-block.h"
85497 +#include "gimple.h"
85498 +#include "rtl.h"
85499 +#include "emit-rtl.h"
85500 +
85501 +extern void print_gimple_stmt(FILE *, gimple, int, int);
85502 +
85503 +int plugin_is_GPL_compatible;
85504 +
85505 +static const char * const kalloc_functions[] = {
85506 + "__kmalloc",
85507 + "kmalloc",
85508 + "kmalloc_large",
85509 + "kmalloc_node",
85510 + "kmalloc_order",
85511 + "kmalloc_order_trace",
85512 + "kmalloc_slab",
85513 + "kzalloc",
85514 + "kzalloc_node",
85515 +};
85516 +
85517 +static struct plugin_info kallocstat_plugin_info = {
85518 + .version = "201111150100",
85519 +};
85520 +
85521 +static unsigned int execute_kallocstat(void);
85522 +
85523 +static struct gimple_opt_pass kallocstat_pass = {
85524 + .pass = {
85525 + .type = GIMPLE_PASS,
85526 + .name = "kallocstat",
85527 + .gate = NULL,
85528 + .execute = execute_kallocstat,
85529 + .sub = NULL,
85530 + .next = NULL,
85531 + .static_pass_number = 0,
85532 + .tv_id = TV_NONE,
85533 + .properties_required = 0,
85534 + .properties_provided = 0,
85535 + .properties_destroyed = 0,
85536 + .todo_flags_start = 0,
85537 + .todo_flags_finish = 0
85538 + }
85539 +};
85540 +
85541 +static bool is_kalloc(const char *fnname)
85542 +{
85543 + size_t i;
85544 +
85545 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
85546 + if (!strcmp(fnname, kalloc_functions[i]))
85547 + return true;
85548 + return false;
85549 +}
85550 +
85551 +static unsigned int execute_kallocstat(void)
85552 +{
85553 + basic_block bb;
85554 +
85555 + // 1. loop through BBs and GIMPLE statements
85556 + FOR_EACH_BB(bb) {
85557 + gimple_stmt_iterator gsi;
85558 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
85559 + // gimple match:
85560 + tree fndecl, size;
85561 + gimple call_stmt;
85562 + const char *fnname;
85563 +
85564 + // is it a call
85565 + call_stmt = gsi_stmt(gsi);
85566 + if (!is_gimple_call(call_stmt))
85567 + continue;
85568 + fndecl = gimple_call_fndecl(call_stmt);
85569 + if (fndecl == NULL_TREE)
85570 + continue;
85571 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
85572 + continue;
85573 +
85574 + // is it a call to k*alloc
85575 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
85576 + if (!is_kalloc(fnname))
85577 + continue;
85578 +
85579 + // is the size arg the result of a simple const assignment
85580 + size = gimple_call_arg(call_stmt, 0);
85581 + while (true) {
85582 + gimple def_stmt;
85583 + expanded_location xloc;
85584 + size_t size_val;
85585 +
85586 + if (TREE_CODE(size) != SSA_NAME)
85587 + break;
85588 + def_stmt = SSA_NAME_DEF_STMT(size);
85589 + if (!def_stmt || !is_gimple_assign(def_stmt))
85590 + break;
85591 + if (gimple_num_ops(def_stmt) != 2)
85592 + break;
85593 + size = gimple_assign_rhs1(def_stmt);
85594 + if (!TREE_CONSTANT(size))
85595 + continue;
85596 + xloc = expand_location(gimple_location(def_stmt));
85597 + if (!xloc.file)
85598 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
85599 + size_val = TREE_INT_CST_LOW(size);
85600 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
85601 + break;
85602 + }
85603 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
85604 +//debug_tree(gimple_call_fn(call_stmt));
85605 +//print_node(stderr, "pax", fndecl, 4);
85606 + }
85607 + }
85608 +
85609 + return 0;
85610 +}
85611 +
85612 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
85613 +{
85614 + const char * const plugin_name = plugin_info->base_name;
85615 + struct register_pass_info kallocstat_pass_info = {
85616 + .pass = &kallocstat_pass.pass,
85617 + .reference_pass_name = "ssa",
85618 + .ref_pass_instance_number = 1,
85619 + .pos_op = PASS_POS_INSERT_AFTER
85620 + };
85621 +
85622 + if (!plugin_default_version_check(version, &gcc_version)) {
85623 + error(G_("incompatible gcc/plugin versions"));
85624 + return 1;
85625 + }
85626 +
85627 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
85628 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
85629 +
85630 + return 0;
85631 +}
85632 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
85633 new file mode 100644
85634 index 0000000..8856202
85635 --- /dev/null
85636 +++ b/tools/gcc/kernexec_plugin.c
85637 @@ -0,0 +1,432 @@
85638 +/*
85639 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
85640 + * Licensed under the GPL v2
85641 + *
85642 + * Note: the choice of the license means that the compilation process is
85643 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
85644 + * but for the kernel it doesn't matter since it doesn't link against
85645 + * any of the gcc libraries
85646 + *
85647 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
85648 + *
85649 + * TODO:
85650 + *
85651 + * BUGS:
85652 + * - none known
85653 + */
85654 +#include "gcc-plugin.h"
85655 +#include "config.h"
85656 +#include "system.h"
85657 +#include "coretypes.h"
85658 +#include "tree.h"
85659 +#include "tree-pass.h"
85660 +#include "flags.h"
85661 +#include "intl.h"
85662 +#include "toplev.h"
85663 +#include "plugin.h"
85664 +//#include "expr.h" where are you...
85665 +#include "diagnostic.h"
85666 +#include "plugin-version.h"
85667 +#include "tm.h"
85668 +#include "function.h"
85669 +#include "basic-block.h"
85670 +#include "gimple.h"
85671 +#include "rtl.h"
85672 +#include "emit-rtl.h"
85673 +#include "tree-flow.h"
85674 +
85675 +extern void print_gimple_stmt(FILE *, gimple, int, int);
85676 +extern rtx emit_move_insn(rtx x, rtx y);
85677 +
85678 +#if BUILDING_GCC_VERSION <= 4006
85679 +#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN)
85680 +#endif
85681 +
85682 +int plugin_is_GPL_compatible;
85683 +
85684 +static struct plugin_info kernexec_plugin_info = {
85685 + .version = "201111291120",
85686 + .help = "method=[bts|or]\tinstrumentation method\n"
85687 +};
85688 +
85689 +static unsigned int execute_kernexec_reload(void);
85690 +static unsigned int execute_kernexec_fptr(void);
85691 +static unsigned int execute_kernexec_retaddr(void);
85692 +static bool kernexec_cmodel_check(void);
85693 +
85694 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
85695 +static void (*kernexec_instrument_retaddr)(rtx);
85696 +
85697 +static struct gimple_opt_pass kernexec_reload_pass = {
85698 + .pass = {
85699 + .type = GIMPLE_PASS,
85700 + .name = "kernexec_reload",
85701 + .gate = kernexec_cmodel_check,
85702 + .execute = execute_kernexec_reload,
85703 + .sub = NULL,
85704 + .next = NULL,
85705 + .static_pass_number = 0,
85706 + .tv_id = TV_NONE,
85707 + .properties_required = 0,
85708 + .properties_provided = 0,
85709 + .properties_destroyed = 0,
85710 + .todo_flags_start = 0,
85711 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
85712 + }
85713 +};
85714 +
85715 +static struct gimple_opt_pass kernexec_fptr_pass = {
85716 + .pass = {
85717 + .type = GIMPLE_PASS,
85718 + .name = "kernexec_fptr",
85719 + .gate = kernexec_cmodel_check,
85720 + .execute = execute_kernexec_fptr,
85721 + .sub = NULL,
85722 + .next = NULL,
85723 + .static_pass_number = 0,
85724 + .tv_id = TV_NONE,
85725 + .properties_required = 0,
85726 + .properties_provided = 0,
85727 + .properties_destroyed = 0,
85728 + .todo_flags_start = 0,
85729 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
85730 + }
85731 +};
85732 +
85733 +static struct rtl_opt_pass kernexec_retaddr_pass = {
85734 + .pass = {
85735 + .type = RTL_PASS,
85736 + .name = "kernexec_retaddr",
85737 + .gate = kernexec_cmodel_check,
85738 + .execute = execute_kernexec_retaddr,
85739 + .sub = NULL,
85740 + .next = NULL,
85741 + .static_pass_number = 0,
85742 + .tv_id = TV_NONE,
85743 + .properties_required = 0,
85744 + .properties_provided = 0,
85745 + .properties_destroyed = 0,
85746 + .todo_flags_start = 0,
85747 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
85748 + }
85749 +};
85750 +
85751 +static bool kernexec_cmodel_check(void)
85752 +{
85753 + tree section;
85754 +
85755 + if (ix86_cmodel != CM_KERNEL)
85756 + return false;
85757 +
85758 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
85759 + if (!section || !TREE_VALUE(section))
85760 + return true;
85761 +
85762 + section = TREE_VALUE(TREE_VALUE(section));
85763 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
85764 + return true;
85765 +
85766 + return false;
85767 +}
85768 +
85769 +/*
85770 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
85771 + */
85772 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
85773 +{
85774 + gimple asm_movabs_stmt;
85775 +
85776 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
85777 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
85778 + gimple_asm_set_volatile(asm_movabs_stmt, true);
85779 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
85780 + update_stmt(asm_movabs_stmt);
85781 +}
85782 +
85783 +/*
85784 + * find all asm() stmts that clobber r10 and add a reload of r10
85785 + */
85786 +static unsigned int execute_kernexec_reload(void)
85787 +{
85788 + basic_block bb;
85789 +
85790 + // 1. loop through BBs and GIMPLE statements
85791 + FOR_EACH_BB(bb) {
85792 + gimple_stmt_iterator gsi;
85793 +
85794 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
85795 + // gimple match: __asm__ ("" : : : "r10");
85796 + gimple asm_stmt;
85797 + size_t nclobbers;
85798 +
85799 + // is it an asm ...
85800 + asm_stmt = gsi_stmt(gsi);
85801 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
85802 + continue;
85803 +
85804 + // ... clobbering r10
85805 + nclobbers = gimple_asm_nclobbers(asm_stmt);
85806 + while (nclobbers--) {
85807 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
85808 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
85809 + continue;
85810 + kernexec_reload_fptr_mask(&gsi);
85811 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
85812 + break;
85813 + }
85814 + }
85815 + }
85816 +
85817 + return 0;
85818 +}
85819 +
85820 +/*
85821 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
85822 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
85823 + */
85824 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
85825 +{
85826 + gimple assign_intptr, assign_new_fptr, call_stmt;
85827 + tree intptr, old_fptr, new_fptr, kernexec_mask;
85828 +
85829 + call_stmt = gsi_stmt(*gsi);
85830 + old_fptr = gimple_call_fn(call_stmt);
85831 +
85832 + // create temporary unsigned long variable used for bitops and cast fptr to it
85833 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
85834 + add_referenced_var(intptr);
85835 + mark_sym_for_renaming(intptr);
85836 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
85837 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
85838 + update_stmt(assign_intptr);
85839 +
85840 + // apply logical or to temporary unsigned long and bitmask
85841 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
85842 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
85843 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
85844 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
85845 + update_stmt(assign_intptr);
85846 +
85847 + // cast temporary unsigned long back to a temporary fptr variable
85848 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
85849 + add_referenced_var(new_fptr);
85850 + mark_sym_for_renaming(new_fptr);
85851 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
85852 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
85853 + update_stmt(assign_new_fptr);
85854 +
85855 + // replace call stmt fn with the new fptr
85856 + gimple_call_set_fn(call_stmt, new_fptr);
85857 + update_stmt(call_stmt);
85858 +}
85859 +
85860 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
85861 +{
85862 + gimple asm_or_stmt, call_stmt;
85863 + tree old_fptr, new_fptr, input, output;
85864 + VEC(tree, gc) *inputs = NULL;
85865 + VEC(tree, gc) *outputs = NULL;
85866 +
85867 + call_stmt = gsi_stmt(*gsi);
85868 + old_fptr = gimple_call_fn(call_stmt);
85869 +
85870 + // create temporary fptr variable
85871 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
85872 + add_referenced_var(new_fptr);
85873 + mark_sym_for_renaming(new_fptr);
85874 +
85875 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
85876 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
85877 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
85878 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
85879 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
85880 + VEC_safe_push(tree, gc, inputs, input);
85881 + VEC_safe_push(tree, gc, outputs, output);
85882 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
85883 + gimple_asm_set_volatile(asm_or_stmt, true);
85884 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
85885 + update_stmt(asm_or_stmt);
85886 +
85887 + // replace call stmt fn with the new fptr
85888 + gimple_call_set_fn(call_stmt, new_fptr);
85889 + update_stmt(call_stmt);
85890 +}
85891 +
85892 +/*
85893 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
85894 + */
85895 +static unsigned int execute_kernexec_fptr(void)
85896 +{
85897 + basic_block bb;
85898 +
85899 + // 1. loop through BBs and GIMPLE statements
85900 + FOR_EACH_BB(bb) {
85901 + gimple_stmt_iterator gsi;
85902 +
85903 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
85904 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
85905 + tree fn;
85906 + gimple call_stmt;
85907 +
85908 + // is it a call ...
85909 + call_stmt = gsi_stmt(gsi);
85910 + if (!is_gimple_call(call_stmt))
85911 + continue;
85912 + fn = gimple_call_fn(call_stmt);
85913 + if (TREE_CODE(fn) == ADDR_EXPR)
85914 + continue;
85915 + if (TREE_CODE(fn) != SSA_NAME)
85916 + gcc_unreachable();
85917 +
85918 + // ... through a function pointer
85919 + fn = SSA_NAME_VAR(fn);
85920 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
85921 + continue;
85922 + fn = TREE_TYPE(fn);
85923 + if (TREE_CODE(fn) != POINTER_TYPE)
85924 + continue;
85925 + fn = TREE_TYPE(fn);
85926 + if (TREE_CODE(fn) != FUNCTION_TYPE)
85927 + continue;
85928 +
85929 + kernexec_instrument_fptr(&gsi);
85930 +
85931 +//debug_tree(gimple_call_fn(call_stmt));
85932 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
85933 + }
85934 + }
85935 +
85936 + return 0;
85937 +}
85938 +
85939 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
85940 +static void kernexec_instrument_retaddr_bts(rtx insn)
85941 +{
85942 + rtx btsq;
85943 + rtvec argvec, constraintvec, labelvec;
85944 + int line;
85945 +
85946 + // create asm volatile("btsq $63,(%%rsp)":::)
85947 + argvec = rtvec_alloc(0);
85948 + constraintvec = rtvec_alloc(0);
85949 + labelvec = rtvec_alloc(0);
85950 + line = expand_location(RTL_LOCATION(insn)).line;
85951 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
85952 + MEM_VOLATILE_P(btsq) = 1;
85953 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
85954 + emit_insn_before(btsq, insn);
85955 +}
85956 +
85957 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
85958 +static void kernexec_instrument_retaddr_or(rtx insn)
85959 +{
85960 + rtx orq;
85961 + rtvec argvec, constraintvec, labelvec;
85962 + int line;
85963 +
85964 + // create asm volatile("orq %%r10,(%%rsp)":::)
85965 + argvec = rtvec_alloc(0);
85966 + constraintvec = rtvec_alloc(0);
85967 + labelvec = rtvec_alloc(0);
85968 + line = expand_location(RTL_LOCATION(insn)).line;
85969 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
85970 + MEM_VOLATILE_P(orq) = 1;
85971 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
85972 + emit_insn_before(orq, insn);
85973 +}
85974 +
85975 +/*
85976 + * find all asm level function returns and forcibly set the highest bit of the return address
85977 + */
85978 +static unsigned int execute_kernexec_retaddr(void)
85979 +{
85980 + rtx insn;
85981 +
85982 + // 1. find function returns
85983 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
85984 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
85985 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
85986 + // (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
85987 + rtx body;
85988 +
85989 + // is it a retn
85990 + if (!JUMP_P(insn))
85991 + continue;
85992 + body = PATTERN(insn);
85993 + if (GET_CODE(body) == PARALLEL)
85994 + body = XVECEXP(body, 0, 0);
85995 + if (!ANY_RETURN_P(body))
85996 + continue;
85997 + kernexec_instrument_retaddr(insn);
85998 + }
85999 +
86000 +// print_simple_rtl(stderr, get_insns());
86001 +// print_rtl(stderr, get_insns());
86002 +
86003 + return 0;
86004 +}
86005 +
86006 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86007 +{
86008 + const char * const plugin_name = plugin_info->base_name;
86009 + const int argc = plugin_info->argc;
86010 + const struct plugin_argument * const argv = plugin_info->argv;
86011 + int i;
86012 + struct register_pass_info kernexec_reload_pass_info = {
86013 + .pass = &kernexec_reload_pass.pass,
86014 + .reference_pass_name = "ssa",
86015 + .ref_pass_instance_number = 1,
86016 + .pos_op = PASS_POS_INSERT_AFTER
86017 + };
86018 + struct register_pass_info kernexec_fptr_pass_info = {
86019 + .pass = &kernexec_fptr_pass.pass,
86020 + .reference_pass_name = "ssa",
86021 + .ref_pass_instance_number = 1,
86022 + .pos_op = PASS_POS_INSERT_AFTER
86023 + };
86024 + struct register_pass_info kernexec_retaddr_pass_info = {
86025 + .pass = &kernexec_retaddr_pass.pass,
86026 + .reference_pass_name = "pro_and_epilogue",
86027 + .ref_pass_instance_number = 1,
86028 + .pos_op = PASS_POS_INSERT_AFTER
86029 + };
86030 +
86031 + if (!plugin_default_version_check(version, &gcc_version)) {
86032 + error(G_("incompatible gcc/plugin versions"));
86033 + return 1;
86034 + }
86035 +
86036 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
86037 +
86038 + if (TARGET_64BIT == 0)
86039 + return 0;
86040 +
86041 + for (i = 0; i < argc; ++i) {
86042 + if (!strcmp(argv[i].key, "method")) {
86043 + if (!argv[i].value) {
86044 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86045 + continue;
86046 + }
86047 + if (!strcmp(argv[i].value, "bts")) {
86048 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
86049 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
86050 + } else if (!strcmp(argv[i].value, "or")) {
86051 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
86052 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
86053 + fix_register("r10", 1, 1);
86054 + } else
86055 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
86056 + continue;
86057 + }
86058 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86059 + }
86060 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
86061 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
86062 +
86063 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
86064 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
86065 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
86066 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
86067 +
86068 + return 0;
86069 +}
86070 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
86071 new file mode 100644
86072 index 0000000..b8008f7
86073 --- /dev/null
86074 +++ b/tools/gcc/latent_entropy_plugin.c
86075 @@ -0,0 +1,295 @@
86076 +/*
86077 + * Copyright 2012 by the PaX Team <pageexec@freemail.hu>
86078 + * Licensed under the GPL v2
86079 + *
86080 + * Note: the choice of the license means that the compilation process is
86081 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86082 + * but for the kernel it doesn't matter since it doesn't link against
86083 + * any of the gcc libraries
86084 + *
86085 + * gcc plugin to help generate a little bit of entropy from program state,
86086 + * used during boot in the kernel
86087 + *
86088 + * TODO:
86089 + * - add ipa pass to identify not explicitly marked candidate functions
86090 + * - mix in more program state (function arguments/return values, loop variables, etc)
86091 + * - more instrumentation control via attribute parameters
86092 + *
86093 + * BUGS:
86094 + * - LTO needs -flto-partition=none for now
86095 + */
86096 +#include "gcc-plugin.h"
86097 +#include "config.h"
86098 +#include "system.h"
86099 +#include "coretypes.h"
86100 +#include "tree.h"
86101 +#include "tree-pass.h"
86102 +#include "flags.h"
86103 +#include "intl.h"
86104 +#include "toplev.h"
86105 +#include "plugin.h"
86106 +//#include "expr.h" where are you...
86107 +#include "diagnostic.h"
86108 +#include "plugin-version.h"
86109 +#include "tm.h"
86110 +#include "function.h"
86111 +#include "basic-block.h"
86112 +#include "gimple.h"
86113 +#include "rtl.h"
86114 +#include "emit-rtl.h"
86115 +#include "tree-flow.h"
86116 +
86117 +int plugin_is_GPL_compatible;
86118 +
86119 +static tree latent_entropy_decl;
86120 +
86121 +static struct plugin_info latent_entropy_plugin_info = {
86122 + .version = "201207271820",
86123 + .help = NULL
86124 +};
86125 +
86126 +static unsigned int execute_latent_entropy(void);
86127 +static bool gate_latent_entropy(void);
86128 +
86129 +static struct gimple_opt_pass latent_entropy_pass = {
86130 + .pass = {
86131 + .type = GIMPLE_PASS,
86132 + .name = "latent_entropy",
86133 + .gate = gate_latent_entropy,
86134 + .execute = execute_latent_entropy,
86135 + .sub = NULL,
86136 + .next = NULL,
86137 + .static_pass_number = 0,
86138 + .tv_id = TV_NONE,
86139 + .properties_required = PROP_gimple_leh | PROP_cfg,
86140 + .properties_provided = 0,
86141 + .properties_destroyed = 0,
86142 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
86143 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
86144 + }
86145 +};
86146 +
86147 +static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
86148 +{
86149 + if (TREE_CODE(*node) != FUNCTION_DECL) {
86150 + *no_add_attrs = true;
86151 + error("%qE attribute only applies to functions", name);
86152 + }
86153 + return NULL_TREE;
86154 +}
86155 +
86156 +static struct attribute_spec latent_entropy_attr = {
86157 + .name = "latent_entropy",
86158 + .min_length = 0,
86159 + .max_length = 0,
86160 + .decl_required = true,
86161 + .type_required = false,
86162 + .function_type_required = false,
86163 + .handler = handle_latent_entropy_attribute,
86164 +#if BUILDING_GCC_VERSION >= 4007
86165 + .affects_type_identity = false
86166 +#endif
86167 +};
86168 +
86169 +static void register_attributes(void *event_data, void *data)
86170 +{
86171 + register_attribute(&latent_entropy_attr);
86172 +}
86173 +
86174 +static bool gate_latent_entropy(void)
86175 +{
86176 + tree latent_entropy_attr;
86177 +
86178 + latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
86179 + return latent_entropy_attr != NULL_TREE;
86180 +}
86181 +
86182 +static unsigned HOST_WIDE_INT seed;
86183 +static unsigned HOST_WIDE_INT get_random_const(void)
86184 +{
86185 + seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
86186 + return seed;
86187 +}
86188 +
86189 +static enum tree_code get_op(tree *rhs)
86190 +{
86191 + static enum tree_code op;
86192 + unsigned HOST_WIDE_INT random_const;
86193 +
86194 + random_const = get_random_const();
86195 +
86196 + switch (op) {
86197 + case BIT_XOR_EXPR:
86198 + op = PLUS_EXPR;
86199 + break;
86200 +
86201 + case PLUS_EXPR:
86202 + if (rhs) {
86203 + op = LROTATE_EXPR;
86204 + random_const &= HOST_BITS_PER_WIDE_INT - 1;
86205 + break;
86206 + }
86207 +
86208 + case LROTATE_EXPR:
86209 + default:
86210 + op = BIT_XOR_EXPR;
86211 + break;
86212 + }
86213 + if (rhs)
86214 + *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
86215 + return op;
86216 +}
86217 +
86218 +static void perturb_local_entropy(basic_block bb, tree local_entropy)
86219 +{
86220 + gimple_stmt_iterator gsi;
86221 + gimple assign;
86222 + tree addxorrol, rhs;
86223 + enum tree_code op;
86224 +
86225 + op = get_op(&rhs);
86226 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
86227 + assign = gimple_build_assign(local_entropy, addxorrol);
86228 + find_referenced_vars_in(assign);
86229 +//debug_bb(bb);
86230 + gsi = gsi_after_labels(bb);
86231 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
86232 + update_stmt(assign);
86233 +}
86234 +
86235 +static void perturb_latent_entropy(basic_block bb, tree rhs)
86236 +{
86237 + gimple_stmt_iterator gsi;
86238 + gimple assign;
86239 + tree addxorrol, temp;
86240 +
86241 + // 1. create temporary copy of latent_entropy
86242 + temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
86243 + add_referenced_var(temp);
86244 + mark_sym_for_renaming(temp);
86245 +
86246 + // 2. read...
86247 + assign = gimple_build_assign(temp, latent_entropy_decl);
86248 + find_referenced_vars_in(assign);
86249 + gsi = gsi_after_labels(bb);
86250 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
86251 + update_stmt(assign);
86252 +
86253 + // 3. ...modify...
86254 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
86255 + assign = gimple_build_assign(temp, addxorrol);
86256 + find_referenced_vars_in(assign);
86257 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
86258 + update_stmt(assign);
86259 +
86260 + // 4. ...write latent_entropy
86261 + assign = gimple_build_assign(latent_entropy_decl, temp);
86262 + find_referenced_vars_in(assign);
86263 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
86264 + update_stmt(assign);
86265 +}
86266 +
86267 +static unsigned int execute_latent_entropy(void)
86268 +{
86269 + basic_block bb;
86270 + gimple assign;
86271 + gimple_stmt_iterator gsi;
86272 + tree local_entropy;
86273 +
86274 + if (!latent_entropy_decl) {
86275 + struct varpool_node *node;
86276 +
86277 + for (node = varpool_nodes; node; node = node->next) {
86278 + tree var = node->decl;
86279 + if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
86280 + continue;
86281 + latent_entropy_decl = var;
86282 +// debug_tree(var);
86283 + break;
86284 + }
86285 + if (!latent_entropy_decl) {
86286 +// debug_tree(current_function_decl);
86287 + return 0;
86288 + }
86289 + }
86290 +
86291 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
86292 +
86293 + // 1. create local entropy variable
86294 + local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
86295 + add_referenced_var(local_entropy);
86296 + mark_sym_for_renaming(local_entropy);
86297 +
86298 + // 2. initialize local entropy variable
86299 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
86300 + if (dom_info_available_p(CDI_DOMINATORS))
86301 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
86302 + gsi = gsi_start_bb(bb);
86303 +
86304 + assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
86305 +// gimple_set_location(assign, loc);
86306 + find_referenced_vars_in(assign);
86307 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
86308 + update_stmt(assign);
86309 + bb = bb->next_bb;
86310 +
86311 + // 3. instrument each BB with an operation on the local entropy variable
86312 + while (bb != EXIT_BLOCK_PTR) {
86313 + perturb_local_entropy(bb, local_entropy);
86314 + bb = bb->next_bb;
86315 + };
86316 +
86317 + // 4. mix local entropy into the global entropy variable
86318 + perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
86319 + return 0;
86320 +}
86321 +
86322 +static void start_unit_callback(void *gcc_data, void *user_data)
86323 +{
86324 +#if BUILDING_GCC_VERSION >= 4007
86325 + seed = get_random_seed(false);
86326 +#else
86327 + sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
86328 + seed *= seed;
86329 +#endif
86330 +
86331 + if (in_lto_p)
86332 + return;
86333 +
86334 + // extern u64 latent_entropy
86335 + latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
86336 +
86337 + TREE_STATIC(latent_entropy_decl) = 1;
86338 + TREE_PUBLIC(latent_entropy_decl) = 1;
86339 + TREE_USED(latent_entropy_decl) = 1;
86340 + TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
86341 + DECL_EXTERNAL(latent_entropy_decl) = 1;
86342 + DECL_ARTIFICIAL(latent_entropy_decl) = 0;
86343 + DECL_INITIAL(latent_entropy_decl) = NULL;
86344 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
86345 +// varpool_finalize_decl(latent_entropy_decl);
86346 +// varpool_mark_needed_node(latent_entropy_decl);
86347 +}
86348 +
86349 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86350 +{
86351 + const char * const plugin_name = plugin_info->base_name;
86352 + struct register_pass_info latent_entropy_pass_info = {
86353 + .pass = &latent_entropy_pass.pass,
86354 + .reference_pass_name = "optimized",
86355 + .ref_pass_instance_number = 1,
86356 + .pos_op = PASS_POS_INSERT_BEFORE
86357 + };
86358 +
86359 + if (!plugin_default_version_check(version, &gcc_version)) {
86360 + error(G_("incompatible gcc/plugin versions"));
86361 + return 1;
86362 + }
86363 +
86364 + register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
86365 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
86366 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
86367 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
86368 +
86369 + return 0;
86370 +}
86371 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
86372 new file mode 100644
86373 index 0000000..5921fd7
86374 --- /dev/null
86375 +++ b/tools/gcc/size_overflow_hash.data
86376 @@ -0,0 +1,3713 @@
86377 +intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
86378 +ipwireless_tty_received_49154 ipwireless_tty_received 3 49154 NULL
86379 +batadv_orig_node_del_if_4 batadv_orig_node_del_if 2 4 NULL
86380 +ipw_queue_tx_init_49161 ipw_queue_tx_init 3 49161 NULL
86381 +xfs_efd_init_5463 xfs_efd_init 3 5463 NULL
86382 +recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL
86383 +sctp_tsnmap_grow_32784 sctp_tsnmap_grow 2 32784 NULL
86384 +rt2x00debug_write_bbp_8212 rt2x00debug_write_bbp 3 8212 NULL
86385 +diva_os_malloc_16406 diva_os_malloc 2 16406 NULL
86386 +compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
86387 +xfs_dir2_leaf_getdents_23841 xfs_dir2_leaf_getdents 3 23841 NULL
86388 +ad7879_spi_multi_read_8218 ad7879_spi_multi_read 3 8218 NULL
86389 +carl9170_alloc_27 carl9170_alloc 1 27 NULL
86390 +dvb_dvr_ioctl_49182 dvb_dvr_ioctl 2 49182 NULL
86391 +create_log_8225 create_log 2 8225 NULL
86392 +ieee80211_if_read_tsf_16420 ieee80211_if_read_tsf 3 16420 NULL
86393 +rproc_name_read_32805 rproc_name_read 3 32805 NULL
86394 +rxrpc_server_keyring_16431 rxrpc_server_keyring 3 16431 NULL
86395 +mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
86396 +il4965_rs_sta_dbgfs_stats_table_read_49206 il4965_rs_sta_dbgfs_stats_table_read 3 49206 NULL
86397 +sel_read_policyvers_55 sel_read_policyvers 3 55 NULL nohasharray
86398 +padzero_55 padzero 1 55 &sel_read_policyvers_55
86399 +cfg80211_disconnected_57 cfg80211_disconnected 4 57 NULL
86400 +alloc_wr_24635 alloc_wr 1-2 24635 NULL
86401 +read_file_blob_57406 read_file_blob 3 57406 NULL
86402 +add_rx_skb_8257 add_rx_skb 3 8257 NULL
86403 +enclosure_register_57412 enclosure_register 3 57412 NULL
86404 +t3_init_l2t_8261 t3_init_l2t 1 8261 NULL
86405 +_req_append_segment_41031 _req_append_segment 2 41031 NULL
86406 +gre_manip_pkt_57416 gre_manip_pkt 4 57416 NULL
86407 +netlink_change_ngroups_16457 netlink_change_ngroups 2 16457 NULL
86408 +mISDN_sock_sendmsg_41035 mISDN_sock_sendmsg 4 41035 NULL
86409 +DepcaSignature_80 DepcaSignature 2 80 NULL nohasharray
86410 +crypto_authenc_setkey_80 crypto_authenc_setkey 3 80 &DepcaSignature_80
86411 +init_cdev_8274 init_cdev 1 8274 NULL
86412 +shmem_pread_fast_34147 shmem_pread_fast 3 34147 NULL
86413 +compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL nohasharray
86414 +alloc_ftrace_hash_57431 alloc_ftrace_hash 1 57431 &compat_keyctl_instantiate_key_iov_57431
86415 +copy_to_user_fromio_57432 copy_to_user_fromio 3 57432 NULL
86416 +rproc_recovery_write_8281 rproc_recovery_write 3 8281 NULL
86417 +snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL
86418 +load_msg_95 load_msg 2 95 NULL
86419 +rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
86420 +new_tape_buffer_32866 new_tape_buffer 2 32866 NULL
86421 +acpi_tb_check_xsdt_21862 acpi_tb_check_xsdt 1 21862 NULL
86422 +sys_pselect6_57449 sys_pselect6 1 57449 NULL
86423 +biovec_create_pools_9575 biovec_create_pools 2 9575 NULL
86424 +ieee80211_if_read_rssi_threshold_49260 ieee80211_if_read_rssi_threshold 3 49260 NULL
86425 +tracing_readme_read_16493 tracing_readme_read 3 16493 NULL
86426 +ath6kl_usb_submit_ctrl_in_32880 ath6kl_usb_submit_ctrl_in 6 32880 NULL nohasharray
86427 +cifs_writedata_alloc_32880 cifs_writedata_alloc 1 32880 &ath6kl_usb_submit_ctrl_in_32880
86428 +cfg80211_inform_bss_frame_41078 cfg80211_inform_bss_frame 4 41078 NULL
86429 +send_midi_async_57463 send_midi_async 3 57463 NULL
86430 +sisusb_clear_vram_57466 sisusb_clear_vram 3-2 57466 NULL
86431 +ath6kl_usb_post_recv_transfers_32892 ath6kl_usb_post_recv_transfers 2 32892 NULL
86432 +ieee80211_if_read_flags_57470 ieee80211_if_read_flags 3 57470 NULL nohasharray
86433 +sep_lock_user_pages_57470 sep_lock_user_pages 2-3 57470 &ieee80211_if_read_flags_57470
86434 +rx_filter_beacon_filter_read_49279 rx_filter_beacon_filter_read 3 49279 NULL
86435 +construct_key_and_link_8321 construct_key_and_link 4 8321 NULL
86436 +ath6kl_set_ap_probe_resp_ies_50539 ath6kl_set_ap_probe_resp_ies 3 50539 NULL
86437 +init_q_132 init_q 4 132 NULL
86438 +roccat_read_41093 roccat_read 3 41093 NULL nohasharray
86439 +nvme_map_user_pages_41093 nvme_map_user_pages 3-4 41093 &roccat_read_41093
86440 +ipwireless_send_packet_8328 ipwireless_send_packet 4 8328 NULL
86441 +unifi_net_data_malloc_24716 unifi_net_data_malloc 3 24716 NULL
86442 +memstick_alloc_host_142 memstick_alloc_host 1 142 NULL
86443 +il_dbgfs_tx_stats_read_32913 il_dbgfs_tx_stats_read 3 32913 NULL
86444 +uio_read_49300 uio_read 3 49300 NULL
86445 +f_audio_buffer_alloc_41110 f_audio_buffer_alloc 1 41110 NULL
86446 +tracing_trace_options_write_153 tracing_trace_options_write 3 153 NULL
86447 +bnad_debugfs_write_regwr_57500 bnad_debugfs_write_regwr 3 57500 NULL
86448 +firmwareUpload_32794 firmwareUpload 3 32794 NULL
86449 +simple_attr_read_24738 simple_attr_read 3 24738 NULL
86450 +play_iframe_8219 play_iframe 3 8219 NULL
86451 +qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
86452 +ping_getfrag_8360 ping_getfrag 4-3 8360 NULL
86453 +ath_rxbuf_alloc_24745 ath_rxbuf_alloc 2 24745 NULL
86454 +nvme_create_queue_170 nvme_create_queue 3 170 NULL
86455 +init_tag_map_57515 init_tag_map 3 57515 NULL
86456 +il_dbgfs_force_reset_read_57517 il_dbgfs_force_reset_read 3 57517 NULL
86457 +srpt_alloc_ioctx_ring_49330 srpt_alloc_ioctx_ring 2-3-4 49330 NULL
86458 +kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
86459 +lpfc_debugfs_read_16566 lpfc_debugfs_read 3 16566 NULL
86460 +xfs_buf_item_get_format_189 xfs_buf_item_get_format 2 189 NULL
86461 +DoC_Probe_57534 DoC_Probe 1 57534 NULL
86462 +cfpkt_setlen_49343 cfpkt_setlen 2 49343 NULL
86463 +agp_allocate_memory_wrap_16576 agp_allocate_memory_wrap 1 16576 NULL
86464 +mI_alloc_skb_24770 mI_alloc_skb 1 24770 NULL
86465 +iscsi_session_setup_196 iscsi_session_setup 4-5 196 NULL
86466 +virtblk_add_req_197 virtblk_add_req 2-3 197 NULL
86467 +il_dbgfs_fh_reg_read_40993 il_dbgfs_fh_reg_read 3 40993 NULL
86468 +rds_tcp_data_recv_53476 rds_tcp_data_recv 3 53476 NULL
86469 +xdi_copy_from_user_8395 xdi_copy_from_user 4 8395 NULL
86470 +skb_make_writable_24783 skb_make_writable 2 24783 NULL
86471 +datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
86472 +dvb_ca_write_41171 dvb_ca_write 3 41171 NULL
86473 +cache_read_24790 cache_read 3 24790 NULL
86474 +px_raw_event_49371 px_raw_event 4 49371 NULL
86475 +tty_register_device_attr_57381 tty_register_device_attr 2 57381 NULL
86476 +compat_filldir_32999 compat_filldir 3 32999 NULL
86477 +hci_si_event_1404 hci_si_event 3 1404 NULL
86478 +compat_sys_process_vm_writev_41194 compat_sys_process_vm_writev 3-5 41194 NULL
86479 +dfs_file_write_41196 dfs_file_write 3 41196 NULL
86480 +afs_cell_create_27346 afs_cell_create 2 27346 NULL
86481 +iscsi_alloc_session_49390 iscsi_alloc_session 3 49390 NULL
86482 +applesmc_create_nodes_49392 applesmc_create_nodes 2 49392 NULL
86483 +snd_usb_ctl_msg_8436 snd_usb_ctl_msg 8 8436 NULL
86484 +comedi_buf_alloc_24822 comedi_buf_alloc 3 24822 NULL
86485 +rx_streaming_always_read_49401 rx_streaming_always_read 3 49401 NULL
86486 +tnode_alloc_49407 tnode_alloc 1 49407 NULL
86487 +tun_alloc_skb_41216 tun_alloc_skb 2-4-3 41216 NULL
86488 +proc_scsi_write_proc_267 proc_scsi_write_proc 3 267 NULL
86489 +__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 NULL
86490 +sk_stream_alloc_skb_57622 sk_stream_alloc_skb 2 57622 NULL
86491 +tx_tx_retry_template_read_57623 tx_tx_retry_template_read 3 57623 NULL
86492 +iwl_dbgfs_ucode_general_stats_read_49199 iwl_dbgfs_ucode_general_stats_read 3 49199 NULL
86493 +mfd_add_devices_16668 mfd_add_devices 4 16668 NULL
86494 +packet_recv_error_16669 packet_recv_error 3 16669 NULL
86495 +osdmap_set_max_osd_57630 osdmap_set_max_osd 2 57630 NULL nohasharray
86496 +sisusbcon_putcs_57630 sisusbcon_putcs 3 57630 &osdmap_set_max_osd_57630
86497 +mem_read_57631 mem_read 3 57631 NULL
86498 +afs_cell_lookup_8482 afs_cell_lookup 2 8482 NULL
86499 +ieee80211_if_read_uapsd_max_sp_len_15067 ieee80211_if_read_uapsd_max_sp_len 3 15067 NULL
86500 +read_file_war_stats_292 read_file_war_stats 3 292 NULL
86501 +pnp_alloc_24869 pnp_alloc 1 24869 NULL nohasharray
86502 +l2cap_create_basic_pdu_24869 l2cap_create_basic_pdu 3 24869 &pnp_alloc_24869
86503 +hiddev_compat_ioctl_41255 hiddev_compat_ioctl 2 41255 NULL
86504 +stats_dot11RTSSuccessCount_read_33065 stats_dot11RTSSuccessCount_read 3 33065 NULL
86505 +sel_read_checkreqprot_33068 sel_read_checkreqprot 3 33068 NULL
86506 +queues_read_24877 queues_read 3 24877 NULL
86507 +__fprog_create_41263 __fprog_create 2 41263 NULL
86508 +syslog_print_307 syslog_print 2 307 NULL
86509 +platform_device_add_data_310 platform_device_add_data 3 310 NULL
86510 +agp_3_5_isochronous_node_enable_49465 agp_3_5_isochronous_node_enable 3 49465 NULL
86511 +dn_setsockopt_314 dn_setsockopt 5 314 NULL
86512 +sys_mq_timedsend_57661 sys_mq_timedsend 3 57661 NULL
86513 +r3964_write_57662 r3964_write 4 57662 NULL
86514 +xfs_iformat_local_49472 xfs_iformat_local 4 49472 NULL
86515 +savu_sysfs_read_49473 savu_sysfs_read 6 49473 NULL
86516 +dn_nsp_do_disc_49474 dn_nsp_do_disc 6-2 49474 NULL
86517 +alloc_context_41283 alloc_context 1 41283 NULL
86518 +__lgwrite_57669 __lgwrite 4 57669 NULL
86519 +ath9k_wmi_cmd_327 ath9k_wmi_cmd 4 327 NULL
86520 +codec_list_read_file_24910 codec_list_read_file 3 24910 NULL
86521 +isr_decrypt_done_read_49490 isr_decrypt_done_read 3 49490 NULL
86522 +alloc_pg_vec_8533 alloc_pg_vec 2 8533 NULL
86523 +pwr_missing_bcns_cnt_read_45113 pwr_missing_bcns_cnt_read 3 45113 NULL
86524 +ieee80211_fragment_33112 ieee80211_fragment 4 33112 NULL
86525 +arch_gnttab_map_shared_41306 arch_gnttab_map_shared 3 41306 NULL
86526 +v4l2_ctrl_new_24927 v4l2_ctrl_new 7 24927 NULL
86527 +write_node_33121 write_node 4 33121 NULL
86528 +vring_new_virtqueue_54673 vring_new_virtqueue 2 54673 NULL
86529 +i2400m_rx_stats_read_57706 i2400m_rx_stats_read 3 57706 NULL
86530 +profile_remove_8556 profile_remove 3 8556 NULL
86531 +rx_filter_data_filter_read_30098 rx_filter_data_filter_read 3 30098 NULL
86532 +iscsi_recv_pdu_16755 iscsi_recv_pdu 4 16755 NULL
86533 +arcmsr_adjust_disk_queue_depth_16756 arcmsr_adjust_disk_queue_depth 2 16756 NULL
86534 +cmtp_send_interopmsg_376 cmtp_send_interopmsg 7 376 NULL
86535 +ieee80211_if_read_dot11MeshHWMPconfirmationInterval_57722 ieee80211_if_read_dot11MeshHWMPconfirmationInterval 3 57722 NULL
86536 +mga_ioremap_8571 mga_ioremap 1-2 8571 NULL
86537 +isr_dma0_done_read_8574 isr_dma0_done_read 3 8574 NULL
86538 +sys_rt_sigpending_24961 sys_rt_sigpending 2 24961 NULL
86539 +tower_write_8580 tower_write 3 8580 NULL
86540 +cnic_init_id_tbl_41354 cnic_init_id_tbl 2 41354 NULL
86541 +compat_sys_set_mempolicy_57742 compat_sys_set_mempolicy 3 57742 NULL nohasharray
86542 +pppol2tp_recvmsg_57742 pppol2tp_recvmsg 4 57742 &compat_sys_set_mempolicy_57742
86543 +jbd2_alloc_41359 jbd2_alloc 1 41359 NULL
86544 +debug_debug6_read_33168 debug_debug6_read 3 33168 NULL
86545 +nf_nat_sdp_port_24977 nf_nat_sdp_port 7 24977 NULL
86546 +smk_write_access_49561 smk_write_access 3 49561 NULL
86547 +llc_ui_sendmsg_24987 llc_ui_sendmsg 4 24987 NULL
86548 +kmp_init_41373 kmp_init 2 41373 NULL
86549 +context_alloc_24645 context_alloc 3 24645 NULL
86550 +lbs_rdmac_read_418 lbs_rdmac_read 3 418 NULL
86551 +get_server_iovec_16804 get_server_iovec 2 16804 NULL
86552 +alloc_chunk_49575 alloc_chunk 1 49575 NULL
86553 +tipc_send2name_16809 tipc_send2name 6 16809 NULL
86554 +sctp_setsockopt_default_send_param_49578 sctp_setsockopt_default_send_param 3 49578 NULL
86555 +key_conf_hw_key_idx_read_25003 key_conf_hw_key_idx_read 3 25003 NULL
86556 +shash_setkey_unaligned_8620 shash_setkey_unaligned 3 8620 NULL
86557 +il_dbgfs_channels_read_25005 il_dbgfs_channels_read 3 25005 NULL
86558 +dm_vcalloc_16814 dm_vcalloc 1-2 16814 NULL
86559 +it821x_firmware_command_8628 it821x_firmware_command 3 8628 NULL
86560 +isr_commands_read_41398 isr_commands_read 3 41398 NULL
86561 +pp_read_33210 pp_read 3 33210 NULL
86562 +sys_flistxattr_41407 sys_flistxattr 3 41407 NULL
86563 +scsi_mode_sense_16835 scsi_mode_sense 5 16835 NULL
86564 +ivtv_read_57796 ivtv_read 3 57796 NULL
86565 +isr_wakeups_read_49607 isr_wakeups_read 3 49607 NULL
86566 +nfs_dns_resolve_name_25036 nfs_dns_resolve_name 3 25036 NULL
86567 +xfs_iext_add_41422 xfs_iext_add 3 41422 NULL
86568 +heap_init_49617 heap_init 2 49617 NULL
86569 +xfs_file_aio_write_33234 xfs_file_aio_write 4 33234 NULL
86570 +ieee80211_send_probe_req_38307 ieee80211_send_probe_req 6-4 38307 NULL
86571 +isdn_ppp_fill_rq_41428 isdn_ppp_fill_rq 2 41428 NULL
86572 +smk_write_doi_49621 smk_write_doi 3 49621 NULL
86573 +_alloc_get_attr_desc_470 _alloc_get_attr_desc 2 470 NULL
86574 +lbs_rdrf_read_41431 lbs_rdrf_read 3 41431 NULL
86575 +btrfsic_cmp_log_and_dev_bytenr_49628 btrfsic_cmp_log_and_dev_bytenr 2 49628 NULL
86576 +iio_device_alloc_41440 iio_device_alloc 1 41440 NULL
86577 +ntfs_file_buffered_write_41442 ntfs_file_buffered_write 4-6 41442 NULL
86578 +pcpu_build_alloc_info_41443 pcpu_build_alloc_info 1-2-3 41443 NULL
86579 +dio_bio_alloc_8677 dio_bio_alloc 5 8677 NULL
86580 +bfad_debugfs_read_regrd_57830 bfad_debugfs_read_regrd 3 57830 NULL
86581 +st_write_16874 st_write 3 16874 NULL
86582 +copy_to_user_57835 copy_to_user 3 57835 NULL
86583 +rproc_trace_read_8686 rproc_trace_read 3 8686 NULL
86584 +pidlist_resize_496 pidlist_resize 2 496 NULL
86585 +flash_read_57843 flash_read 3 57843 NULL
86586 +read_vbt_r0_503 read_vbt_r0 1 503 NULL
86587 +rx_rx_defrag_end_read_505 rx_rx_defrag_end_read 3 505 NULL
86588 +cachefiles_cook_key_33274 cachefiles_cook_key 2 33274 NULL
86589 +rds_message_map_pages_31487 rds_message_map_pages 2 31487 NULL
86590 +arcfb_write_8702 arcfb_write 3 8702 NULL
86591 +gsm_control_reply_53333 gsm_control_reply 4 53333 NULL
86592 +smp_send_cmd_512 smp_send_cmd 3 512 NULL
86593 +rt2x00debug_write_rfcsr_41473 rt2x00debug_write_rfcsr 3 41473 NULL
86594 +rfcomm_sock_sendmsg_37661 rfcomm_sock_sendmsg 4 37661 NULL nohasharray
86595 +vmw_framebuffer_dmabuf_dirty_37661 vmw_framebuffer_dmabuf_dirty 6 37661 &rfcomm_sock_sendmsg_37661
86596 +HDLC_irq_8709 HDLC_irq 2 8709 NULL
86597 +ctrl_out_8712 ctrl_out 3-5 8712 NULL
86598 +cxio_hal_init_rhdl_resource_25104 cxio_hal_init_rhdl_resource 1 25104 NULL
86599 +sock_wmalloc_16472 sock_wmalloc 2 16472 NULL
86600 +snd_rawmidi_kernel_write_25106 snd_rawmidi_kernel_write 3 25106 NULL
86601 +aa_simple_write_to_buffer_49683 aa_simple_write_to_buffer 3-4 49683 NULL
86602 +wep_interrupt_read_41492 wep_interrupt_read 3 41492 NULL
86603 +hpfs_translate_name_41497 hpfs_translate_name 3 41497 NULL
86604 +mcs7830_get_reg_33308 mcs7830_get_reg 3 33308 NULL
86605 +psb_unlocked_ioctl_16926 psb_unlocked_ioctl 2 16926 NULL nohasharray
86606 +snd_gf1_mem_proc_dump_16926 snd_gf1_mem_proc_dump 5 16926 &psb_unlocked_ioctl_16926
86607 +iwl_dbgfs_reply_tx_error_read_19205 iwl_dbgfs_reply_tx_error_read 3 19205 NULL
86608 +sys_gethostname_49698 sys_gethostname 2 49698 NULL
86609 +cx2341x_ctrl_new_menu_49700 cx2341x_ctrl_new_menu 3 49700 NULL
86610 +ieee80211_rx_mgmt_probe_resp_6918 ieee80211_rx_mgmt_probe_resp 3 6918 NULL
86611 +devres_alloc_551 devres_alloc 2 551 NULL
86612 +ldisc_receive_41516 ldisc_receive 4 41516 NULL
86613 +tx_tx_exch_expiry_read_8749 tx_tx_exch_expiry_read 3 8749 NULL
86614 +ip_append_data_16942 ip_append_data 5-6 16942 NULL
86615 +xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL
86616 +_sp2d_alloc_16944 _sp2d_alloc 1-2-3 16944 NULL
86617 +squashfs_read_table_16945 squashfs_read_table 3 16945 NULL
86618 +emi26_writememory_57908 emi26_writememory 4 57908 NULL
86619 +start_isoc_chain_565 start_isoc_chain 2 565 NULL
86620 +iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 NULL
86621 +gsm_mux_rx_netchar_33336 gsm_mux_rx_netchar 3 33336 NULL
86622 +brcmu_pkt_buf_get_skb_5556 brcmu_pkt_buf_get_skb 1 5556 NULL
86623 +joydev_compat_ioctl_8765 joydev_compat_ioctl 2 8765 NULL
86624 +sys_prctl_8766 sys_prctl 4 8766 NULL
86625 +joydev_ioctl_33343 joydev_ioctl 2 33343 NULL
86626 +sep_create_dcb_dmatables_context_kernel_49728 sep_create_dcb_dmatables_context_kernel 6 49728 NULL
86627 +compat_sys_preadv_583 compat_sys_preadv 3 583 NULL
86628 +sys_fsetxattr_49736 sys_fsetxattr 4 49736 NULL
86629 +keyctl_instantiate_key_iov_16969 keyctl_instantiate_key_iov 3 16969 NULL
86630 +create_xattr_datum_33356 create_xattr_datum 5 33356 NULL nohasharray
86631 +irq_pkt_threshold_read_33356 irq_pkt_threshold_read 3 33356 &create_xattr_datum_33356
86632 +sel_read_handle_unknown_57933 sel_read_handle_unknown 3 57933 NULL
86633 +sys_fgetxattr_25166 sys_fgetxattr 4 25166 NULL
86634 +ipath_init_qp_table_25167 ipath_init_qp_table 2 25167 NULL
86635 +zd_usb_iowrite16v_49744 zd_usb_iowrite16v 3 49744 NULL
86636 +tx_tx_frame_checksum_read_41553 tx_tx_frame_checksum_read 3 41553 NULL
86637 +ath6kl_endpoint_stats_read_41554 ath6kl_endpoint_stats_read 3 41554 NULL
86638 +ath6kl_buf_alloc_57304 ath6kl_buf_alloc 1 57304 NULL
86639 +cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
86640 +gserial_setup_41558 gserial_setup 2 41558 NULL
86641 +rx_rx_checksum_result_read_50617 rx_rx_checksum_result_read 3 50617 NULL
86642 +rx_57944 rx 4 57944 NULL
86643 +sctp_getsockopt_local_addrs_25178 sctp_getsockopt_local_addrs 2 25178 NULL
86644 +nci_skb_alloc_49757 nci_skb_alloc 2 49757 NULL
86645 +key_conf_keylen_read_49758 key_conf_keylen_read 3 49758 NULL
86646 +cpuset_common_file_read_8800 cpuset_common_file_read 5 8800 NULL
86647 +sctp_ulpevent_new_33377 sctp_ulpevent_new 1 33377 NULL
86648 +fuse_conn_waiting_read_49762 fuse_conn_waiting_read 3 49762 NULL
86649 +mempool_create_slab_pool_62907 mempool_create_slab_pool 1 62907 NULL
86650 +isku_sysfs_write_49767 isku_sysfs_write 6 49767 NULL
86651 +i915_cache_sharing_write_57961 i915_cache_sharing_write 3 57961 NULL
86652 +batadv_receive_client_update_packet_41578 batadv_receive_client_update_packet 3 41578 NULL
86653 +ceph_read_dir_17005 ceph_read_dir 3 17005 NULL
86654 +handle_response_icmp_39574 handle_response_icmp 7 39574 NULL
86655 +wbcir_tx_19219 wbcir_tx 3 19219 NULL
86656 +hfc_empty_fifo_57972 hfc_empty_fifo 2 57972 NULL
86657 +ceph_dns_resolve_name_62488 ceph_dns_resolve_name 2 62488 NULL
86658 +metronomefb_write_8823 metronomefb_write 3 8823 NULL
86659 +icmpv6_manip_pkt_8833 icmpv6_manip_pkt 4 8833 NULL
86660 +copy_counters_to_user_17027 copy_counters_to_user 5 17027 NULL
86661 +read_file_rx_chainmask_41605 read_file_rx_chainmask 3 41605 NULL
86662 +vmw_du_crtc_cursor_set_28479 vmw_du_crtc_cursor_set 4-5 28479 NULL
86663 +_osd_req_list_objects_4204 _osd_req_list_objects 6 4204 NULL
86664 +nfs4_acl_new_49806 nfs4_acl_new 1 49806 NULL
86665 +a2mp_send_41615 a2mp_send 4 41615 NULL
86666 +ceph_copy_user_to_page_vector_656 ceph_copy_user_to_page_vector 4-3 656 NULL
86667 +rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
86668 +arch_gnttab_map_status_49812 arch_gnttab_map_status 3 49812 NULL
86669 +mon_stat_read_25238 mon_stat_read 3 25238 NULL
86670 +jffs2_trusted_setxattr_17048 jffs2_trusted_setxattr 4 17048 NULL
86671 +tcf_csum_ipv6_udp_25241 tcf_csum_ipv6_udp 4 25241 NULL
86672 +wa_nep_queue_8858 wa_nep_queue 2 8858 NULL
86673 +macvtap_alloc_skb_50629 macvtap_alloc_skb 2-4-3 50629 NULL
86674 +mem_rx_free_mem_blks_read_675 mem_rx_free_mem_blks_read 3 675 NULL
86675 +ntfs_copy_from_user_iovec_49829 ntfs_copy_from_user_iovec 3-6 49829 NULL
86676 +add_uuid_49831 add_uuid 4 49831 NULL
86677 +send_pages_8872 send_pages 3 8872 NULL
86678 +ath6kl_fwlog_block_read_49836 ath6kl_fwlog_block_read 3 49836 NULL
86679 +__btrfs_map_block_49839 __btrfs_map_block 3 49839 NULL
86680 +dvb_dvr_read_17073 dvb_dvr_read 3 17073 NULL
86681 +mempool_create_kmalloc_pool_41650 mempool_create_kmalloc_pool 1 41650 NULL
86682 +simple_transaction_read_17076 simple_transaction_read 3 17076 NULL
86683 +rx_rx_pre_complt_read_41653 rx_rx_pre_complt_read 3 41653 NULL
86684 +__kmalloc_reserve_17080 __kmalloc_reserve 1 17080 NULL
86685 +timeradd_entry_49850 timeradd_entry 3 49850 NULL
86686 +crypto_alloc_instance2_25277 crypto_alloc_instance2 3 25277 NULL
86687 +vfs_writev_25278 vfs_writev 3 25278 NULL
86688 +rtl8169_try_rx_copy_705 rtl8169_try_rx_copy 3 705 NULL
86689 +alloc_async_14208 alloc_async 1 14208 NULL
86690 +ovs_vport_alloc_33475 ovs_vport_alloc 1 33475 NULL
86691 +persistent_ram_vmap_709 persistent_ram_vmap 2-1 709 NULL
86692 +l2tp_session_create_25286 l2tp_session_create 1 25286 NULL
86693 +create_entry_33479 create_entry 2 33479 NULL
86694 +mce_async_out_58056 mce_async_out 3 58056 NULL
86695 +alloc_sja1000dev_17868 alloc_sja1000dev 1 17868 NULL
86696 +sys_preadv_17100 sys_preadv 3 17100 NULL
86697 +sctp_setsockopt_bindx_49870 sctp_setsockopt_bindx 3 49870 NULL
86698 +ip_setsockopt_33487 ip_setsockopt 5 33487 NULL
86699 +netxen_nic_hw_write_wx_128M_33488 netxen_nic_hw_write_wx_128M 2 33488 NULL
86700 +aac_src_ioremap_41688 aac_src_ioremap 2 41688 NULL
86701 +dt3155_alloc_coherent_58073 dt3155_alloc_coherent 2 58073 NULL
86702 +res_counter_read_33499 res_counter_read 4 33499 NULL
86703 +sctp_setsockopt_peer_addr_params_734 sctp_setsockopt_peer_addr_params 3 734 NULL
86704 +cm4040_write_58079 cm4040_write 3 58079 NULL
86705 +fb_read_33506 fb_read 3 33506 NULL
86706 +help_25316 help 5 25316 NULL nohasharray
86707 +ath9k_debugfs_read_buf_25316 ath9k_debugfs_read_buf 3 25316 &help_25316
86708 +rfcomm_wmalloc_58090 rfcomm_wmalloc 2 58090 NULL
86709 +mwifiex_get_common_rates_17131 mwifiex_get_common_rates 3 17131 NULL
86710 +musb_test_mode_write_33518 musb_test_mode_write 3 33518 NULL
86711 +ddp_set_map_751 ddp_set_map 4 751 NULL
86712 +driver_stats_read_8944 driver_stats_read 3 8944 NULL
86713 +ahash_setkey_unaligned_33521 ahash_setkey_unaligned 3 33521 NULL
86714 +dvb_video_write_754 dvb_video_write 3 754 NULL
86715 +nes_alloc_fast_reg_page_list_33523 nes_alloc_fast_reg_page_list 2 33523 NULL
86716 +osd_req_add_get_attr_list_49278 osd_req_add_get_attr_list 3 49278 NULL
86717 +aggr_size_rx_size_read_33526 aggr_size_rx_size_read 3 33526 NULL
86718 +bdx_tx_db_init_41719 bdx_tx_db_init 2 41719 NULL
86719 +nouveau_instobj_create__17144 nouveau_instobj_create_ 4 17144 NULL
86720 +udi_log_event_58105 udi_log_event 3 58105 NULL
86721 +sys_pwritev_41722 sys_pwritev 3 41722 NULL
86722 +l2cap_sock_alloc_skb_cb_33532 l2cap_sock_alloc_skb_cb 2 33532 NULL
86723 +ib_send_cm_mra_60202 ib_send_cm_mra 4 60202 NULL nohasharray
86724 +qib_reg_phys_mr_60202 qib_reg_phys_mr 3 60202 &ib_send_cm_mra_60202
86725 +read_file_tgt_stats_8959 read_file_tgt_stats 3 8959 NULL
86726 +__copy_from_user_inatomic_nocache_49921 __copy_from_user_inatomic_nocache 3 49921 NULL
86727 +tomoyo_read_self_33539 tomoyo_read_self 3 33539 NULL
86728 +usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL
86729 +tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
86730 +venus_mkdir_8967 venus_mkdir 4 8967 NULL
86731 +vol_cdev_read_8968 vol_cdev_read 3 8968 NULL nohasharray
86732 +seq_open_net_8968 seq_open_net 4 8968 &vol_cdev_read_8968
86733 +sep_read_17161 sep_read 3 17161 NULL
86734 +befs_nls2utf_17163 befs_nls2utf 3 17163 NULL
86735 +tx_tx_start_templates_read_17164 tx_tx_start_templates_read 3 17164 NULL
86736 +dup_array_33551 dup_array 3 33551 NULL
86737 +vxge_device_register_7752 vxge_device_register 4 7752 NULL
86738 +solo_enc_read_33553 solo_enc_read 3 33553 NULL
86739 +fillonedir_41746 fillonedir 3 41746 NULL
86740 +init_bch_64130 init_bch 1-2 64130 NULL
86741 +ipv6_flowlabel_opt_58135 ipv6_flowlabel_opt 3 58135 NULL nohasharray
86742 +slhc_init_58135 slhc_init 1-2 58135 &ipv6_flowlabel_opt_58135
86743 +sel_read_mls_25369 sel_read_mls 3 25369 NULL
86744 +btrfs_alloc_free_block_8986 btrfs_alloc_free_block 3 8986 NULL
86745 +savemem_58129 savemem 3 58129 NULL
86746 +batadv_tt_realloc_packet_buff_49960 batadv_tt_realloc_packet_buff 4 49960 NULL
86747 +rx_defrag_tkip_called_read_21031 rx_defrag_tkip_called_read 3 21031 NULL
86748 +driver_state_read_17194 driver_state_read 3 17194 NULL nohasharray
86749 +iscsit_find_cmd_from_itt_or_dump_17194 iscsit_find_cmd_from_itt_or_dump 3 17194 &driver_state_read_17194
86750 +dispatch_ioctl_32357 dispatch_ioctl 2 32357 NULL
86751 +if_writecmd_815 if_writecmd 2 815 NULL
86752 +aac_change_queue_depth_825 aac_change_queue_depth 2 825 NULL
86753 +read_fifo_826 read_fifo 3 826 NULL
86754 +keyctl_describe_key_36853 keyctl_describe_key 3 36853 NULL
86755 +scsi_execute_33596 scsi_execute 5 33596 NULL
86756 +dn_recvmsg_17213 dn_recvmsg 4 17213 NULL
86757 +ms_rw_17220 ms_rw 3-4 17220 NULL
86758 +read_tree_block_841 read_tree_block 3 841 NULL
86759 +hsi_alloc_controller_41802 hsi_alloc_controller 1 41802 NULL
86760 +l2cap_chan_send_49995 l2cap_chan_send 3 49995 NULL
86761 +dai_list_read_file_25421 dai_list_read_file 3 25421 NULL
86762 +__pskb_copy_9038 __pskb_copy 2 9038 NULL
86763 +garmin_write_bulk_58191 garmin_write_bulk 3 58191 NULL
86764 +asix_write_cmd_58192 asix_write_cmd 5 58192 NULL
86765 +um_idi_read_850 um_idi_read 3 850 NULL
86766 +__module_alloc_50004 __module_alloc 1 50004 NULL
86767 +sco_send_frame_41815 sco_send_frame 3 41815 NULL
86768 +ts_read_44687 ts_read 3 44687 NULL
86769 +nci_send_cmd_58206 nci_send_cmd 3 58206 NULL
86770 +snd_emu10k1_synth_copy_from_user_9061 snd_emu10k1_synth_copy_from_user 3-5 9061 NULL
86771 +snd_gus_dram_peek_9062 snd_gus_dram_peek 4 9062 NULL
86772 +provide_user_output_41105 provide_user_output 3 41105 NULL
86773 +error_error_frame_cts_nul_flid_read_17262 error_error_frame_cts_nul_flid_read 3 17262 NULL
86774 +o2net_send_message_vec_879 o2net_send_message_vec 4 879 NULL nohasharray
86775 +iwl_dbgfs_fh_reg_read_879 iwl_dbgfs_fh_reg_read 3 879 &o2net_send_message_vec_879
86776 +fib_info_hash_alloc_9075 fib_info_hash_alloc 1 9075 NULL
86777 +alloc_ep_17269 alloc_ep 1 17269 NULL
86778 +ath6kl_wmi_beginscan_cmd_25462 ath6kl_wmi_beginscan_cmd 8 25462 NULL
86779 +ieee80211_if_read_aid_9705 ieee80211_if_read_aid 3 9705 NULL
86780 +generic_file_buffered_write_25464 generic_file_buffered_write 4 25464 NULL
86781 +do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
86782 +raw_recvmsg_17277 raw_recvmsg 4 17277 NULL
86783 +alloc_ebda_hpc_50046 alloc_ebda_hpc 1-2 50046 NULL
86784 +keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
86785 +create_queues_9088 create_queues 2-3 9088 NULL
86786 +irq_blk_threshold_read_33666 irq_blk_threshold_read 3 33666 NULL
86787 +neigh_hash_grow_17283 neigh_hash_grow 2 17283 NULL
86788 +btrfsic_create_link_to_next_block_58246 btrfsic_create_link_to_next_block 4 58246 NULL
86789 +minstrel_stats_read_17290 minstrel_stats_read 3 17290 NULL
86790 +rbd_alloc_coll_33678 rbd_alloc_coll 1 33678 NULL
86791 +read_file_debug_58256 read_file_debug 3 58256 NULL
86792 +skb_pad_17302 skb_pad 2 17302 NULL
86793 +tracing_entries_read_8345 tracing_entries_read 3 8345 NULL
86794 +btmrvl_hsstate_read_920 btmrvl_hsstate_read 3 920 NULL
86795 +profile_load_58267 profile_load 3 58267 NULL
86796 +pmcraid_change_queue_depth_9116 pmcraid_change_queue_depth 2 9116 NULL
86797 +ivtv_buf_copy_from_user_25502 ivtv_buf_copy_from_user 4 25502 NULL
86798 +acpi_ds_build_internal_package_obj_58271 acpi_ds_build_internal_package_obj 3 58271 NULL
86799 +snd_pcm_plugin_build_25505 snd_pcm_plugin_build 5 25505 NULL
86800 +dev_set_alias_50084 dev_set_alias 3 50084 NULL
86801 +pcpu_get_vm_areas_50085 pcpu_get_vm_areas 3 50085 NULL
86802 +sock_setsockopt_50088 sock_setsockopt 5 50088 NULL
86803 +altera_swap_dr_50090 altera_swap_dr 2 50090 NULL
86804 +sys_keyctl_33708 sys_keyctl 4 33708 NULL nohasharray
86805 +netlink_sendmsg_33708 netlink_sendmsg 4 33708 &sys_keyctl_33708
86806 +ata_host_alloc_pinfo_17325 ata_host_alloc_pinfo 3 17325 NULL
86807 +iscsi_decode_text_input_58292 iscsi_decode_text_input 4 58292 NULL
86808 +carl9170_cmd_buf_950 carl9170_cmd_buf 3 950 NULL
86809 +pvr2_stream_buffer_count_33719 pvr2_stream_buffer_count 2 33719 NULL
86810 +get_packet_41914 get_packet 3 41914 NULL
86811 +get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
86812 +ceph_get_direct_page_vector_41917 ceph_get_direct_page_vector 2 41917 NULL
86813 +read_file_slot_50111 read_file_slot 3 50111 NULL
86814 +netlink_recvmsg_61600 netlink_recvmsg 4 61600 NULL
86815 +ieee80211_if_read_dot11MeshHWMPperrMinInterval_17346 ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 NULL
86816 +ath6kl_wmi_send_mgmt_cmd_17347 ath6kl_wmi_send_mgmt_cmd 7 17347 NULL
86817 +serverworks_create_gatt_pages_46582 serverworks_create_gatt_pages 1 46582 NULL
86818 +ieee80211_if_read_path_refresh_time_25545 ieee80211_if_read_path_refresh_time 3 25545 NULL
86819 +tx_tx_start_int_templates_read_58324 tx_tx_start_int_templates_read 3 58324 NULL
86820 +copy_items_50140 copy_items 6 50140 NULL
86821 +omfs_readpages_42490 omfs_readpages 4 42490 NULL
86822 +pcim_iomap_58334 pcim_iomap 3 58334 NULL
86823 +diva_init_dma_map_58336 diva_init_dma_map 3 58336 NULL
86824 +map_addr_56144 map_addr 7 56144 NULL
86825 +vifs_state_read_33762 vifs_state_read 3 33762 NULL
86826 +btmrvl_psstate_read_50683 btmrvl_psstate_read 3 50683 NULL
86827 +hdlcdev_rx_997 hdlcdev_rx 3 997 NULL
86828 +portnames_read_41958 portnames_read 3 41958 NULL
86829 +ubi_self_check_all_ff_41959 ubi_self_check_all_ff 4 41959 NULL
86830 +hashtab_create_33769 hashtab_create 3 33769 NULL
86831 +alloc_group_attrs_9194 alloc_group_attrs 2 9194 NULL nohasharray
86832 +altera_swap_ir_9194 altera_swap_ir 2 9194 &alloc_group_attrs_9194
86833 +vmalloc_to_sg_58354 vmalloc_to_sg 2 58354 NULL
86834 +aac_nark_ioremap_50163 aac_nark_ioremap 2 50163 NULL nohasharray
86835 +kmalloc_node_50163 kmalloc_node 1 50163 &aac_nark_ioremap_50163
86836 +cx24116_writeregN_41975 cx24116_writeregN 4 41975 NULL
86837 +odev_update_50169 odev_update 2 50169 NULL
86838 +ubi_resize_volume_50172 ubi_resize_volume 2 50172 NULL
86839 +smk_write_cipso2_1021 smk_write_cipso2 3 1021 NULL
86840 +__devres_alloc_25598 __devres_alloc 2 25598 NULL
86841 +snd_compr_write_data_46592 snd_compr_write_data 3 46592 NULL
86842 +netpoll_send_udp_58955 netpoll_send_udp 3 58955 NULL
86843 +tx_tx_prepared_descs_read_9221 tx_tx_prepared_descs_read 3 9221 NULL
86844 +ib_send_cm_drep_50186 ib_send_cm_drep 3 50186 NULL
86845 +do_write_orph_node_64343 do_write_orph_node 2 64343 NULL
86846 +qla4xxx_post_aen_work_46953 qla4xxx_post_aen_work 3 46953 NULL
86847 +lpfc_debugfs_dif_err_write_17424 lpfc_debugfs_dif_err_write 3 17424 NULL
86848 +il_dbgfs_status_read_58388 il_dbgfs_status_read 3 58388 NULL
86849 +cfg80211_roamed_bss_50198 cfg80211_roamed_bss 4-6 50198 NULL
86850 +lguest_map_42008 lguest_map 1-2 42008 NULL
86851 +proc_coredump_filter_write_25625 proc_coredump_filter_write 3 25625 NULL
86852 +sta_connected_time_read_17435 sta_connected_time_read 3 17435 NULL
86853 +pool_allocate_42012 pool_allocate 3 42012 NULL
86854 +l2cap_sock_setsockopt_50207 l2cap_sock_setsockopt 5 50207 NULL
86855 +sctp_make_init_58401 sctp_make_init 4 58401 NULL
86856 +ieee80211_skb_resize_50211 ieee80211_skb_resize 3 50211 NULL
86857 +gigaset_initdriver_1060 gigaset_initdriver 2 1060 NULL
86858 +sep_create_msgarea_context_33829 sep_create_msgarea_context 4 33829 NULL
86859 +skb_do_copy_data_nocache_12465 skb_do_copy_data_nocache 5 12465 NULL
86860 +udplite_manip_pkt_33832 udplite_manip_pkt 4 33832 NULL
86861 +tcf_csum_ipv4_icmp_9258 tcf_csum_ipv4_icmp 3 9258 NULL
86862 +acpi_ut_create_buffer_object_42030 acpi_ut_create_buffer_object 1 42030 NULL
86863 +mce_request_packet_1073 mce_request_packet 3 1073 NULL
86864 +agp_create_memory_1075 agp_create_memory 1 1075 NULL
86865 +sparse_early_usemaps_alloc_node_9269 sparse_early_usemaps_alloc_node 4 9269 NULL
86866 +__hwahc_op_set_gtk_42038 __hwahc_op_set_gtk 4 42038 NULL
86867 +iscsi_offload_mesg_58425 iscsi_offload_mesg 5 58425 NULL
86868 +mon_bin_compat_ioctl_50234 mon_bin_compat_ioctl 3 50234 NULL
86869 +_scsih_adjust_queue_depth_1083 _scsih_adjust_queue_depth 2 1083 NULL
86870 +oz_cdev_write_33852 oz_cdev_write 3 33852 NULL
86871 +nfs_pgarray_set_1085 nfs_pgarray_set 2 1085 NULL
86872 +irda_sendmsg_ultra_42047 irda_sendmsg_ultra 4 42047 NULL
86873 +sg_kmalloc_50240 sg_kmalloc 1 50240 NULL
86874 +ps_poll_upsd_max_ap_turn_read_42050 ps_poll_upsd_max_ap_turn_read 3 42050 NULL
86875 +llcp_sock_sendmsg_1092 llcp_sock_sendmsg 4 1092 NULL
86876 +probe_kernel_write_17481 probe_kernel_write 3 17481 NULL
86877 +InterfaceTransmitPacket_42058 InterfaceTransmitPacket 3 42058 NULL
86878 +__alloc_session_17485 __alloc_session 2-1 17485 NULL
86879 +TSS_rawhmac_17486 TSS_rawhmac 3 17486 NULL
86880 +pm860x_bulk_write_43875 pm860x_bulk_write 3 43875 NULL
86881 +afs_extract_data_50261 afs_extract_data 5 50261 NULL
86882 +config_proc_write_33878 config_proc_write 3 33878 NULL
86883 +capabilities_read_58457 capabilities_read 3 58457 NULL
86884 +sta_inactive_ms_read_25690 sta_inactive_ms_read 3 25690 NULL
86885 +iwl_dbgfs_stations_read_9309 iwl_dbgfs_stations_read 3 9309 NULL
86886 +lpfc_idiag_baracc_read_58466 lpfc_idiag_baracc_read 3 58466 NULL nohasharray
86887 +compat_do_ipt_set_ctl_58466 compat_do_ipt_set_ctl 4 58466 &lpfc_idiag_baracc_read_58466
86888 +scsi_execute_req_42088 scsi_execute_req 5 42088 NULL
86889 +hcd_buffer_alloc_27495 hcd_buffer_alloc 2 27495 NULL
86890 +rxrpc_setsockopt_50286 rxrpc_setsockopt 5 50286 NULL
86891 +sk_chk_filter_42095 sk_chk_filter 2 42095 NULL
86892 +rx_filter_mc_filter_read_25712 rx_filter_mc_filter_read 3 25712 NULL
86893 +ibmasm_new_command_25714 ibmasm_new_command 2 25714 NULL
86894 +snd_rme96_capture_copy_58484 snd_rme96_capture_copy 5 58484 NULL
86895 +event_tx_stuck_read_19305 event_tx_stuck_read 3 19305 NULL
86896 +batadv_bla_is_backbone_gw_58488 batadv_bla_is_backbone_gw 3 58488 NULL
86897 +v4l2_ctrl_new_int_menu_41151 v4l2_ctrl_new_int_menu 4 41151 NULL
86898 +submit_inquiry_42108 submit_inquiry 3 42108 NULL
86899 +sel_write_context_25726 sel_write_context 3 25726 NULL nohasharray
86900 +__alloc_bootmem_low_node_25726 __alloc_bootmem_low_node 2 25726 &sel_write_context_25726
86901 +sysfs_read_file_42113 sysfs_read_file 3 42113 NULL
86902 +mcs_unwrap_fir_25733 mcs_unwrap_fir 3 25733 NULL
86903 +vme_user_write_15587 vme_user_write 3 15587 NULL
86904 +xlog_do_log_recovery_17550 xlog_do_log_recovery 3 17550 NULL
86905 +__copy_to_user_17551 __copy_to_user 3 17551 NULL
86906 +cxgbi_device_portmap_create_25747 cxgbi_device_portmap_create 3 25747 NULL
86907 +sel_read_avc_cache_threshold_33942 sel_read_avc_cache_threshold 3 33942 NULL
86908 +lpfc_idiag_ctlacc_read_33943 lpfc_idiag_ctlacc_read 3 33943 NULL
86909 +read_file_tgt_rx_stats_33944 read_file_tgt_rx_stats 3 33944 NULL
86910 +nouveau_engine_create__50331 nouveau_engine_create_ 7 50331 NULL
86911 +lpfc_idiag_pcicfg_read_50334 lpfc_idiag_pcicfg_read 3 50334 NULL
86912 +v9fs_alloc_rdir_buf_42150 v9fs_alloc_rdir_buf 2 42150 NULL
86913 +roccat_common2_send_with_status_50343 roccat_common2_send_with_status 4 50343 NULL
86914 +ipc_alloc_1192 ipc_alloc 1 1192 NULL
86915 +mwifiex_info_read_53447 mwifiex_info_read 3 53447 NULL
86916 +ib_create_send_mad_1196 ib_create_send_mad 5 1196 NULL
86917 +rndis_add_response_58544 rndis_add_response 2 58544 NULL
86918 +isdn_ppp_read_50356 isdn_ppp_read 4 50356 NULL
86919 +read_9397 read 3 9397 NULL
86920 +i2cdev_read_1206 i2cdev_read 3 1206 NULL
86921 +read_file_base_eeprom_42168 read_file_base_eeprom 3 42168 NULL
86922 +printer_write_60276 printer_write 3 60276 NULL
86923 +acpi_ut_create_package_object_17594 acpi_ut_create_package_object 1 17594 NULL
86924 +neigh_hash_alloc_17595 neigh_hash_alloc 1 17595 NULL
86925 +rts51x_write_mem_17598 rts51x_write_mem 4 17598 NULL
86926 +vga_switcheroo_debugfs_write_33984 vga_switcheroo_debugfs_write 3 33984 NULL
86927 +roccat_common2_receive_50369 roccat_common2_receive 4 50369 NULL
86928 +oprofilefs_str_to_user_42182 oprofilefs_str_to_user 3 42182 NULL
86929 +osst_execute_17607 osst_execute 7-6 17607 NULL
86930 +nf_nat_sip_expect_9418 nf_nat_sip_expect 8 9418 NULL
86931 +sl_alloc_bufs_50380 sl_alloc_bufs 2 50380 NULL
86932 +ipw_packet_received_skb_1230 ipw_packet_received_skb 2 1230 NULL
86933 +ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout_17618 ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout 3 17618 NULL
86934 +rx_rx_frame_checksum_read_40140 rx_rx_frame_checksum_read 3 40140 NULL
86935 +sys32_rt_sigpending_25814 sys32_rt_sigpending 2 25814 NULL
86936 +bm_realloc_pages_9431 bm_realloc_pages 2 9431 NULL
86937 +acpi_battery_write_alarm_1240 acpi_battery_write_alarm 3 1240 NULL
86938 +joydev_ioctl_common_49359 joydev_ioctl_common 2 49359 NULL
86939 +ffs_ep0_write_9438 ffs_ep0_write 3 9438 NULL
86940 +xip_file_read_58592 xip_file_read 3 58592 NULL
86941 +iwl_dbgfs_ucode_tx_stats_read_31611 iwl_dbgfs_ucode_tx_stats_read 3 31611 NULL
86942 +kmalloc_array_9444 kmalloc_array 1-2 9444 NULL
86943 +__ntfs_malloc_34022 __ntfs_malloc 1 34022 NULL
86944 +l2tp_ip_sendmsg_50411 l2tp_ip_sendmsg 4 50411 NULL
86945 +mcs_unwrap_mir_9455 mcs_unwrap_mir 3 9455 NULL
86946 +ppp_write_34034 ppp_write 3 34034 NULL
86947 +qla4xxx_change_queue_depth_1268 qla4xxx_change_queue_depth 2 1268 NULL
86948 +iscsi_create_conn_50425 iscsi_create_conn 2 50425 NULL
86949 +tty_insert_flip_string_34042 tty_insert_flip_string 3 34042 NULL
86950 +packet_setsockopt_17662 packet_setsockopt 5 17662 NULL
86951 +batadv_tt_prepare_packet_buff_1280 batadv_tt_prepare_packet_buff 4 1280 NULL
86952 +do_arpt_set_ctl_51053 do_arpt_set_ctl 4 51053 NULL
86953 +module_alloc_update_bounds_rx_58634 module_alloc_update_bounds_rx 1 58634 NULL nohasharray
86954 +efi_ioremap_58634 efi_ioremap 1-2 58634 &module_alloc_update_bounds_rx_58634
86955 +btmrvl_hsmode_write_42252 btmrvl_hsmode_write 3 42252 NULL
86956 +rx_defrag_need_decrypt_read_42253 rx_defrag_need_decrypt_read 3 42253 NULL
86957 +dsp_tone_hw_message_17678 dsp_tone_hw_message 3 17678 NULL
86958 +netxen_nic_map_indirect_address_128M_42257 netxen_nic_map_indirect_address_128M 2 42257 NULL
86959 +ulog_alloc_skb_23427 ulog_alloc_skb 1 23427 NULL
86960 +__alloc_preds_9492 __alloc_preds 2 9492 NULL
86961 +pgctrl_write_50453 pgctrl_write 3 50453 NULL
86962 +pwr_enable_ps_read_17686 pwr_enable_ps_read 3 17686 NULL
86963 +read_file_ant_diversity_34071 read_file_ant_diversity 3 34071 NULL
86964 +tx_tx_start_fw_gen_read_58648 tx_tx_start_fw_gen_read 3 58648 NULL
86965 +ath6kl_regread_read_25884 ath6kl_regread_read 3 25884 NULL
86966 +tm6000_i2c_recv_regs16_2949 tm6000_i2c_recv_regs16 5 2949 NULL
86967 +savu_sysfs_write_42273 savu_sysfs_write 6 42273 NULL
86968 +uvc_v4l2_ioctl_8411 uvc_v4l2_ioctl 2 8411 NULL
86969 +lp_write_9511 lp_write 3 9511 NULL
86970 +__einj_error_trigger_17707 __einj_error_trigger 1 17707 NULL nohasharray
86971 +venus_rename_17707 venus_rename 5-4 17707 &__einj_error_trigger_17707
86972 +cdrom_read_cdda_50478 cdrom_read_cdda 4 50478 NULL
86973 +nfs_readdir_make_qstr_12509 nfs_readdir_make_qstr 3 12509 NULL
86974 +lpfc_change_queue_depth_25905 lpfc_change_queue_depth 2 25905 NULL
86975 +scsi_tgt_kspace_exec_9522 scsi_tgt_kspace_exec 8 9522 NULL
86976 +do_jffs2_setxattr_25910 do_jffs2_setxattr 5 25910 NULL
86977 +do_xip_mapping_read_60297 do_xip_mapping_read 5 60297 NULL
86978 +read_file_dma_9530 read_file_dma 3 9530 NULL
86979 +sel_read_perm_42302 sel_read_perm 3 42302 NULL
86980 +rcname_read_25919 rcname_read 3 25919 NULL
86981 +sctp_setsockopt_del_key_42304 sctp_setsockopt_del_key 3 42304 NULL nohasharray
86982 +ulong_read_file_42304 ulong_read_file 3 42304 &sctp_setsockopt_del_key_42304
86983 +tps6586x_writes_58689 tps6586x_writes 3 58689 NULL
86984 +il_dbgfs_rx_stats_read_15243 il_dbgfs_rx_stats_read 3 15243 NULL
86985 +xfs_vm_readpages_42308 xfs_vm_readpages 4 42308 NULL
86986 +exofs_read_lookup_dev_table_17733 exofs_read_lookup_dev_table 3 17733 NULL
86987 +pwr_rcvd_awake_beacons_read_50505 pwr_rcvd_awake_beacons_read 3 50505 NULL
86988 +snd_es1938_capture_copy_25930 snd_es1938_capture_copy 5 25930 NULL
86989 +key_flags_read_25931 key_flags_read 3 25931 NULL
86990 +audit_log_n_untrustedstring_9548 audit_log_n_untrustedstring 3 9548 NULL
86991 +sctpprobe_read_17741 sctpprobe_read 3 17741 NULL
86992 +ipx_sendmsg_1362 ipx_sendmsg 4 1362 NULL
86993 +hysdn_conf_read_42324 hysdn_conf_read 3 42324 NULL
86994 +islpci_mgt_transmit_34133 islpci_mgt_transmit 5 34133 NULL
86995 +pipeline_dec_packet_in_fifo_full_read_33052 pipeline_dec_packet_in_fifo_full_read 3 33052 NULL
86996 +fw_node_create_9559 fw_node_create 2 9559 NULL
86997 +fw_stats_raw_read_1369 fw_stats_raw_read 3 1369 NULL
86998 +ixgbe_dbg_netdev_ops_write_34141 ixgbe_dbg_netdev_ops_write 3 34141 NULL
86999 +kobj_map_9566 kobj_map 2-3 9566 NULL
87000 +snd_pcm_plug_alloc_42339 snd_pcm_plug_alloc 2 42339 NULL
87001 +acpi_map_58725 acpi_map 1-2 58725 NULL
87002 +brcmf_usb_attach_17766 brcmf_usb_attach 2-3 17766 NULL
87003 +sctp_setsockopt_initmsg_1383 sctp_setsockopt_initmsg 3 1383 NULL
87004 +fwnet_receive_packet_50537 fwnet_receive_packet 9 50537 NULL
87005 +do_msgsnd_1387 do_msgsnd 4 1387 NULL
87006 +ieee80211_tdls_mgmt_9581 ieee80211_tdls_mgmt 8 9581 NULL
87007 +snd_rawmidi_kernel_read_4328 snd_rawmidi_kernel_read 3 4328 NULL
87008 +ide_raw_taskfile_42355 ide_raw_taskfile 4 42355 NULL
87009 +udp_recvmsg_42558 udp_recvmsg 4 42558 NULL
87010 +file_read_actor_1401 file_read_actor 4 1401 NULL
87011 +av7110_ipack_init_46655 av7110_ipack_init 2 46655 NULL
87012 +ieee80211_if_write_tkip_mic_test_58748 ieee80211_if_write_tkip_mic_test 3 58748 NULL
87013 +ubifs_leb_change_17789 ubifs_leb_change 4 17789 NULL
87014 +udp_setsockopt_25985 udp_setsockopt 5 25985 NULL
87015 +do_sync_9604 do_sync 1 9604 NULL
87016 +snd_emu10k1_fx8010_read_9605 snd_emu10k1_fx8010_read 5 9605 NULL
87017 +scsi_host_alloc_63041 scsi_host_alloc 2 63041 NULL
87018 +agp_allocate_memory_58761 agp_allocate_memory 2 58761 NULL
87019 +__do_config_autodelink_58763 __do_config_autodelink 3 58763 NULL
87020 +sctp_sf_abort_violation_1420 sctp_sf_abort_violation 7 1420 NULL
87021 +afs_cell_alloc_24052 afs_cell_alloc 2 24052 NULL
87022 +il_dbgfs_disable_ht40_read_42386 il_dbgfs_disable_ht40_read 3 42386 NULL
87023 +lpfc_sli_probe_sriov_nr_virtfn_26004 lpfc_sli_probe_sriov_nr_virtfn 2 26004 NULL
87024 +qib_alloc_fast_reg_mr_12526 qib_alloc_fast_reg_mr 2 12526 NULL
87025 +fat_readpages_50582 fat_readpages 4 50582 NULL nohasharray
87026 +pep_reply_50582 pep_reply 5 50582 &fat_readpages_50582
87027 +iwl_dbgfs_missed_beacon_read_50584 iwl_dbgfs_missed_beacon_read 3 50584 NULL
87028 +saa7164_buffer_alloc_user_9627 saa7164_buffer_alloc_user 2 9627 NULL
87029 +_snd_pcm_lib_alloc_vmalloc_buffer_17820 _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 NULL
87030 +xfs_readdir_41200 xfs_readdir 3 41200 NULL
87031 +sge_rx_50594 sge_rx 3 50594 NULL
87032 +stack_max_size_read_1445 stack_max_size_read 3 1445 NULL
87033 +compat_sys_keyctl_9639 compat_sys_keyctl 4 9639 NULL
87034 +skb_padto_50759 skb_padto 2 50759 NULL
87035 +raw_send_hdrinc_58803 raw_send_hdrinc 4 58803 NULL
87036 +mptscsih_change_queue_depth_26036 mptscsih_change_queue_depth 2 26036 NULL
87037 +selinux_inode_post_setxattr_26037 selinux_inode_post_setxattr 4 26037 NULL
87038 +isku_sysfs_read_58806 isku_sysfs_read 6 58806 NULL
87039 +tx_queue_len_read_1463 tx_queue_len_read 3 1463 NULL
87040 +uvc_alloc_buffers_9656 uvc_alloc_buffers 2-3 9656 NULL
87041 +queue_received_packet_9657 queue_received_packet 5 9657 NULL
87042 +ep_read_58813 ep_read 3 58813 NULL
87043 +xprt_alloc_1475 xprt_alloc 2 1475 NULL
87044 +gsm_data_alloc_42437 gsm_data_alloc 3 42437 NULL
87045 +snd_opl4_mem_proc_write_9670 snd_opl4_mem_proc_write 5 9670 NULL
87046 +pci_enable_sriov_35745 pci_enable_sriov 2 35745 NULL
87047 +sisusb_send_bulk_msg_17864 sisusb_send_bulk_msg 3 17864 NULL
87048 +simple_transaction_get_50633 simple_transaction_get 3 50633 NULL
87049 +key_conf_keyidx_read_42443 key_conf_keyidx_read 3 42443 NULL
87050 +dns_query_9676 dns_query 3 9676 NULL
87051 +keyctl_update_key_26061 keyctl_update_key 3 26061 NULL
87052 +sta_num_ps_buf_frames_read_1488 sta_num_ps_buf_frames_read 3 1488 NULL
87053 +ray_cs_essid_proc_write_17875 ray_cs_essid_proc_write 3 17875 NULL
87054 +orinoco_set_key_17878 orinoco_set_key 5-7 17878 NULL
87055 +bl_pipe_downcall_34264 bl_pipe_downcall 3 34264 NULL
87056 +command_write_58841 command_write 3 58841 NULL
87057 +short_retry_limit_read_4687 short_retry_limit_read 3 4687 NULL
87058 +dev_config_8506 dev_config 3 8506 NULL
87059 +compat_sys_pwritev_17886 compat_sys_pwritev 3 17886 NULL
87060 +sys_readv_50664 sys_readv 3 50664 NULL
87061 +bnad_debugfs_read_50665 bnad_debugfs_read 3 50665 NULL
87062 +__simple_xattr_set_42474 __simple_xattr_set 4 42474 NULL
87063 +ath6kl_wmi_send_action_cmd_58860 ath6kl_wmi_send_action_cmd 7 58860 NULL
87064 +rx_rx_wa_density_dropped_frame_read_26095 rx_rx_wa_density_dropped_frame_read 3 26095 NULL
87065 +recover_head_17904 recover_head 3 17904 NULL
87066 +dccp_feat_register_sp_17914 dccp_feat_register_sp 5 17914 NULL
87067 +xfs_buf_associate_memory_17915 xfs_buf_associate_memory 3 17915 NULL
87068 +brcmf_sdbrcm_bus_txctl_42492 brcmf_sdbrcm_bus_txctl 3 42492 NULL
87069 +srp_iu_pool_alloc_17920 srp_iu_pool_alloc 2 17920 NULL
87070 +gs_alloc_req_58883 gs_alloc_req 2 58883 NULL
87071 +pvr2_v4l2_read_18006 pvr2_v4l2_read 3 18006 NULL
87072 +cs553x_init_one_58886 cs553x_init_one 3 58886 NULL
87073 +ddb_input_read_9743 ddb_input_read 3 9743 NULL
87074 +skb_cow_26138 skb_cow 2 26138 NULL
87075 +smk_write_netlbladdr_42525 smk_write_netlbladdr 3 42525 NULL
87076 +snd_emux_create_port_42533 snd_emux_create_port 3 42533 NULL
87077 +do_sigpending_9766 do_sigpending 2 9766 NULL
87078 +hysdn_conf_write_52145 hysdn_conf_write 3 52145 NULL
87079 +pipeline_cs_rx_packet_out_read_58926 pipeline_cs_rx_packet_out_read 3 58926 NULL
87080 +blk_check_plugged_50736 blk_check_plugged 3 50736 NULL
87081 +__blk_queue_init_tags_9778 __blk_queue_init_tags 2 9778 NULL
87082 +copy_oldmem_page_26164 copy_oldmem_page 3-1 26164 NULL
87083 +i915_ring_stop_read_42549 i915_ring_stop_read 3 42549 NULL nohasharray
87084 +ath6kl_wmi_proc_events_vif_42549 ath6kl_wmi_proc_events_vif 5 42549 &i915_ring_stop_read_42549
87085 +ath6kl_roam_table_read_26166 ath6kl_roam_table_read 3 26166 NULL
87086 +snd_mem_proc_write_9786 snd_mem_proc_write 3 9786 NULL
87087 +fc_frame_alloc_1596 fc_frame_alloc 2 1596 NULL
87088 +rngapi_reset_34366 rngapi_reset 3 34366 NULL nohasharray
87089 +p54_alloc_skb_34366 p54_alloc_skb 3 34366 &rngapi_reset_34366
87090 +iwl_calib_set_34400 iwl_calib_set 3 34400 NULL nohasharray
87091 +ivtv_read_pos_34400 ivtv_read_pos 3 34400 &iwl_calib_set_34400
87092 +solo_v4l2_read_59247 solo_v4l2_read 3 59247 NULL
87093 +smk_write_cipso_17989 smk_write_cipso 3 17989 NULL
87094 +packet_buffer_init_1607 packet_buffer_init 2 1607 NULL
87095 +reiserfs_resize_34377 reiserfs_resize 2 34377 NULL
87096 +get_registers_26187 get_registers 3 26187 NULL
87097 +cgroup_setxattr_26188 cgroup_setxattr 4 26188 NULL
87098 +ttm_bo_fbdev_io_9805 ttm_bo_fbdev_io 4 9805 NULL
87099 +btmrvl_hscmd_read_1614 btmrvl_hscmd_read 3 1614 NULL
87100 +av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
87101 +usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
87102 +snd_pcm_oss_write2_27332 snd_pcm_oss_write2 3 27332 NULL
87103 +udpv6_recvmsg_9813 udpv6_recvmsg 4 9813 NULL nohasharray
87104 +ieee80211_if_read_state_9813 ieee80211_if_read_state 3 9813 &udpv6_recvmsg_9813
87105 +tm6000_read_write_usb_50774 tm6000_read_write_usb 7 50774 NULL nohasharray
87106 +pipe_handler_request_50774 pipe_handler_request 5 50774 &tm6000_read_write_usb_50774
87107 +xfs_idata_realloc_26199 xfs_idata_realloc 2 26199 NULL
87108 +mce_write_26201 mce_write 3 26201 NULL
87109 +iwch_alloc_fastreg_pbl_40153 iwch_alloc_fastreg_pbl 2 40153 NULL
87110 +bio_alloc_map_data_50782 bio_alloc_map_data 1-2 50782 NULL
87111 +alloc_rx_desc_ring_18016 alloc_rx_desc_ring 2 18016 NULL
87112 +oom_score_adj_write_42594 oom_score_adj_write 3 42594 NULL
87113 +smk_write_load2_52155 smk_write_load2 3 52155 NULL
87114 +__pskb_pull_42602 __pskb_pull 2 42602 NULL
87115 +sctp_make_heartbeat_ack_34411 sctp_make_heartbeat_ack 4 34411 NULL
87116 +tpm_write_50798 tpm_write 3 50798 NULL
87117 +btmrvl_hsmode_read_1647 btmrvl_hsmode_read 3 1647 NULL
87118 +tun_do_read_50800 tun_do_read 4 50800 NULL
87119 +handle_rx_packet_58993 handle_rx_packet 3 58993 NULL
87120 +write_flush_50803 write_flush 3 50803 NULL
87121 +_scsih_change_queue_depth_26230 _scsih_change_queue_depth 2 26230 NULL
87122 +rxrpc_recvmsg_26233 rxrpc_recvmsg 4 26233 NULL
87123 +ikconfig_read_current_1658 ikconfig_read_current 3 1658 NULL
87124 +dvb_play_50814 dvb_play 3 50814 NULL
87125 +cryptd_alloc_instance_18048 cryptd_alloc_instance 2-3 18048 NULL
87126 +sys_move_pages_42626 sys_move_pages 2 42626 NULL
87127 +ddebug_proc_write_18055 ddebug_proc_write 3 18055 NULL
87128 +pmcraid_alloc_sglist_9864 pmcraid_alloc_sglist 1 9864 NULL
87129 +btrfs_free_reserved_extent_9867 btrfs_free_reserved_extent 2 9867 NULL
87130 +usbtest_alloc_urb_34446 usbtest_alloc_urb 3-5 34446 NULL
87131 +dma_attach_50831 dma_attach 6-7 50831 NULL
87132 +scsi_activate_tcq_42640 scsi_activate_tcq 2 42640 NULL
87133 +br_mdb_rehash_42643 br_mdb_rehash 2 42643 NULL
87134 +vhci_put_user_12604 vhci_put_user 4 12604 NULL
87135 +packet_came_18072 packet_came 3 18072 NULL
87136 +init_pci_cap_msi_perm_59033 init_pci_cap_msi_perm 2 59033 NULL
87137 +kvm_read_guest_page_18074 kvm_read_guest_page 5 18074 NULL
87138 +sctp_make_abort_34459 sctp_make_abort 3 34459 NULL
87139 +_regmap_raw_write_42652 _regmap_raw_write 4 42652 NULL
87140 +selinux_transaction_write_59038 selinux_transaction_write 3 59038 NULL
87141 +get_vm_area_18080 get_vm_area 1 18080 NULL
87142 +dvb_dvr_set_buffer_size_9840 dvb_dvr_set_buffer_size 2 9840 NULL
87143 +bm_register_write_9893 bm_register_write 3 9893 NULL nohasharray
87144 +snd_midi_event_new_9893 snd_midi_event_new 1 9893 &bm_register_write_9893
87145 +self_check_write_50856 self_check_write 5 50856 NULL
87146 +line6_dumpreq_init_34473 line6_dumpreq_init 3 34473 NULL
87147 +i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
87148 +mpi_alloc_18094 mpi_alloc 1 18094 NULL
87149 +coda_psdev_write_1711 coda_psdev_write 3 1711 NULL
87150 +l2tp_xmit_skb_42672 l2tp_xmit_skb 3 42672 NULL
87151 +bitmap_resize_33054 bitmap_resize 2 33054 NULL
87152 +mga_compat_ioctl_52170 mga_compat_ioctl 2 52170 NULL
87153 +mgmt_pending_add_46976 mgmt_pending_add 5 46976 NULL
87154 +sis190_try_rx_copy_57069 sis190_try_rx_copy 3 57069 NULL
87155 +dfs_file_read_18116 dfs_file_read 3 18116 NULL
87156 +request_key_and_link_42693 request_key_and_link 4 42693 NULL
87157 +vb2_read_42703 vb2_read 3 42703 NULL
87158 +pwr_wake_on_host_read_26321 pwr_wake_on_host_read 3 26321 NULL
87159 +hvc_alloc_12579 hvc_alloc 4 12579 NULL
87160 +tx_frag_called_read_1748 tx_frag_called_read 3 1748 NULL
87161 +snd_pcm_plugin_alloc_12580 snd_pcm_plugin_alloc 2 12580 NULL
87162 +pstore_ftrace_knob_read_9947 pstore_ftrace_knob_read 3 9947 NULL
87163 +read_file_misc_9948 read_file_misc 3 9948 NULL
87164 +xfs_iext_remove_50909 xfs_iext_remove 3 50909 NULL
87165 +set_rxd_buffer_pointer_9950 set_rxd_buffer_pointer 8 9950 NULL
87166 +selinux_inode_setsecurity_18148 selinux_inode_setsecurity 4 18148 NULL
87167 +csum_partial_copy_fromiovecend_9957 csum_partial_copy_fromiovecend 3-4 9957 NULL
87168 +tracing_stats_read_34537 tracing_stats_read 3 34537 NULL
87169 +hash_recvmsg_50924 hash_recvmsg 4 50924 NULL
87170 +dvb_demux_ioctl_42733 dvb_demux_ioctl 2 42733 NULL
87171 +cosa_write_1774 cosa_write 3 1774 NULL
87172 +set_aoe_iflist_42737 set_aoe_iflist 2 42737 NULL
87173 +hugetlbfs_read_actor_34547 hugetlbfs_read_actor 2-5-4 34547 NULL
87174 +ax25_setsockopt_42740 ax25_setsockopt 5 42740 NULL
87175 +btrfs_add_link_9973 btrfs_add_link 5 9973 NULL
87176 +bnx2fc_cmd_mgr_alloc_24873 bnx2fc_cmd_mgr_alloc 3-2 24873 NULL
87177 +cifs_readdata_alloc_26360 cifs_readdata_alloc 1 26360 NULL
87178 +ath6kl_usb_submit_ctrl_out_9978 ath6kl_usb_submit_ctrl_out 6 9978 NULL
87179 +dup_to_netobj_26363 dup_to_netobj 3 26363 NULL
87180 +sock_bindtodevice_50942 sock_bindtodevice 3 50942 NULL
87181 +pccard_store_cis_18176 pccard_store_cis 6 18176 NULL
87182 +fcoe_ctlr_device_add_1793 fcoe_ctlr_device_add 3 1793 NULL
87183 +alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL
87184 +mld_newpack_50950 mld_newpack 2 50950 NULL
87185 +framebuffer_alloc_59145 framebuffer_alloc 1 59145 NULL
87186 +i915_ring_stop_write_59010 i915_ring_stop_write 3 59010 NULL
87187 +radeon_compat_ioctl_59150 radeon_compat_ioctl 2 59150 NULL
87188 +cfpkt_create_18197 cfpkt_create 1 18197 NULL
87189 +velocity_rx_copy_34583 velocity_rx_copy 2 34583 NULL
87190 +x25_recvmsg_42777 x25_recvmsg 4 42777 NULL
87191 +init_send_hfcd_34586 init_send_hfcd 1 34586 NULL
87192 +tty_insert_flip_string_fixed_flag_37428 tty_insert_flip_string_fixed_flag 4 37428 NULL
87193 +xfrm_dst_alloc_copy_3034 xfrm_dst_alloc_copy 3 3034 NULL
87194 +orinoco_add_extscan_result_18207 orinoco_add_extscan_result 3 18207 NULL
87195 +gsm_control_message_18209 gsm_control_message 4 18209 NULL
87196 +do_ipv6_setsockopt_18215 do_ipv6_setsockopt 5 18215 NULL
87197 +handle_request_10024 handle_request 9 10024 NULL
87198 +__tty_alloc_driver_53799 __tty_alloc_driver 1 53799 NULL
87199 +setup_window_59178 setup_window 4-2-5-7 59178 NULL
87200 +timeout_write_50991 timeout_write 3 50991 NULL
87201 +batadv_orig_hash_add_if_10033 batadv_orig_hash_add_if 2 10033 NULL
87202 +fw_device_op_compat_ioctl_42804 fw_device_op_compat_ioctl 2 42804 NULL
87203 +ip_vs_create_timeout_table_64478 ip_vs_create_timeout_table 2 64478 NULL
87204 +proc_write_51003 proc_write 3 51003 NULL
87205 +drm_ioctl_42813 drm_ioctl 2 42813 NULL
87206 +gnttab_alloc_grant_references_18240 gnttab_alloc_grant_references 1 18240 NULL
87207 +iwl_dbgfs_ucode_bt_stats_read_42820 iwl_dbgfs_ucode_bt_stats_read 3 42820 NULL
87208 +set_arg_42824 set_arg 3 42824 NULL
87209 +xfs_iext_realloc_indirect_59211 xfs_iext_realloc_indirect 2 59211 NULL
87210 +fast_rx_path_59214 fast_rx_path 3 59214 NULL
87211 +lbs_dev_info_51023 lbs_dev_info 3 51023 NULL
87212 +cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
87213 +audit_log_n_string_31705 audit_log_n_string 3 31705 NULL
87214 +fuse_conn_congestion_threshold_read_51028 fuse_conn_congestion_threshold_read 3 51028 NULL
87215 +pstore_mkfile_50830 pstore_mkfile 5 50830 NULL
87216 +qdisc_class_hash_alloc_18262 qdisc_class_hash_alloc 1 18262 NULL
87217 +hidp_queue_report_1881 hidp_queue_report 3 1881 NULL
87218 +dt3155_read_59226 dt3155_read 3 59226 NULL
87219 +xfs_buf_read_uncached_42844 xfs_buf_read_uncached 3 42844 NULL
87220 +ieee80211_set_probe_resp_10077 ieee80211_set_probe_resp 3 10077 NULL
87221 +dump_midi_51040 dump_midi 3 51040 NULL
87222 +srpt_alloc_ioctx_51042 srpt_alloc_ioctx 2-3 51042 NULL
87223 +gfs2_alloc_sort_buffer_18275 gfs2_alloc_sort_buffer 1 18275 NULL
87224 +nf_nat_mangle_udp_packet_34661 nf_nat_mangle_udp_packet 8-6 34661 NULL
87225 +alloc_ring_18278 alloc_ring 2-4 18278 NULL
87226 +tty_prepare_flip_string_flags_59240 tty_prepare_flip_string_flags 4 59240 NULL
87227 +nouveau_subdev_create__18281 nouveau_subdev_create_ 7 18281 NULL
87228 +ext4_readpages_18283 ext4_readpages 4 18283 NULL
87229 +mmc_send_bus_test_18285 mmc_send_bus_test 4 18285 NULL
87230 +em28xx_v4l2_read_16701 em28xx_v4l2_read 3 16701 NULL
87231 +configfs_read_file_1683 configfs_read_file 3 1683 NULL
87232 +ulong_write_file_26485 ulong_write_file 3 26485 NULL
87233 +wusb_prf_64_51065 wusb_prf_64 7 51065 NULL
87234 +dvb_ca_en50221_io_ioctl_26490 dvb_ca_en50221_io_ioctl 2 26490 NULL
87235 +dynamic_ps_timeout_read_10110 dynamic_ps_timeout_read 3 10110 NULL
87236 +isr_fiqs_read_34687 isr_fiqs_read 3 34687 NULL
87237 +pskb_expand_head_42881 pskb_expand_head 2-3 42881 NULL
87238 +ip6ip6_err_18308 ip6ip6_err 5 18308 NULL
87239 +read_vmcore_26501 read_vmcore 3 26501 NULL
87240 +tx_tx_retry_data_read_1926 tx_tx_retry_data_read 3 1926 NULL
87241 +garp_attr_create_3883 garp_attr_create 3 3883 NULL
87242 +tipc_port_recv_sections_42890 tipc_port_recv_sections 4 42890 NULL
87243 +vfio_pci_set_msi_trigger_26507 vfio_pci_set_msi_trigger 3-4 26507 NULL
87244 +xpc_kmalloc_cacheline_aligned_42895 xpc_kmalloc_cacheline_aligned 1 42895 NULL
87245 +jbd2_journal_init_revoke_51088 jbd2_journal_init_revoke 2 51088 NULL
87246 +ecryptfs_send_message_18322 ecryptfs_send_message 2 18322 NULL
87247 +cyttsp_probe_1940 cyttsp_probe 4 1940 NULL
87248 +SendTxCommandPacket_42901 SendTxCommandPacket 3 42901 NULL
87249 +btmrvl_hscfgcmd_read_56303 btmrvl_hscfgcmd_read 3 56303 NULL
87250 +W6692_empty_Bfifo_47804 W6692_empty_Bfifo 2 47804 NULL
87251 +ath6kl_wmi_send_probe_response_cmd_31728 ath6kl_wmi_send_probe_response_cmd 6 31728 NULL
87252 +ieee80211_if_read_num_sta_ps_34722 ieee80211_if_read_num_sta_ps 3 34722 NULL
87253 +exofs_read_kern_39921 exofs_read_kern 6 39921 NULL nohasharray
87254 +oom_score_adj_read_39921 oom_score_adj_read 3 39921 &exofs_read_kern_39921
87255 +btrfs_insert_dir_item_59304 btrfs_insert_dir_item 4 59304 NULL
87256 +aes_decrypt_packets_read_10155 aes_decrypt_packets_read 3 10155 NULL
87257 +rds_message_inc_copy_to_user_26540 rds_message_inc_copy_to_user 3 26540 NULL
87258 +iscsi_nop_out_rsp_51117 iscsi_nop_out_rsp 4 51117 NULL
87259 +platform_list_read_file_34734 platform_list_read_file 3 34734 NULL
87260 +hidg_alloc_ep_req_10159 hidg_alloc_ep_req 2 10159 NULL
87261 +reg_w_ixbuf_34736 reg_w_ixbuf 4 34736 NULL
87262 +sctp_make_datafrag_empty_34737 sctp_make_datafrag_empty 3 34737 NULL
87263 +pwr_power_save_off_read_18355 pwr_power_save_off_read 3 18355 NULL
87264 +asd_store_update_bios_10165 asd_store_update_bios 4 10165 NULL
87265 +__vhost_add_used_n_26554 __vhost_add_used_n 3 26554 NULL
87266 +fd_copyout_59323 fd_copyout 3 59323 NULL
87267 +nfs_map_name_to_uid_51132 nfs_map_name_to_uid 3 51132 NULL
87268 +proc_pid_attr_read_10173 proc_pid_attr_read 3 10173 NULL
87269 +read_9287_modal_eeprom_59327 read_9287_modal_eeprom 3 59327 NULL
87270 +sel_read_avc_hash_stats_1984 sel_read_avc_hash_stats 3 1984 NULL
87271 +diva_alloc_dma_map_23798 diva_alloc_dma_map 2 23798 NULL
87272 +solos_param_store_34755 solos_param_store 4 34755 NULL
87273 +simple_xattr_set_51140 simple_xattr_set 4 51140 NULL
87274 +jffs2_user_setxattr_10182 jffs2_user_setxattr 4 10182 NULL
87275 +__alloc_bootmem_node_1992 __alloc_bootmem_node 2 1992 NULL
87276 +rx_defrag_in_process_called_read_59338 rx_defrag_in_process_called_read 3 59338 NULL
87277 +xfs_trans_get_efd_51148 xfs_trans_get_efd 3 51148 NULL
87278 +ib_send_cm_rtu_63138 ib_send_cm_rtu 3 63138 NULL
87279 +compat_sys_pwritev64_51151 compat_sys_pwritev64 3 51151 NULL
87280 +rts51x_read_mem_26577 rts51x_read_mem 4 26577 NULL nohasharray
87281 +batadv_receive_server_sync_packet_26577 batadv_receive_server_sync_packet 3 26577 &rts51x_read_mem_26577
87282 +xfs_attrmulti_attr_set_59346 xfs_attrmulti_attr_set 4 59346 NULL
87283 +vcc_recvmsg_37198 vcc_recvmsg 4 37198 NULL
87284 +batadv_tt_commit_changes_2008 batadv_tt_commit_changes 4 2008 NULL
87285 +sep_prepare_input_dma_table_2009 sep_prepare_input_dma_table 2-3 2009 NULL
87286 +qib_cdev_init_34778 qib_cdev_init 1 34778 NULL
87287 +ubifs_write_node_11258 ubifs_write_node 5-3 11258 NULL
87288 +reada_tree_block_flagged_18402 reada_tree_block_flagged 3 18402 NULL
87289 +iscsi_if_send_reply_52219 iscsi_if_send_reply 7 52219 NULL
87290 +write_flush_pipefs_2021 write_flush_pipefs 3 2021 NULL
87291 +__copy_in_user_34790 __copy_in_user 3 34790 NULL
87292 +crystalhd_user_data_18407 crystalhd_user_data 3 18407 NULL
87293 +nfs_idmap_get_desc_42990 nfs_idmap_get_desc 4-2 42990 NULL
87294 +mwifiex_regrdwr_read_34472 mwifiex_regrdwr_read 3 34472 NULL
87295 +BcmCopySection_2035 BcmCopySection 5 2035 NULL
87296 +devm_ioremap_nocache_2036 devm_ioremap_nocache 2-3 2036 NULL
87297 +carl9170_debugfs_write_50857 carl9170_debugfs_write 3 50857 NULL
87298 +hdlc_rpr_irq_10240 hdlc_rpr_irq 2 10240 NULL
87299 +batadv_orig_node_add_if_18433 batadv_orig_node_add_if 2 18433 NULL
87300 +ath6kl_fwlog_mask_read_2050 ath6kl_fwlog_mask_read 3 2050 NULL
87301 +pwr_fix_tsf_ps_read_26627 pwr_fix_tsf_ps_read 3 26627 NULL
87302 +nfc_alloc_recv_skb_10244 nfc_alloc_recv_skb 1 10244 NULL
87303 +pm8001_store_update_fw_55716 pm8001_store_update_fw 4 55716 NULL
87304 +mangle_sdp_packet_30381 mangle_sdp_packet 10 30381 NULL
87305 +isr_rx_mem_overflow_read_43025 isr_rx_mem_overflow_read 3 43025 NULL
87306 +cciss_proc_write_10259 cciss_proc_write 3 10259 NULL
87307 +hest_ghes_dev_register_46766 hest_ghes_dev_register 1 46766 NULL
87308 +b43_debugfs_write_34838 b43_debugfs_write 3 34838 NULL
87309 +subbuf_read_actor_2071 subbuf_read_actor 3 2071 NULL
87310 +fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
87311 +irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL
87312 +regset_tls_set_18459 regset_tls_set 4 18459 NULL
87313 +nf_ct_ext_create_51232 nf_ct_ext_create 3 51232 NULL
87314 +receive_DataRequest_9904 receive_DataRequest 3 9904 NULL
87315 +acpi_system_write_wakeup_device_34853 acpi_system_write_wakeup_device 3 34853 NULL
87316 +tipc_send_51238 tipc_send 4 51238 NULL
87317 +drm_property_create_51239 drm_property_create 4 51239 NULL
87318 +snd_rme9652_capture_copy_10287 snd_rme9652_capture_copy 5 10287 NULL
87319 +squashfs_read_data_59440 squashfs_read_data 6 59440 NULL
87320 +idetape_chrdev_read_2097 idetape_chrdev_read 3 2097 NULL
87321 +audit_expand_2098 audit_expand 2 2098 NULL
87322 +st_read_51251 st_read 3 51251 NULL
87323 +fs_path_ensure_buf_59445 fs_path_ensure_buf 2 59445 NULL
87324 +udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 NULL
87325 +iwl_dbgfs_log_event_read_2107 iwl_dbgfs_log_event_read 3 2107 NULL
87326 +ecryptfs_encrypt_and_encode_filename_2109 ecryptfs_encrypt_and_encode_filename 6 2109 NULL
87327 +compat_dccp_setsockopt_51263 compat_dccp_setsockopt 5 51263 NULL
87328 +nouveau_gpuobj_create__43072 nouveau_gpuobj_create_ 9 43072 NULL
87329 +rtsx_read_cfg_seq_48139 rtsx_read_cfg_seq 5-3 48139 NULL
87330 +__find_xattr_2117 __find_xattr 6 2117 NULL nohasharray
87331 +enable_read_2117 enable_read 3 2117 &__find_xattr_2117
87332 +dvb_audio_write_51275 dvb_audio_write 3 51275 NULL
87333 +pcf50633_write_block_2124 pcf50633_write_block 3 2124 NULL
87334 +ipwireless_network_packet_received_51277 ipwireless_network_packet_received 4 51277 NULL
87335 +ieee80211_if_write_34894 ieee80211_if_write 3 34894 NULL
87336 +c4_add_card_54968 c4_add_card 3 54968 NULL
87337 +rtllib_authentication_req_26713 rtllib_authentication_req 3 26713 NULL
87338 +snd_gus_dram_poke_18525 snd_gus_dram_poke 4 18525 NULL
87339 +check_load_and_stores_2143 check_load_and_stores 2 2143 NULL
87340 +fd_do_readv_51297 fd_do_readv 3 51297 NULL
87341 +nouveau_fifo_channel_create__18530 nouveau_fifo_channel_create_ 5-6-9 18530 NULL
87342 +nfc_hci_send_cmd_async_26723 nfc_hci_send_cmd_async 5 26723 NULL
87343 +mlx4_init_icm_table_2151 mlx4_init_icm_table 5-4 2151 NULL
87344 +bnad_debugfs_read_regrd_51308 bnad_debugfs_read_regrd 3 51308 NULL
87345 +ufx_alloc_urb_list_10349 ufx_alloc_urb_list 3 10349 NULL
87346 +ib_copy_from_udata_59502 ib_copy_from_udata 3 59502 NULL
87347 +seq_copy_in_user_18543 seq_copy_in_user 3 18543 NULL
87348 +sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 NULL
87349 +ath6kl_listen_int_read_10355 ath6kl_listen_int_read 3 10355 NULL
87350 +_ore_get_io_state_2166 _ore_get_io_state 3-4-5 2166 NULL
87351 +nr_recvmsg_12649 nr_recvmsg 4 12649 NULL
87352 +alloc_hippi_dev_51320 alloc_hippi_dev 1 51320 NULL
87353 +pipeline_tcp_rx_stat_fifo_int_read_26745 pipeline_tcp_rx_stat_fifo_int_read 3 26745 NULL
87354 +ms_write_multiple_pages_10362 ms_write_multiple_pages 6-5 10362 NULL
87355 +sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
87356 +fb_sys_write_33130 fb_sys_write 3 33130 NULL
87357 +sta_ht_capa_read_10366 sta_ht_capa_read 3 10366 NULL
87358 +set_bypass_pwoff_pfs_27669 set_bypass_pwoff_pfs 3 27669 NULL
87359 +smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL
87360 +srp_ring_alloc_26760 srp_ring_alloc 2 26760 NULL
87361 +read_file_dfs_43145 read_file_dfs 3 43145 NULL
87362 +ecryptfs_decode_and_decrypt_filename_10379 ecryptfs_decode_and_decrypt_filename 5 10379 NULL
87363 +ntfs_malloc_nofs_nofail_63631 ntfs_malloc_nofs_nofail 1 63631 NULL
87364 +cosa_net_setup_rx_38594 cosa_net_setup_rx 2 38594 NULL
87365 +skb_gro_header_slow_34958 skb_gro_header_slow 2 34958 NULL
87366 +debug_output_18575 debug_output 3 18575 NULL
87367 +Realloc_34961 Realloc 2 34961 NULL
87368 +il_dbgfs_power_save_status_read_43165 il_dbgfs_power_save_status_read 3 43165 NULL
87369 +do_compat_pselect_10398 do_compat_pselect 1 10398 NULL
87370 +_rtl92s_firmware_downloadcode_14021 _rtl92s_firmware_downloadcode 3 14021 NULL
87371 +__netdev_alloc_skb_18595 __netdev_alloc_skb 2 18595 NULL
87372 +slabinfo_write_18600 slabinfo_write 3 18600 NULL
87373 +ssb_bus_ssbbus_register_2217 ssb_bus_ssbbus_register 2 2217 NULL
87374 +radeon_kms_compat_ioctl_51371 radeon_kms_compat_ioctl 2 51371 NULL
87375 +iowarrior_write_18604 iowarrior_write 3 18604 NULL
87376 +vhci_write_2224 vhci_write 3 2224 NULL
87377 +ath6kl_set_assoc_req_ies_43185 ath6kl_set_assoc_req_ies 3 43185 NULL
87378 +acpi_os_ioremap_49523 acpi_os_ioremap 1-2 49523 NULL
87379 +rb_alloc_3102 rb_alloc 1 3102 NULL
87380 +uf_create_device_nodes_24948 uf_create_device_nodes 2 24948 NULL
87381 +rx_rx_hdr_overflow_read_35002 rx_rx_hdr_overflow_read 3 35002 NULL
87382 +l2cap_skbuff_fromiovec_35003 l2cap_skbuff_fromiovec 4-3 35003 NULL
87383 +write_pbl_59583 write_pbl 4 59583 NULL
87384 +from_buffer_18625 from_buffer 3 18625 NULL
87385 +uio_write_43202 uio_write 3 43202 NULL
87386 +memdup_user_59590 memdup_user 2 59590 NULL
87387 +ieee80211_wx_set_gen_ie_51399 ieee80211_wx_set_gen_ie 3 51399 NULL
87388 +iso_callback_43208 iso_callback 3 43208 NULL
87389 +ieee80211_if_read_dot11MeshHWMPRannInterval_2249 ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 NULL
87390 +smk_write_load_26829 smk_write_load 3 26829 NULL
87391 +sel_write_avc_cache_threshold_2256 sel_write_avc_cache_threshold 3 2256 NULL
87392 +do_update_counters_2259 do_update_counters 4 2259 NULL
87393 +coda_psdev_read_35029 coda_psdev_read 3 35029 NULL
87394 +cache_slow_downcall_8570 cache_slow_downcall 2 8570 NULL
87395 +ecryptfs_miscdev_write_26847 ecryptfs_miscdev_write 3 26847 NULL
87396 +blk_register_region_51424 blk_register_region 1-2 51424 NULL
87397 +ath6kl_wmi_bssinfo_event_rx_2275 ath6kl_wmi_bssinfo_event_rx 3 2275 NULL
87398 +mwifiex_rdeeprom_read_51429 mwifiex_rdeeprom_read 3 51429 NULL
87399 +mtrr_write_59622 mtrr_write 3 59622 NULL
87400 +event_phy_transmit_error_read_10471 event_phy_transmit_error_read 3 10471 NULL
87401 +ip_vs_icmp_xmit_59624 ip_vs_icmp_xmit 4 59624 NULL
87402 +netxen_nic_hw_read_wx_128M_26858 netxen_nic_hw_read_wx_128M 2 26858 NULL
87403 +edge_tty_recv_18667 edge_tty_recv 4 18667 NULL nohasharray
87404 +xfs_iext_insert_18667 xfs_iext_insert 3 18667 &edge_tty_recv_18667
87405 +btmrvl_gpiogap_write_35053 btmrvl_gpiogap_write 3 35053 NULL
87406 +tty_buffer_alloc_45437 tty_buffer_alloc 2 45437 NULL
87407 +ieee80211_if_read_dot11MeshHWMPRootMode_51441 ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 NULL
87408 +debug_debug5_read_2291 debug_debug5_read 3 2291 NULL
87409 +twl_change_queue_depth_41342 twl_change_queue_depth 2 41342 NULL
87410 +fixup_leb_43256 fixup_leb 3 43256 NULL
87411 +dvb_usercopy_14036 dvb_usercopy 2 14036 NULL
87412 +ubifs_recover_log_leb_12079 ubifs_recover_log_leb 3 12079 NULL
87413 +ubifs_setxattr_59650 ubifs_setxattr 4 59650 NULL nohasharray
87414 +hidraw_read_59650 hidraw_read 3 59650 &ubifs_setxattr_59650
87415 +kvm_clear_guest_page_2308 kvm_clear_guest_page 4 2308 NULL
87416 +ca91cx42_alloc_resource_10502 ca91cx42_alloc_resource 2 10502 NULL
87417 +intel_sdvo_set_value_2311 intel_sdvo_set_value 4 2311 NULL
87418 +qib_alloc_fast_reg_page_list_10507 qib_alloc_fast_reg_page_list 2 10507 NULL
87419 +evtchn_write_43278 evtchn_write 3 43278 NULL
87420 +sel_write_disable_10511 sel_write_disable 3 10511 NULL
87421 +store_ifalias_35088 store_ifalias 4 35088 NULL
87422 +tx_tx_template_prepared_read_30424 tx_tx_template_prepared_read 3 30424 NULL
87423 +osd_req_write_sg_kern_10514 osd_req_write_sg_kern 5 10514 NULL
87424 +____alloc_ei_netdev_51475 ____alloc_ei_netdev 1 51475 NULL
87425 +iwl_dbgfs_rx_handlers_read_18708 iwl_dbgfs_rx_handlers_read 3 18708 NULL
87426 +rds_message_alloc_10517 rds_message_alloc 1 10517 NULL
87427 +ceph_alloc_page_vector_18710 ceph_alloc_page_vector 1 18710 NULL
87428 +blk_rq_map_user_iov_16772 blk_rq_map_user_iov 5 16772 NULL
87429 +get_vm_area_caller_10527 get_vm_area_caller 1 10527 NULL
87430 +capi_write_35104 capi_write 3 35104 NULL nohasharray
87431 +tx_tx_done_template_read_35104 tx_tx_done_template_read 3 35104 &capi_write_35104
87432 +mpage_alloc_43299 mpage_alloc 3 43299 NULL
87433 +sys_semtimedop_4486 sys_semtimedop 3 4486 NULL
87434 +ide_settings_proc_write_35110 ide_settings_proc_write 3 35110 NULL
87435 +osd_req_write_sg_50908 osd_req_write_sg 5 50908 NULL
87436 +x25_asy_change_mtu_26928 x25_asy_change_mtu 2 26928 NULL
87437 +zr364xx_read_2354 zr364xx_read 3 2354 NULL
87438 +mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL
87439 +scsi_tgt_copy_sense_26933 scsi_tgt_copy_sense 3 26933 NULL
87440 +pppoe_recvmsg_15073 pppoe_recvmsg 4 15073 NULL
87441 +pwr_ps_enter_read_26935 pwr_ps_enter_read 3 26935 NULL nohasharray
87442 +sctp_setsockopt_adaptation_layer_26935 sctp_setsockopt_adaptation_layer 3 26935 &pwr_ps_enter_read_26935
87443 +o2hb_debug_create_18744 o2hb_debug_create 4 18744 NULL
87444 +tcp_send_rcvq_11316 tcp_send_rcvq 3 11316 NULL
87445 +viafb_iga2_odev_proc_write_2363 viafb_iga2_odev_proc_write 3 2363 NULL
87446 +hecubafb_write_26942 hecubafb_write 3 26942 NULL
87447 +wep_packets_read_18751 wep_packets_read 3 18751 NULL
87448 +xfs_buf_map_from_irec_2368 xfs_buf_map_from_irec 5 2368 NULL nohasharray
87449 +rose_recvmsg_2368 rose_recvmsg 4 2368 &xfs_buf_map_from_irec_2368
87450 +il_dbgfs_sensitivity_read_2370 il_dbgfs_sensitivity_read 3 2370 NULL
87451 +ieee80211_if_write_uapsd_queues_51526 ieee80211_if_write_uapsd_queues 3 51526 NULL
87452 +do_trimming_26952 do_trimming 3 26952 NULL
87453 +ath6kl_wmi_set_ie_cmd_37260 ath6kl_wmi_set_ie_cmd 6 37260 NULL
87454 +read_file_dump_nfcal_18766 read_file_dump_nfcal 3 18766 NULL
87455 +prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL
87456 +iscsi_conn_setup_35159 iscsi_conn_setup 2 35159 NULL
87457 +alloc_buf_34532 alloc_buf 1 34532 NULL
87458 +sock_rmalloc_59740 sock_rmalloc 2 59740 NULL nohasharray
87459 +ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 &sock_rmalloc_59740
87460 +__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL
87461 +icn_writecmd_38629 icn_writecmd 2 38629 NULL
87462 +otp_read_10594 otp_read 2-4-5 10594 NULL
87463 +rxpipe_rx_prep_beacon_drop_read_2403 rxpipe_rx_prep_beacon_drop_read 3 2403 NULL
87464 +sctp_manip_pkt_59749 sctp_manip_pkt 4 59749 NULL
87465 +icmp_manip_pkt_51560 icmp_manip_pkt 4 51560 NULL
87466 +brcmf_sdbrcm_read_control_22721 brcmf_sdbrcm_read_control 3 22721 NULL
87467 +supply_map_read_file_10608 supply_map_read_file 3 10608 NULL
87468 +isdn_v110_open_2418 isdn_v110_open 3 2418 NULL
87469 +roccat_common2_send_2422 roccat_common2_send 4 2422 NULL
87470 +ieee80211_auth_challenge_18810 ieee80211_auth_challenge 3 18810 NULL
87471 +ima_show_htable_violations_10619 ima_show_htable_violations 3 10619 NULL
87472 +cxgb3_get_cpl_reply_skb_10620 cxgb3_get_cpl_reply_skb 2 10620 NULL
87473 +xfs_iroot_realloc_46826 xfs_iroot_realloc 2 46826 NULL
87474 +venus_remove_59781 venus_remove 4 59781 NULL
87475 +ioremap_nocache_2439 ioremap_nocache 1-2 2439 NULL
87476 +sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
87477 +unix_stream_recvmsg_35210 unix_stream_recvmsg 4 35210 NULL
87478 +tty_buffer_find_2443 tty_buffer_find 2 2443 NULL
87479 +xlog_do_recover_59789 xlog_do_recover 3 59789 NULL
87480 +aac_convert_sgraw2_51598 aac_convert_sgraw2 4 51598 NULL
87481 +rfcomm_tty_write_51603 rfcomm_tty_write 3 51603 NULL
87482 +xenfb_write_43412 xenfb_write 3 43412 NULL
87483 +chd_dec_fetch_cdata_50926 chd_dec_fetch_cdata 3 50926 NULL
87484 +nfs4_alloc_slots_2454 nfs4_alloc_slots 1 2454 NULL nohasharray
87485 +ath6kl_usb_bmi_write_2454 ath6kl_usb_bmi_write 3 2454 &nfs4_alloc_slots_2454
87486 +rx_rx_cmplt_task_read_35226 rx_rx_cmplt_task_read 3 35226 NULL
87487 +mtf_test_write_18844 mtf_test_write 3 18844 NULL
87488 +__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 NULL nohasharray
87489 +gdm_wimax_netif_rx_43423 gdm_wimax_netif_rx 3 43423 &__alloc_bootmem_low_43423
87490 +rtllib_wx_set_gen_ie_59808 rtllib_wx_set_gen_ie 3 59808 NULL
87491 +error_error_null_Frame_tx_start_read_55024 error_error_null_Frame_tx_start_read 3 55024 NULL
87492 +udp_manip_pkt_45467 udp_manip_pkt 4 45467 NULL
87493 +nfs_idmap_lookup_id_10660 nfs_idmap_lookup_id 2 10660 NULL
87494 +xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL
87495 +ni65_alloc_mem_10664 ni65_alloc_mem 3 10664 NULL
87496 +b43legacy_debugfs_read_2473 b43legacy_debugfs_read 3 2473 NULL
87497 +usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
87498 +cmd_complete_51629 cmd_complete 6 51629 NULL
87499 +sctp_setsockopt_events_18862 sctp_setsockopt_events 3 18862 NULL
87500 +btmrvl_txdnldready_read_413 btmrvl_txdnldready_read 3 413 NULL
87501 +ath6kl_wmi_roam_tbl_event_rx_43440 ath6kl_wmi_roam_tbl_event_rx 3 43440 NULL
87502 +set_fd_set_35249 set_fd_set 1 35249 NULL
87503 +wiphy_new_2482 wiphy_new 2 2482 NULL
87504 +bio_alloc_bioset_2484 bio_alloc_bioset 2 2484 NULL
87505 +ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL
87506 +__videobuf_alloc_vb_27062 __videobuf_alloc_vb 1 27062 NULL
87507 +ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
87508 +tcp_push_10680 tcp_push 3 10680 NULL
87509 +sctp_auth_create_key_51641 sctp_auth_create_key 1 51641 NULL
87510 +c101_run_37279 c101_run 2 37279 NULL
87511 +iscsi_create_session_51647 iscsi_create_session 3 51647 NULL
87512 +dma_show_regs_35266 dma_show_regs 3 35266 NULL
87513 +tun_put_user_59849 tun_put_user 4 59849 NULL
87514 +squashfs_read_fragment_index_table_2506 squashfs_read_fragment_index_table 4 2506 NULL
87515 +alloc_fdmem_27083 alloc_fdmem 1 27083 NULL
87516 +irda_recvmsg_stream_35280 irda_recvmsg_stream 4 35280 NULL
87517 +dm_write_2513 dm_write 3 2513 NULL
87518 +v9fs_cached_file_read_2514 v9fs_cached_file_read 3 2514 NULL
87519 +isr_rx_rdys_read_35283 isr_rx_rdys_read 3 35283 NULL
87520 +selinux_inode_setxattr_10708 selinux_inode_setxattr 4 10708 NULL
87521 +ps_upsd_utilization_read_51669 ps_upsd_utilization_read 3 51669 NULL
87522 +ntfs_malloc_nofs_49572 ntfs_malloc_nofs 1 49572 NULL
87523 +nfc_llcp_send_i_frame_59130 nfc_llcp_send_i_frame 3 59130 NULL
87524 +pvr2_ioread_read_10720 pvr2_ioread_read 3 10720 NULL nohasharray
87525 +shash_async_setkey_10720 shash_async_setkey 3 10720 &pvr2_ioread_read_10720
87526 +ceph_setxattr_18913 ceph_setxattr 4 18913 NULL
87527 +sctp_setsockopt_associnfo_51684 sctp_setsockopt_associnfo 3 51684 NULL
87528 +__iscsi_complete_pdu_10726 __iscsi_complete_pdu 4 10726 NULL
87529 +sfi_sysfs_install_table_51688 sfi_sysfs_install_table 1 51688 NULL
87530 +tx_tx_data_prepared_read_43497 tx_tx_data_prepared_read 3 43497 NULL
87531 +pvr2_ioread_set_sync_key_59882 pvr2_ioread_set_sync_key 3 59882 NULL
87532 +__btrfs_direct_write_22273 __btrfs_direct_write 4 22273 NULL
87533 +l2cap_sock_recvmsg_59886 l2cap_sock_recvmsg 4 59886 NULL
87534 +brcmf_sdio_forensic_read_35311 brcmf_sdio_forensic_read 3 35311 NULL nohasharray
87535 +__btrfs_buffered_write_35311 __btrfs_buffered_write 3 35311 &brcmf_sdio_forensic_read_35311
87536 +tracing_read_pipe_35312 tracing_read_pipe 3 35312 NULL
87537 +sctp_getsockopt_maxseg_10737 sctp_getsockopt_maxseg 2 10737 NULL
87538 +compat_sys_msgsnd_10738 compat_sys_msgsnd 2 10738 NULL
87539 +ffs_prepare_buffer_59892 ffs_prepare_buffer 2 59892 NULL
87540 +sel_write_access_51704 sel_write_access 3 51704 NULL
87541 +sys_syslog_10746 sys_syslog 3 10746 NULL
87542 +alloc_one_pg_vec_page_10747 alloc_one_pg_vec_page 1 10747 NULL
87543 +new_bind_ctl_35324 new_bind_ctl 2 35324 NULL
87544 +do_readlink_43518 do_readlink 2 43518 NULL
87545 +tty_cdev_add_51714 tty_cdev_add 2-4 51714 NULL
87546 +gem_alloc_skb_51715 gem_alloc_skb 2 51715 NULL
87547 +fallback_on_nodma_alloc_35332 fallback_on_nodma_alloc 2 35332 NULL
87548 +read_file_reset_52310 read_file_reset 3 52310 NULL
87549 +pms_capture_27142 pms_capture 4 27142 NULL
87550 +btmrvl_hscfgcmd_write_27143 btmrvl_hscfgcmd_write 3 27143 NULL
87551 +vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL
87552 +gspca_dev_probe_2570 gspca_dev_probe 4 2570 NULL
87553 +sg_read_oxfer_51724 sg_read_oxfer 3 51724 NULL
87554 +msg_set_51725 msg_set 3 51725 NULL
87555 +cachefiles_daemon_write_43535 cachefiles_daemon_write 3 43535 NULL
87556 +tx_frag_failed_read_43540 tx_frag_failed_read 3 43540 NULL
87557 +hpi_alloc_control_cache_35351 hpi_alloc_control_cache 1 35351 NULL
87558 +hid_parse_report_51737 hid_parse_report 3 51737 NULL
87559 +compat_filldir64_35354 compat_filldir64 3 35354 NULL
87560 +alc_auto_create_extra_outs_18975 alc_auto_create_extra_outs 2 18975 NULL
87561 +i2400m_net_rx_27170 i2400m_net_rx 5 27170 NULL
87562 +l3_alloc_skb_32289 l3_alloc_skb 1 32289 NULL
87563 +ifx_spi_insert_flip_string_51752 ifx_spi_insert_flip_string 3 51752 NULL
87564 +ath_rx_init_43564 ath_rx_init 2 43564 NULL
87565 +il_dbgfs_rxon_flags_read_59950 il_dbgfs_rxon_flags_read 3 59950 NULL nohasharray
87566 +dapm_widget_power_read_file_59950 dapm_widget_power_read_file 3 59950 &il_dbgfs_rxon_flags_read_59950
87567 +sys_bind_10799 sys_bind 3 10799 NULL
87568 +_fc_frame_alloc_43568 _fc_frame_alloc 1 43568 NULL
87569 +nfcwilink_send_bts_cmd_10802 nfcwilink_send_bts_cmd 3 10802 NULL
87570 +ioremap_prot_51764 ioremap_prot 1-2 51764 NULL
87571 +rpc_malloc_43573 rpc_malloc 2 43573 NULL
87572 +dataflash_read_fact_otp_33204 dataflash_read_fact_otp 2-3 33204 NULL
87573 +smk_write_logging_2618 smk_write_logging 3 2618 NULL
87574 +rx_defrag_init_called_read_35935 rx_defrag_init_called_read 3 35935 NULL
87575 +drm_vblank_init_11362 drm_vblank_init 2 11362 NULL
87576 +send_command_10832 send_command 4 10832 NULL
87577 +lro_gen_skb_2644 lro_gen_skb 6 2644 NULL
87578 +osd_req_read_kern_59990 osd_req_read_kern 5 59990 NULL
87579 +lbs_sleepparams_read_10840 lbs_sleepparams_read 3 10840 NULL
87580 +pppol2tp_sendmsg_56420 pppol2tp_sendmsg 4 56420 NULL
87581 +proc_read_43614 proc_read 3 43614 NULL
87582 +rawv6_send_hdrinc_35425 rawv6_send_hdrinc 3 35425 NULL
87583 +rxrpc_request_key_27235 rxrpc_request_key 3 27235 NULL
87584 +drm_fb_helper_init_19044 drm_fb_helper_init 3-4 19044 NULL
87585 +fuse_conn_max_background_read_10855 fuse_conn_max_background_read 3 10855 NULL
87586 +xlbd_reserve_minors_18365 xlbd_reserve_minors 1-2 18365 NULL
87587 +rawsock_sendmsg_60010 rawsock_sendmsg 4 60010 NULL
87588 +mthca_init_cq_60011 mthca_init_cq 2 60011 NULL
87589 +rx_rx_tkip_replays_read_60193 rx_rx_tkip_replays_read 3 60193 NULL
87590 +buffer_to_user_35439 buffer_to_user 3 35439 NULL
87591 +lpfc_idiag_drbacc_read_15948 lpfc_idiag_drbacc_read 3 15948 NULL
87592 +vmalloc_15464 vmalloc 1 15464 NULL
87593 +buffer_from_user_51826 buffer_from_user 3 51826 NULL
87594 +snd_pcm_oss_write1_10872 snd_pcm_oss_write1 3 10872 NULL
87595 +ieee80211_key_alloc_19065 ieee80211_key_alloc 3 19065 NULL
87596 +osd_req_list_dev_partitions_60027 osd_req_list_dev_partitions 4 60027 NULL
87597 +cfpkt_add_trail_27260 cfpkt_add_trail 3 27260 NULL
87598 +xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL
87599 +sys_sched_getaffinity_60033 sys_sched_getaffinity 2 60033 NULL
87600 +read_file_tgt_tx_stats_51847 read_file_tgt_tx_stats 3 51847 NULL
87601 +do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL
87602 +do_readv_writev_51849 do_readv_writev 4 51849 NULL
87603 +adu_write_30487 adu_write 3 30487 NULL
87604 +ieee80211_if_read_tkip_mic_test_19565 ieee80211_if_read_tkip_mic_test 3 19565 NULL
87605 +get_scq_10897 get_scq 2 10897 NULL
87606 +sys_process_vm_readv_19090 sys_process_vm_readv 3-5 19090 NULL nohasharray
87607 +brcmf_usbdev_qinit_19090 brcmf_usbdev_qinit 2 19090 &sys_process_vm_readv_19090
87608 +memcpy_fromiovecend_2707 memcpy_fromiovecend 3-4 2707 NULL
87609 +cgroup_write_string_10900 cgroup_write_string 5 10900 NULL
87610 +pointer_size_read_51863 pointer_size_read 3 51863 NULL
87611 +load_module_60056 load_module 2 60056 NULL nohasharray
87612 +gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 &load_module_60056
87613 +__videobuf_alloc_cached_12740 __videobuf_alloc_cached 1 12740 NULL
87614 +get_indirect_ea_51869 get_indirect_ea 4 51869 NULL
87615 +dmam_declare_coherent_memory_43679 dmam_declare_coherent_memory 4-2 43679 NULL
87616 +ieee80211_build_probe_req_60064 ieee80211_build_probe_req 8-6 60064 NULL
87617 +compat_do_ip6t_set_ctl_3184 compat_do_ip6t_set_ctl 4 3184 NULL
87618 +sta_last_seq_ctrl_read_19106 sta_last_seq_ctrl_read 3 19106 NULL
87619 +__netdev_alloc_skb_ip_align_55067 __netdev_alloc_skb_ip_align 2 55067 NULL
87620 +read_file_regidx_33370 read_file_regidx 3 33370 NULL
87621 +cifs_readv_from_socket_19109 cifs_readv_from_socket 3 19109 NULL
87622 +__copy_from_user_10918 __copy_from_user 3 10918 NULL
87623 +user_read_51881 user_read 3 51881 NULL
87624 +copy_from_buf_27308 copy_from_buf 4-2 27308 NULL
87625 +__xip_file_write_2733 __xip_file_write 4-3 2733 NULL
87626 +cryptd_hash_setkey_42781 cryptd_hash_setkey 3 42781 NULL
87627 +ath6kl_wmi_test_cmd_27312 ath6kl_wmi_test_cmd 3 27312 NULL
87628 +max77693_bulk_write_43698 max77693_bulk_write 3 43698 NULL
87629 +rxrpc_kernel_send_data_60083 rxrpc_kernel_send_data 3 60083 NULL
87630 +hidp_send_ctrl_message_43702 hidp_send_ctrl_message 4 43702 NULL
87631 +async_setkey_35521 async_setkey 3 35521 NULL
87632 +set_dev_class_39645 set_dev_class 4 39645 NULL nohasharray
87633 +dm_exception_table_init_39645 dm_exception_table_init 2 39645 &set_dev_class_39645
87634 +cxgb4_pktgl_to_skb_61899 cxgb4_pktgl_to_skb 2 61899 NULL
87635 +alloc_irdadev_19140 alloc_irdadev 1 19140 NULL
87636 +iio_read_first_n_sw_rb_51911 iio_read_first_n_sw_rb 2 51911 NULL
87637 +hid_report_raw_event_2762 hid_report_raw_event 4 2762 NULL
87638 +add_tty_40055 add_tty 1 40055 NULL nohasharray
87639 +l2cap_create_iframe_pdu_40055 l2cap_create_iframe_pdu 3 40055 &add_tty_40055
87640 +iwl_dbgfs_bt_traffic_read_35534 iwl_dbgfs_bt_traffic_read 3 35534 NULL
87641 +rxpipe_tx_xfr_host_int_trig_rx_data_read_35538 rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 NULL
87642 +mon_bin_ioctl_2771 mon_bin_ioctl 3 2771 NULL
87643 +snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
87644 +ttm_bo_kmap_60118 ttm_bo_kmap 3-2 60118 NULL
87645 +sleep_auth_read_19159 sleep_auth_read 3 19159 NULL
87646 +alloc_context_3194 alloc_context 1 3194 NULL
87647 +ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
87648 +bm_entry_read_10976 bm_entry_read 3 10976 NULL
87649 +smk_write_access2_19170 smk_write_access2 3 19170 NULL
87650 +pcbit_stat_27364 pcbit_stat 2 27364 NULL
87651 +i915_min_freq_write_10981 i915_min_freq_write 3 10981 NULL
87652 +sched_autogroup_write_10984 sched_autogroup_write 3 10984 NULL
87653 +gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
87654 +sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
87655 +scsi_get_vpd_page_51951 scsi_get_vpd_page 4 51951 NULL
87656 +koneplus_sysfs_read_42792 koneplus_sysfs_read 6 42792 NULL
87657 +scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL
87658 +xfrm_hash_alloc_10997 xfrm_hash_alloc 1 10997 NULL
87659 +ieee80211_if_read_dot11MeshMaxRetries_12756 ieee80211_if_read_dot11MeshMaxRetries 3 12756 NULL
87660 +rx_filter_accum_arp_pend_requests_read_11003 rx_filter_accum_arp_pend_requests_read 3 11003 NULL
87661 +init_state_60165 init_state 2 60165 NULL
87662 +udpv6_sendmsg_22316 udpv6_sendmsg 4 22316 NULL
87663 +sel_read_enforce_2828 sel_read_enforce 3 2828 NULL
87664 +__copy_to_user_inatomic_19214 __copy_to_user_inatomic 3 19214 NULL
87665 +dev_counters_read_19216 dev_counters_read 3 19216 NULL
87666 +ath6kl_sdio_alloc_prep_scat_req_51986 ath6kl_sdio_alloc_prep_scat_req 2 51986 NULL
87667 +sg_build_sgat_60179 sg_build_sgat 3 60179 NULL nohasharray
87668 +jffs2_alloc_full_dirent_60179 jffs2_alloc_full_dirent 1 60179 &sg_build_sgat_60179
87669 +ps_poll_ps_poll_timeouts_read_5934 ps_poll_ps_poll_timeouts_read 3 5934 NULL
87670 +ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 NULL
87671 +calc_hmac_32010 calc_hmac 3 32010 NULL
87672 +ixgbe_dbg_reg_ops_write_46895 ixgbe_dbg_reg_ops_write 3 46895 NULL
87673 +dwc3_mode_write_51997 dwc3_mode_write 3 51997 NULL
87674 +btrfs_copy_from_user_43806 btrfs_copy_from_user 3-1 43806 NULL
87675 +kernel_readv_35617 kernel_readv 3 35617 NULL
87676 +hci_send_cmd_43810 hci_send_cmd 3 43810 NULL
87677 +sys_lgetxattr_45531 sys_lgetxattr 4 45531 NULL
87678 +dev_irnet_write_11398 dev_irnet_write 3 11398 NULL
87679 +bio_alloc_mddev_19238 bio_alloc_mddev 2 19238 NULL
87680 +nouveau_gpio_create__11048 nouveau_gpio_create_ 4 11048 NULL
87681 +dccp_manip_pkt_476 dccp_manip_pkt 4 476 NULL
87682 +tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
87683 +set_tap_pfs_60203 set_tap_pfs 3 60203 NULL
87684 +sfq_alloc_2861 sfq_alloc 1 2861 NULL
87685 +skb_copy_datagram_from_iovec_52014 skb_copy_datagram_from_iovec 4-2-5 52014 NULL
87686 +carl9170_handle_mpdu_11056 carl9170_handle_mpdu 3 11056 NULL
87687 +move_addr_to_user_2868 move_addr_to_user 2 2868 NULL
87688 +ieee80211_alloc_hw_43829 ieee80211_alloc_hw 1 43829 NULL
87689 +vxge_rx_alloc_52024 vxge_rx_alloc 3 52024 NULL
87690 +ValidateDSDParamsChecksum_63654 ValidateDSDParamsChecksum 3 63654 NULL
87691 +__ip_append_data_16864 __ip_append_data 8-9 16864 NULL
87692 +p54_download_eeprom_43842 p54_download_eeprom 4 43842 NULL
87693 +spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
87694 +store_debug_level_35652 store_debug_level 3 35652 NULL
87695 +l2tp_ip6_sendmsg_7461 l2tp_ip6_sendmsg 4 7461 NULL
87696 +read_flush_43851 read_flush 3 43851 NULL
87697 +dma_rx_errors_read_52045 dma_rx_errors_read 3 52045 NULL
87698 +cmm_write_2896 cmm_write 3 2896 NULL
87699 +il_dbgfs_rxon_filter_flags_read_19281 il_dbgfs_rxon_filter_flags_read 3 19281 NULL
87700 +io_mapping_map_wc_19284 io_mapping_map_wc 2 19284 NULL
87701 +tunables_write_59563 tunables_write 3 59563 NULL
87702 +compat_sys_kexec_load_35674 compat_sys_kexec_load 2 35674 NULL
87703 +rtsx_write_cfg_seq_27485 rtsx_write_cfg_seq 5-3 27485 NULL
87704 +v4l2_ctrl_new_std_menu_items_27487 v4l2_ctrl_new_std_menu_items 4 27487 NULL
87705 +kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
87706 +isofs_readpages_52067 isofs_readpages 4 52067 NULL
87707 +lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL
87708 +dm_table_create_35687 dm_table_create 3 35687 NULL
87709 +qib_create_cq_27497 qib_create_cq 2 27497 NULL
87710 +nfc_hci_execute_cmd_43882 nfc_hci_execute_cmd 5 43882 NULL
87711 +rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
87712 +tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
87713 +xfs_trans_get_buf_map_2927 xfs_trans_get_buf_map 4 2927 NULL
87714 +tracing_buffers_read_11124 tracing_buffers_read 3 11124 NULL
87715 +garmin_read_process_27509 garmin_read_process 3 27509 NULL
87716 +alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
87717 +nsm_get_handle_52089 nsm_get_handle 4 52089 NULL
87718 +debug_read_19322 debug_read 3 19322 NULL
87719 +v9fs_direct_read_45546 v9fs_direct_read 3 45546 NULL
87720 +__pskb_pull_tail_60287 __pskb_pull_tail 2 60287 NULL
87721 +gs_buf_alloc_25067 gs_buf_alloc 2 25067 NULL
87722 +cfg80211_inform_bss_19332 cfg80211_inform_bss 8 19332 NULL
87723 +ib_copy_to_udata_27525 ib_copy_to_udata 3 27525 NULL
87724 +dn_nsp_return_disc_60296 dn_nsp_return_disc 2 60296 NULL
87725 +o2net_debug_read_52105 o2net_debug_read 3 52105 NULL
87726 +prism2_sta_send_mgmt_43916 prism2_sta_send_mgmt 5 43916 NULL
87727 +mgmt_device_found_14146 mgmt_device_found 10 14146 NULL
87728 +snd_seq_oss_readq_new_14283 snd_seq_oss_readq_new 2 14283 NULL
87729 +doc_probe_23285 doc_probe 1 23285 NULL
87730 +ppp_cp_event_2965 ppp_cp_event 6 2965 NULL
87731 +SendString_43928 SendString 3 43928 NULL
87732 +acpi_os_map_memory_11161 acpi_os_map_memory 1-2 11161 NULL
87733 +ceph_parse_server_name_60318 ceph_parse_server_name 2 60318 NULL
87734 +retry_count_read_52129 retry_count_read 3 52129 NULL
87735 +xlog_recover_add_to_trans_62839 xlog_recover_add_to_trans 4 62839 NULL
87736 +ioat2_alloc_ring_11172 ioat2_alloc_ring 2 11172 NULL
87737 +read_zero_19366 read_zero 3 19366 NULL
87738 +bch_alloc_4593 bch_alloc 1 4593 NULL
87739 +stats_dot11RTSFailureCount_read_43948 stats_dot11RTSFailureCount_read 3 43948 NULL
87740 +iwl_dbgfs_disable_ht40_read_35761 iwl_dbgfs_disable_ht40_read 3 35761 NULL
87741 +libipw_alloc_txb_27579 libipw_alloc_txb 1-2-3 27579 NULL
87742 +raid5_resize_63306 raid5_resize 2 63306 NULL
87743 +interpret_user_input_19393 interpret_user_input 2 19393 NULL
87744 +handle_eviocgbit_44193 handle_eviocgbit 3 44193 NULL
87745 +ieee80211_if_read_dot11MeshRetryTimeout_52168 ieee80211_if_read_dot11MeshRetryTimeout 3 52168 NULL
87746 +do_dmabuf_dirty_sou_3017 do_dmabuf_dirty_sou 7 3017 NULL
87747 +udf_alloc_i_data_35786 udf_alloc_i_data 2 35786 NULL
87748 +depth_write_3021 depth_write 3 3021 NULL
87749 +dccp_setsockopt_60367 dccp_setsockopt 5 60367 NULL
87750 +read_file_stations_35795 read_file_stations 3 35795 NULL
87751 +il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL
87752 +tipc_cfg_reply_alloc_27606 tipc_cfg_reply_alloc 1 27606 NULL
87753 +bcm_recvmsg_43992 bcm_recvmsg 4 43992 NULL
87754 +proc_pid_readlink_52186 proc_pid_readlink 3 52186 NULL
87755 +ubi_eba_atomic_leb_change_60379 ubi_eba_atomic_leb_change 5 60379 NULL
87756 +iwl_dbgfs_sleep_level_override_read_3038 iwl_dbgfs_sleep_level_override_read 3 3038 NULL
87757 +dvbdmx_write_19423 dvbdmx_write 3 19423 NULL
87758 +il3945_ucode_rx_stats_read_3048 il3945_ucode_rx_stats_read 3 3048 NULL
87759 +il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL
87760 +mthca_alloc_resize_buf_60394 mthca_alloc_resize_buf 3 60394 NULL
87761 +write_flush_procfs_44011 write_flush_procfs 3 44011 NULL
87762 +driver_names_read_60399 driver_names_read 3 60399 NULL
87763 +read_flush_procfs_27642 read_flush_procfs 3 27642 NULL
87764 +add_new_gdb_27643 add_new_gdb 3 27643 NULL
87765 +dac960_user_command_proc_write_3071 dac960_user_command_proc_write 3 3071 NULL
87766 +hugetlbfs_read_11268 hugetlbfs_read 3 11268 NULL
87767 +_alloc_mISDN_skb_52232 _alloc_mISDN_skb 3 52232 NULL
87768 +qnx6_readpages_27657 qnx6_readpages 4 27657 NULL
87769 +tx_tx_cmplt_read_35854 tx_tx_cmplt_read 3 35854 NULL
87770 +cdrom_read_cdda_old_27664 cdrom_read_cdda_old 4 27664 NULL
87771 +do_dmabuf_dirty_ldu_52241 do_dmabuf_dirty_ldu 6 52241 NULL
87772 +mthca_buf_alloc_35861 mthca_buf_alloc 2 35861 NULL
87773 +rx_data_60442 rx_data 4 60442 NULL
87774 +ttusb2_msg_3100 ttusb2_msg 4 3100 NULL
87775 +efivar_create_sysfs_entry_19485 efivar_create_sysfs_entry 2 19485 NULL
87776 +tcf_csum_ipv4_igmp_60446 tcf_csum_ipv4_igmp 3 60446 NULL
87777 +mdiobus_alloc_size_52259 mdiobus_alloc_size 1 52259 NULL
87778 +rt2x00debug_write_csr_64753 rt2x00debug_write_csr 3 64753 NULL
87779 +sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL
87780 +simple_write_to_buffer_3122 simple_write_to_buffer 5-2 3122 NULL
87781 +__tty_buffer_request_room_27700 __tty_buffer_request_room 2 27700 NULL
87782 +ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
87783 +fs_path_add_from_extent_buffer_27702 fs_path_add_from_extent_buffer 4 27702 NULL
87784 +tcp_mark_head_lost_35895 tcp_mark_head_lost 2 35895 NULL
87785 +skb_realloc_headroom_19516 skb_realloc_headroom 2 19516 NULL
87786 +atm_alloc_charge_19517 atm_alloc_charge 2 19517 NULL nohasharray
87787 +dev_alloc_skb_19517 dev_alloc_skb 1 19517 &atm_alloc_charge_19517
87788 +construct_key_11329 construct_key 3 11329 NULL
87789 +evm_write_key_27715 evm_write_key 3 27715 NULL
87790 +persistent_ram_buffer_map_11332 persistent_ram_buffer_map 1-2 11332 NULL
87791 +fill_write_buffer_3142 fill_write_buffer 3 3142 NULL
87792 +filldir_55137 filldir 3 55137 NULL
87793 +igmpv3_newpack_35912 igmpv3_newpack 2 35912 NULL
87794 +kernel_setsockopt_35913 kernel_setsockopt 5 35913 NULL
87795 +reg_w_buf_27724 reg_w_buf 3 27724 NULL
87796 +nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
87797 +compat_sys_migrate_pages_3157 compat_sys_migrate_pages 2 3157 NULL
87798 +a4t_cs_init_27734 a4t_cs_init 3 27734 NULL
87799 +sel_write_create_11353 sel_write_create 3 11353 NULL
87800 +tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 NULL
87801 +request_asymmetric_key_52317 request_asymmetric_key 2-4 52317 NULL
87802 +hwflags_read_52318 hwflags_read 3 52318 NULL
87803 +nfc_alloc_send_skb_3167 nfc_alloc_send_skb 4 3167 NULL
87804 +batadv_skb_head_push_11360 batadv_skb_head_push 2 11360 NULL
87805 +put_cmsg_compat_35937 put_cmsg_compat 4 35937 NULL
87806 +vmw_gmr_bind_44130 vmw_gmr_bind 3 44130 NULL
87807 +ath_tx_init_60515 ath_tx_init 2 60515 NULL
87808 +drm_property_create_enum_29201 drm_property_create_enum 5 29201 NULL
87809 +ntfs_rl_split_52328 ntfs_rl_split 2-4 52328 NULL
87810 +qib_get_base_info_11369 qib_get_base_info 3 11369 NULL
87811 +ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
87812 +test_unaligned_bulk_52333 test_unaligned_bulk 3 52333 NULL
87813 +nfsd_read_19568 nfsd_read 5 19568 NULL
87814 +cgroup_read_s64_19570 cgroup_read_s64 5 19570 NULL
87815 +hysdn_sched_rx_60533 hysdn_sched_rx 3 60533 NULL
87816 +mempool_create_node_3191 mempool_create_node 1 3191 NULL
87817 +kcalloc_27770 kcalloc 1-2 27770 NULL
87818 +shmem_pread_slow_3198 shmem_pread_slow 3 3198 NULL
87819 +bm_status_read_19583 bm_status_read 3 19583 NULL
87820 +v9fs_fid_readn_60544 v9fs_fid_readn 4 60544 NULL
87821 +zd_mac_rx_38296 zd_mac_rx 3 38296 NULL
87822 +nfs_parse_server_name_1899 nfs_parse_server_name 2 1899 NULL
87823 +ceph_buffer_new_35974 ceph_buffer_new 1 35974 NULL
87824 +acl_alloc_35979 acl_alloc 1 35979 NULL
87825 +copy_entries_to_user_52367 copy_entries_to_user 1 52367 NULL
87826 +___alloc_bootmem_11410 ___alloc_bootmem 1 11410 NULL
87827 +str_to_user_11411 str_to_user 2 11411 NULL
87828 +mem_fw_gen_free_mem_blks_read_11413 mem_fw_gen_free_mem_blks_read 3 11413 NULL
87829 +koneplus_sysfs_write_35993 koneplus_sysfs_write 6 35993 NULL
87830 +trace_options_read_11419 trace_options_read 3 11419 NULL
87831 +ttm_object_file_init_27804 ttm_object_file_init 2 27804 NULL
87832 +mpihelp_mul_27805 mpihelp_mul 5-3 27805 NULL
87833 +xd_read_multiple_pages_11422 xd_read_multiple_pages 5-4 11422 NULL
87834 +isdn_writebuf_stub_52383 isdn_writebuf_stub 4 52383 NULL
87835 +kimage_crash_alloc_3233 kimage_crash_alloc 3 3233 NULL
87836 +write_adapter_mem_3234 write_adapter_mem 3 3234 NULL
87837 +iwl_dbgfs_tx_queue_read_4635 iwl_dbgfs_tx_queue_read 3 4635 NULL
87838 +do_read_log_to_user_3236 do_read_log_to_user 4 3236 NULL
87839 +console_store_36007 console_store 4 36007 NULL
87840 +bttv_read_11432 bttv_read 3 11432 NULL
87841 +key_key_read_3241 key_key_read 3 3241 NULL
87842 +aer_inject_write_52399 aer_inject_write 3 52399 NULL
87843 +il3945_ucode_tx_stats_read_36016 il3945_ucode_tx_stats_read 3 36016 NULL
87844 +__feat_register_sp_64712 __feat_register_sp 6 64712 NULL
87845 +ib_alloc_device_26483 ib_alloc_device 1 26483 NULL
87846 +check_vendor_extension_3254 check_vendor_extension 1 3254 NULL
87847 +ieee80211_amsdu_to_8023s_15561 ieee80211_amsdu_to_8023s 5 15561 NULL
87848 +sys_listxattr_27833 sys_listxattr 3 27833 NULL
87849 +aac_rx_ioremap_52410 aac_rx_ioremap 2 52410 NULL
87850 +ubi_eba_write_leb_36029 ubi_eba_write_leb 5-6 36029 NULL
87851 +um_idi_write_18293 um_idi_write 3 18293 NULL
87852 +cgroup_file_write_52417 cgroup_file_write 3 52417 NULL
87853 +srp_alloc_iu_44227 srp_alloc_iu 2 44227 NULL
87854 +usbvision_rvmalloc_19655 usbvision_rvmalloc 1 19655 NULL
87855 +line6_midibuf_init_52425 line6_midibuf_init 2 52425 NULL
87856 +LoadBitmap_19658 LoadBitmap 2 19658 NULL
87857 +wl1273_fm_fops_write_60621 wl1273_fm_fops_write 3 60621 NULL
87858 +sys_init_module_36047 sys_init_module 2 36047 NULL
87859 +read_profile_27859 read_profile 3 27859 NULL
87860 +acl_alloc_stack_init_60630 acl_alloc_stack_init 1 60630 NULL
87861 +sca3000_read_first_n_hw_rb_11479 sca3000_read_first_n_hw_rb 2 11479 NULL
87862 +enlarge_skb_44248 enlarge_skb 2 44248 NULL nohasharray
87863 +xfs_buf_readahead_map_44248 xfs_buf_readahead_map 3 44248 &enlarge_skb_44248
87864 +scsi_track_queue_full_44239 scsi_track_queue_full 2 44239 NULL
87865 +refill_pool_19477 refill_pool 2 19477 NULL
87866 +ubifs_recover_leb_60639 ubifs_recover_leb 3 60639 NULL
87867 +ieee80211_if_read_dot11MeshHWMProotInterval_27873 ieee80211_if_read_dot11MeshHWMProotInterval 3 27873 NULL
87868 +iwl_dbgfs_thermal_throttling_read_38779 iwl_dbgfs_thermal_throttling_read 3 38779 NULL
87869 +xfs_file_buffered_aio_write_11492 xfs_file_buffered_aio_write 4 11492 NULL
87870 +__btrfs_free_reserved_extent_31207 __btrfs_free_reserved_extent 2 31207 NULL
87871 +tcp_sacktag_walk_49703 tcp_sacktag_walk 6 49703 NULL
87872 +ieee80211_if_write_tsf_36077 ieee80211_if_write_tsf 3 36077 NULL
87873 +arvo_sysfs_write_3311 arvo_sysfs_write 6 3311 NULL
87874 +sd_do_mode_sense_11507 sd_do_mode_sense 5 11507 NULL
87875 +unix_seqpacket_sendmsg_27893 unix_seqpacket_sendmsg 4 27893 NULL
87876 +kmem_zalloc_11510 kmem_zalloc 1 11510 NULL
87877 +hidraw_get_report_45609 hidraw_get_report 3 45609 NULL
87878 +dbDiscardAG_3322 dbDiscardAG 3 3322 NULL
87879 +ieee80211_alloc_txb_52477 ieee80211_alloc_txb 1-2 52477 NULL
87880 +ieee80211_if_read_dot11MeshConfirmTimeout_60670 ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 NULL
87881 +blk_init_tags_30592 blk_init_tags 1 30592 NULL
87882 +venus_symlink_23570 venus_symlink 4-6 23570 NULL
87883 +storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
87884 +aac_rkt_ioremap_3333 aac_rkt_ioremap 2 3333 NULL
87885 +sctp_make_init_ack_3335 sctp_make_init_ack 4 3335 NULL
87886 +read_from_oldmem_3337 read_from_oldmem 2 3337 NULL
87887 +tty_port_register_device_attr_3341 tty_port_register_device_attr 3 3341 NULL
87888 +ath6kl_keepalive_read_44303 ath6kl_keepalive_read 3 44303 NULL
87889 +vga_arb_write_36112 vga_arb_write 3 36112 NULL
87890 +int_tasklet_entry_52500 int_tasklet_entry 3 52500 NULL
87891 +acpi_os_allocate_zeroed_37422 acpi_os_allocate_zeroed 1 37422 NULL
87892 +simple_xattr_alloc_36118 simple_xattr_alloc 2 36118 NULL
87893 +il_dbgfs_interrupt_read_3351 il_dbgfs_interrupt_read 3 3351 NULL
87894 +memcpy_toiovecend_19736 memcpy_toiovecend 4-3 19736 NULL
87895 +gsm_control_rls_3353 gsm_control_rls 3 3353 NULL
87896 +ath6kl_usb_ctrl_msg_exchange_33327 ath6kl_usb_ctrl_msg_exchange 4 33327 NULL
87897 +dispatch_proc_write_44320 dispatch_proc_write 3 44320 NULL
87898 +pm_qos_power_write_52513 pm_qos_power_write 3 52513 NULL
87899 +mem_swapout_entry_32586 mem_swapout_entry 3 32586 NULL
87900 +gpio_power_read_36059 gpio_power_read 3 36059 NULL
87901 +vmalloc_exec_36132 vmalloc_exec 1 36132 NULL
87902 +init_data_container_60709 init_data_container 1 60709 NULL
87903 +p9_client_read_19750 p9_client_read 5 19750 NULL
87904 +skb_cow_data_11565 skb_cow_data 2 11565 NULL
87905 +pnpbios_proc_write_19758 pnpbios_proc_write 3 19758 NULL
87906 +ocfs2_readpages_19759 ocfs2_readpages 4 19759 NULL
87907 +ext3_readpages_36144 ext3_readpages 4 36144 NULL
87908 +mlx4_init_cmpt_table_11569 mlx4_init_cmpt_table 3 11569 NULL
87909 +iwl_trans_txq_alloc_36147 iwl_trans_txq_alloc 3 36147 NULL
87910 +alloc_vm_area_36149 alloc_vm_area 1 36149 NULL
87911 +ubi_eba_write_leb_st_44343 ubi_eba_write_leb_st 5 44343 NULL
87912 +tracing_clock_write_27961 tracing_clock_write 3 27961 NULL
87913 +b1_alloc_card_36155 b1_alloc_card 1 36155 NULL
87914 +oprofilefs_ulong_to_user_11582 oprofilefs_ulong_to_user 3 11582 NULL
87915 +mtdchar_writeoob_3393 mtdchar_writeoob 4 3393 NULL
87916 +nfs_fscache_get_super_cookie_44355 nfs_fscache_get_super_cookie 3 44355 NULL nohasharray
87917 +blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 &nfs_fscache_get_super_cookie_44355
87918 +saa7146_vmalloc_build_pgtable_19780 saa7146_vmalloc_build_pgtable 2 19780 NULL
87919 +send_stream_3397 send_stream 4 3397 NULL
87920 +snd_korg1212_copy_from_36169 snd_korg1212_copy_from 6 36169 NULL
87921 +fw_device_op_ioctl_11595 fw_device_op_ioctl 2 11595 NULL
87922 +ipx_recvmsg_44366 ipx_recvmsg 4 44366 NULL
87923 +hycapi_rx_capipkt_11602 hycapi_rx_capipkt 3 11602 NULL
87924 +msix_map_region_3411 msix_map_region 3 3411 NULL
87925 +sys_kexec_load_14222 sys_kexec_load 2 14222 NULL
87926 +rts_threshold_read_44384 rts_threshold_read 3 44384 NULL
87927 +iwl_dbgfs_rf_reset_read_26512 iwl_dbgfs_rf_reset_read 3 26512 NULL
87928 +pci_add_cap_save_buffer_3426 pci_add_cap_save_buffer 3 3426 NULL
87929 +crystalhd_create_dio_pool_3427 crystalhd_create_dio_pool 2 3427 NULL
87930 +sel_write_checkreqprot_60774 sel_write_checkreqprot 3 60774 NULL
87931 +opticon_write_60775 opticon_write 4 60775 NULL
87932 +snd_rawmidi_write_28008 snd_rawmidi_write 3 28008 NULL
87933 +acl_alloc_num_60778 acl_alloc_num 1-2 60778 NULL
87934 +aoedev_flush_44398 aoedev_flush 2 44398 NULL
87935 +irda_setsockopt_19824 irda_setsockopt 5 19824 NULL
87936 +drm_buffer_alloc_44405 drm_buffer_alloc 2 44405 NULL
87937 +pipe_iov_copy_to_user_3447 pipe_iov_copy_to_user 3 3447 NULL
87938 +vip_read_19832 vip_read 3 19832 NULL
87939 +osst_do_scsi_44410 osst_do_scsi 4 44410 NULL
87940 +llc_shdlc_alloc_skb_11645 llc_shdlc_alloc_skb 2 11645 NULL
87941 +security_context_to_sid_19839 security_context_to_sid 2 19839 NULL
87942 +sisusb_send_bridge_packet_11649 sisusb_send_bridge_packet 2 11649 NULL
87943 +nfqnl_mangle_36226 nfqnl_mangle 4-2 36226 NULL
87944 +atomic_stats_read_36228 atomic_stats_read 3 36228 NULL
87945 +crypt_alloc_buffer_19846 crypt_alloc_buffer 2 19846 NULL
87946 +ieee80211_if_read_rc_rateidx_mcs_mask_5ghz_44423 ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 NULL
87947 +sctp_setsockopt_maxburst_28041 sctp_setsockopt_maxburst 3 28041 NULL
87948 +alloc_skb_fclone_3467 alloc_skb_fclone 1 3467 NULL
87949 +cfg80211_mlme_register_mgmt_19852 cfg80211_mlme_register_mgmt 5 19852 NULL
87950 +viafb_iga1_odev_proc_write_36241 viafb_iga1_odev_proc_write 3 36241 NULL
87951 +cx231xx_init_vbi_isoc_28053 cx231xx_init_vbi_isoc 3-2 28053 NULL
87952 +ide_queue_pc_tail_11673 ide_queue_pc_tail 5 11673 NULL
87953 +llcp_allocate_pdu_19866 llcp_allocate_pdu 3 19866 NULL
87954 +lpfc_idiag_mbxacc_read_28061 lpfc_idiag_mbxacc_read 3 28061 NULL
87955 +btrfs_alloc_delayed_item_11678 btrfs_alloc_delayed_item 1 11678 NULL
87956 +compat_sys_mbind_36256 compat_sys_mbind 5 36256 NULL
87957 +security_context_to_sid_default_3492 security_context_to_sid_default 2 3492 NULL
87958 +sctp_setsockopt_hmac_ident_11687 sctp_setsockopt_hmac_ident 3 11687 NULL
87959 +blk_rq_map_kern_47004 blk_rq_map_kern 4 47004 NULL
87960 +split_11691 split 2 11691 NULL
87961 +brcmf_sdio_assert_info_52653 brcmf_sdio_assert_info 4 52653 NULL
87962 +snd_ctl_elem_user_tlv_11695 snd_ctl_elem_user_tlv 3 11695 NULL
87963 +pwr_tx_with_ps_read_60851 pwr_tx_with_ps_read 3 60851 NULL
87964 +usb_buffer_alloc_36276 usb_buffer_alloc 2 36276 NULL
87965 +__kfifo_alloc_22173 __kfifo_alloc 2-3 22173 NULL
87966 +codec_reg_read_file_36280 codec_reg_read_file 3 36280 NULL
87967 +gdth_init_isa_28091 gdth_init_isa 1 28091 NULL
87968 +readahead_tree_block_36285 readahead_tree_block 3 36285 NULL
87969 +mem_tx_free_mem_blks_read_3521 mem_tx_free_mem_blks_read 3 3521 NULL nohasharray
87970 +ieee80211_wx_set_gen_ie_rsl_3521 ieee80211_wx_set_gen_ie_rsl 3 3521 &mem_tx_free_mem_blks_read_3521
87971 +nouveau_cli_create_36293 nouveau_cli_create 3 36293 NULL
87972 +vmw_unlocked_ioctl_19212 vmw_unlocked_ioctl 2 19212 NULL
87973 +ps_upsd_max_apturn_read_19918 ps_upsd_max_apturn_read 3 19918 NULL
87974 +lpfc_debugfs_dif_err_read_36303 lpfc_debugfs_dif_err_read 3 36303 NULL
87975 +ieee80211_if_read_dropped_frames_ttl_44500 ieee80211_if_read_dropped_frames_ttl 3 44500 NULL
87976 +rx_defrag_need_defrag_read_28117 rx_defrag_need_defrag_read 3 28117 NULL
87977 +ad7879_spi_xfer_36311 ad7879_spi_xfer 3 36311 NULL
87978 +iwl_dbgfs_sram_read_44505 iwl_dbgfs_sram_read 3 44505 NULL
87979 +tcf_csum_ipv6_icmp_11738 tcf_csum_ipv6_icmp 4 11738 NULL
87980 +smk_write_load_self2_591 smk_write_load_self2 3 591 NULL
87981 +vgacon_adjust_height_28124 vgacon_adjust_height 2 28124 NULL
87982 +spidev_write_44510 spidev_write 3 44510 NULL
87983 +macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
87984 +dm_bio_prison_create_11749 dm_bio_prison_create 1 11749 NULL
87985 +iwl_dbgfs_rx_queue_read_19943 iwl_dbgfs_rx_queue_read 3 19943 NULL
87986 +fat_compat_ioctl_filldir_36328 fat_compat_ioctl_filldir 3 36328 NULL
87987 +iwl_dbgfs_qos_read_11753 iwl_dbgfs_qos_read 3 11753 NULL
87988 +nouveau_client_create__52715 nouveau_client_create_ 5 52715 NULL
87989 +iio_debugfs_read_reg_60908 iio_debugfs_read_reg 3 60908 NULL
87990 +kone_receive_4690 kone_receive 4 4690 NULL
87991 +alloc_smp_resp_3566 alloc_smp_resp 1 3566 NULL
87992 +jbd2_journal_init_revoke_table_36336 jbd2_journal_init_revoke_table 1 36336 NULL
87993 +evtchn_read_3569 evtchn_read 3 3569 NULL
87994 +video_read_28148 video_read 3 28148 NULL
87995 +compat_sys_setsockopt_3326 compat_sys_setsockopt 5 3326 NULL
87996 +sys_msgsnd_44537 sys_msgsnd 3 44537 NULL nohasharray
87997 +comm_write_44537 comm_write 3 44537 &sys_msgsnd_44537
87998 +stats_dot11FCSErrorCount_read_28154 stats_dot11FCSErrorCount_read 3 28154 NULL
87999 +ax25_send_frame_19964 ax25_send_frame 2 19964 NULL
88000 +blkcipher_next_slow_52733 blkcipher_next_slow 3-4 52733 NULL
88001 +relay_alloc_page_array_52735 relay_alloc_page_array 1 52735 NULL
88002 +ps_pspoll_timeouts_read_11776 ps_pspoll_timeouts_read 3 11776 NULL
88003 +vc_resize_3585 vc_resize 2-3 3585 NULL
88004 +gluebi_write_27905 gluebi_write 3 27905 NULL
88005 +ivtv_v4l2_read_1964 ivtv_v4l2_read 3 1964 NULL
88006 +c4iw_reject_cr_28174 c4iw_reject_cr 3 28174 NULL
88007 +rx_out_of_mem_read_10157 rx_out_of_mem_read 3 10157 NULL
88008 +attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
88009 +compat_sys_semtimedop_3606 compat_sys_semtimedop 3 3606 NULL
88010 +sctp_getsockopt_events_3607 sctp_getsockopt_events 2 3607 NULL
88011 +macvtap_get_user_28185 macvtap_get_user 4 28185 NULL
88012 +edac_mc_alloc_3611 edac_mc_alloc 4 3611 NULL
88013 +key_algorithm_read_57946 key_algorithm_read 3 57946 NULL
88014 +pti_char_write_60960 pti_char_write 3 60960 NULL
88015 +tx_tx_starts_read_3617 tx_tx_starts_read 3 3617 NULL
88016 +nouveau_mxm_create__28200 nouveau_mxm_create_ 4 28200 NULL
88017 +pcpu_fc_alloc_11818 pcpu_fc_alloc 2 11818 NULL
88018 +read_vbt_r10_60679 read_vbt_r10 1 60679 NULL
88019 +aligned_kmalloc_3628 aligned_kmalloc 1 3628 NULL
88020 +afs_alloc_flat_call_36399 afs_alloc_flat_call 2-3 36399 NULL
88021 +error_error_frame_read_39947 error_error_frame_read 3 39947 NULL nohasharray
88022 +fwnet_pd_new_39947 fwnet_pd_new 4 39947 &error_error_frame_read_39947
88023 +snd_pcm_alloc_vmalloc_buffer_44595 snd_pcm_alloc_vmalloc_buffer 2 44595 NULL
88024 +zerocopy_sg_from_iovec_11828 zerocopy_sg_from_iovec 3 11828 NULL
88025 +sctp_setsockopt_maxseg_11829 sctp_setsockopt_maxseg 3 11829 NULL
88026 +rts51x_read_status_11830 rts51x_read_status 4 11830 NULL
88027 +__a2mp_build_60987 __a2mp_build 3 60987 NULL
88028 +split_scan_timeout_read_20029 split_scan_timeout_read 3 20029 NULL
88029 +hsc_msg_alloc_60990 hsc_msg_alloc 1 60990 NULL
88030 +cm_copy_private_data_3649 cm_copy_private_data 2 3649 NULL
88031 +ath6kl_disconnect_timeout_read_3650 ath6kl_disconnect_timeout_read 3 3650 NULL
88032 +ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 NULL nohasharray
88033 +mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 &ieee80211_if_read_auto_open_plinks_38268
88034 +ip_set_alloc_57953 ip_set_alloc 1 57953 NULL
88035 +i915_compat_ioctl_3656 i915_compat_ioctl 2 3656 NULL
88036 +mb_cache_create_17307 mb_cache_create 2 17307 NULL
88037 +ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL
88038 +cfpkt_add_body_44630 cfpkt_add_body 3 44630 NULL
88039 +nf_nat_sdp_media_11863 nf_nat_sdp_media 9 11863 NULL
88040 +alloc_extent_buffer_52824 alloc_extent_buffer 3 52824 NULL
88041 +skb_cow_head_52495 skb_cow_head 2 52495 NULL
88042 +ecryptfs_copy_filename_11868 ecryptfs_copy_filename 4 11868 NULL
88043 +sctp_tsnmap_init_36446 sctp_tsnmap_init 2 36446 NULL
88044 +alloc_ieee80211_20063 alloc_ieee80211 1 20063 NULL
88045 +alloc_etherdev_mqs_36450 alloc_etherdev_mqs 1 36450 NULL
88046 +pwr_rcvd_beacons_read_52836 pwr_rcvd_beacons_read 3 52836 NULL
88047 +ieee80211_if_read_dropped_frames_no_route_33383 ieee80211_if_read_dropped_frames_no_route 3 33383 NULL
88048 +sctp_getsockopt_maxburst_42941 sctp_getsockopt_maxburst 2 42941 NULL
88049 +rawv6_sendmsg_20080 rawv6_sendmsg 4 20080 NULL
88050 +fuse_conn_limit_read_20084 fuse_conn_limit_read 3 20084 NULL
88051 +btmrvl_psmode_write_3703 btmrvl_psmode_write 3 3703 NULL
88052 +symtab_init_61050 symtab_init 2 61050 NULL
88053 +team_options_register_20091 team_options_register 3 20091 NULL
88054 +videobuf_pages_to_sg_3708 videobuf_pages_to_sg 2 3708 NULL
88055 +mon_bin_get_event_52863 mon_bin_get_event 4 52863 NULL
88056 +oom_adj_read_21847 oom_adj_read 3 21847 NULL
88057 +b43_nphy_load_samples_36481 b43_nphy_load_samples 3 36481 NULL
88058 +mpi_resize_44674 mpi_resize 2 44674 NULL
88059 +ip6_append_data_36490 ip6_append_data 4-5 36490 NULL nohasharray
88060 +tx_tx_checksum_result_read_36490 tx_tx_checksum_result_read 3 36490 &ip6_append_data_36490
88061 +kmalloc_slab_11917 kmalloc_slab 1 11917 NULL
88062 +rng_dev_read_41581 rng_dev_read 3 41581 NULL
88063 +nouveau_compat_ioctl_28305 nouveau_compat_ioctl 2 28305 NULL
88064 +cache_read_procfs_52882 cache_read_procfs 3 52882 NULL
88065 +fs_devrw_entry_11924 fs_devrw_entry 3 11924 NULL
88066 +hptiop_adjust_disk_queue_depth_20122 hptiop_adjust_disk_queue_depth 2 20122 NULL
88067 +xfs_trans_read_buf_map_37487 xfs_trans_read_buf_map 5 37487 NULL
88068 +ci_ll_write_3740 ci_ll_write 4 3740 NULL
88069 +snd_pcm_oss_read_28317 snd_pcm_oss_read 3 28317 NULL
88070 +kvm_kvzalloc_52894 kvm_kvzalloc 1 52894 NULL
88071 +ima_show_htable_value_57136 ima_show_htable_value 2 57136 NULL
88072 +mcam_v4l_read_36513 mcam_v4l_read 3 36513 NULL
88073 +dccp_feat_clone_sp_val_11942 dccp_feat_clone_sp_val 3 11942 NULL
88074 +pms_read_53873 pms_read 3 53873 NULL
88075 +ieee80211_if_read_fwded_frames_36520 ieee80211_if_read_fwded_frames 3 36520 NULL
88076 +get_derived_key_61100 get_derived_key 4 61100 NULL
88077 +bm_entry_write_28338 bm_entry_write 3 28338 NULL
88078 +_zd_iowrite32v_locked_44725 _zd_iowrite32v_locked 3 44725 NULL
88079 +tcp_copy_to_iovec_28344 tcp_copy_to_iovec 3 28344 NULL
88080 +clusterip_proc_write_44729 clusterip_proc_write 3 44729 NULL
88081 +dm_read_15674 dm_read 3 15674 NULL
88082 +cpu_type_read_36540 cpu_type_read 3 36540 NULL
88083 +__probe_kernel_read_61119 __probe_kernel_read 3 61119 NULL
88084 +nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL
88085 +kone_send_63435 kone_send 4 63435 NULL
88086 +alloc_rtllib_51136 alloc_rtllib 1 51136 NULL
88087 +key_tx_rx_count_read_44742 key_tx_rx_count_read 3 44742 NULL
88088 +tomoyo_commit_ok_20167 tomoyo_commit_ok 2 20167 NULL
88089 +iblock_get_bio_52936 iblock_get_bio 3 52936 NULL
88090 +__kfifo_to_user_36555 __kfifo_to_user 3 36555 NULL nohasharray
88091 +macvtap_do_read_36555 macvtap_do_read 4 36555 &__kfifo_to_user_36555
88092 +wep_addr_key_count_read_20174 wep_addr_key_count_read 3 20174 NULL
88093 +create_trace_probe_20175 create_trace_probe 1 20175 NULL
88094 +sctp_setsockopt_auth_key_3793 sctp_setsockopt_auth_key 3 3793 NULL
88095 +afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL
88096 +tnode_new_44757 tnode_new 3 44757 NULL nohasharray
88097 +pty_write_44757 pty_write 3 44757 &tnode_new_44757
88098 +ath6kl_send_go_probe_resp_21113 ath6kl_send_go_probe_resp 3 21113 NULL
88099 +kvm_read_guest_atomic_10765 kvm_read_guest_atomic 4 10765 NULL
88100 +iwl_dbgfs_nvm_read_23845 iwl_dbgfs_nvm_read 3 23845 NULL
88101 +send_packet_52960 send_packet 4 52960 NULL
88102 +dlmfs_file_read_28385 dlmfs_file_read 3 28385 NULL
88103 +ssb_bus_scan_36578 ssb_bus_scan 2 36578 NULL
88104 +ncp_file_write_3813 ncp_file_write 3 3813 NULL
88105 +tipc_port_reject_sections_55229 tipc_port_reject_sections 5 55229 NULL
88106 +tx_frag_cache_miss_read_28394 tx_frag_cache_miss_read 3 28394 NULL
88107 +set_bypass_pfs_28395 set_bypass_pfs 3 28395 NULL
88108 +put_cmsg_36589 put_cmsg 4 36589 NULL
88109 +__vmalloc_61168 __vmalloc 1 61168 NULL
88110 +llc_ui_recvmsg_3826 llc_ui_recvmsg 4 3826 NULL
88111 +sctp_setsockopt_44788 sctp_setsockopt 5 44788 NULL
88112 +read_file_tx_chainmask_3829 read_file_tx_chainmask 3 3829 NULL
88113 +pcnet32_realloc_rx_ring_36598 pcnet32_realloc_rx_ring 3 36598 NULL
88114 +event_oom_late_read_61175 event_oom_late_read 3 61175 NULL nohasharray
88115 +pair_device_61175 pair_device 4 61175 &event_oom_late_read_61175
88116 +sys_lsetxattr_61177 sys_lsetxattr 4 61177 NULL
88117 +tx_tx_exch_read_52986 tx_tx_exch_read 3 52986 NULL
88118 +nfs4_alloc_pages_48426 nfs4_alloc_pages 1 48426 NULL
88119 +rx_dropped_read_44799 rx_dropped_read 3 44799 NULL
88120 +batadv_check_management_packet_52993 batadv_check_management_packet 3 52993 NULL
88121 +tpci200_slot_map_space_3848 tpci200_slot_map_space 2 3848 NULL
88122 +regmap_bulk_write_59049 regmap_bulk_write 4 59049 NULL
88123 +create_one_cdev_3852 create_one_cdev 2 3852 NULL
88124 +fat_ioctl_filldir_36621 fat_ioctl_filldir 3 36621 NULL
88125 +smk_read_onlycap_3855 smk_read_onlycap 3 3855 NULL
88126 +mpage_readpages_28436 mpage_readpages 3 28436 NULL
88127 +cfpkt_append_61206 cfpkt_append 3 61206 NULL
88128 +btrfs_free_and_pin_reserved_extent_53016 btrfs_free_and_pin_reserved_extent 2 53016 NULL
88129 +rose_sendmsg_20249 rose_sendmsg 4 20249 NULL
88130 +get_fd_set_3866 get_fd_set 1 3866 NULL
88131 +megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL
88132 +rx_rx_defrag_read_2010 rx_rx_defrag_read 3 2010 NULL
88133 +unlink_queued_645 unlink_queued 3-4 645 NULL
88134 +il4965_ucode_tx_stats_read_12064 il4965_ucode_tx_stats_read 3 12064 NULL
88135 +sisusb_write_44834 sisusb_write 3 44834 NULL
88136 +smk_read_ambient_61220 smk_read_ambient 3 61220 NULL
88137 +raw_recvmsg_52529 raw_recvmsg 4 52529 NULL
88138 +alloc_irq_cpu_rmap_28459 alloc_irq_cpu_rmap 1 28459 NULL
88139 +ptc_proc_write_12076 ptc_proc_write 3 12076 NULL
88140 +hdlc_empty_fifo_18397 hdlc_empty_fifo 2 18397 NULL
88141 +uea_send_modem_cmd_3888 uea_send_modem_cmd 3 3888 NULL
88142 +h5_prepare_pkt_12085 h5_prepare_pkt 4 12085 NULL
88143 +nvram_write_3894 nvram_write 3 3894 NULL
88144 +osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
88145 +pipeline_pre_proc_swi_read_3898 pipeline_pre_proc_swi_read 3 3898 NULL
88146 +iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
88147 +vcs_write_3910 vcs_write 3 3910 NULL
88148 +sctp_make_abort_violation_27959 sctp_make_abort_violation 4 27959 NULL
88149 +mwifiex_debug_read_53074 mwifiex_debug_read 3 53074 NULL
88150 +dtim_interval_read_654 dtim_interval_read 3 654 NULL
88151 +btrfs_bio_alloc_61270 btrfs_bio_alloc 3 61270 NULL
88152 +packet_sendmsg_24954 packet_sendmsg 4 24954 NULL
88153 +alloc_bulk_urbs_generic_12127 alloc_bulk_urbs_generic 5 12127 NULL
88154 +do_tty_write_44896 do_tty_write 5 44896 NULL
88155 +set_powered_12129 set_powered 4 12129 NULL
88156 +qib_resize_cq_53090 qib_resize_cq 2 53090 NULL
88157 +snd_cs4281_BA1_read_20323 snd_cs4281_BA1_read 5 20323 NULL
88158 +nfs_writedata_alloc_12133 nfs_writedata_alloc 2 12133 NULL
88159 +ramoops_init_prz_12134 ramoops_init_prz 5 12134 NULL
88160 +ps_poll_upsd_utilization_read_28519 ps_poll_upsd_utilization_read 3 28519 NULL
88161 +hdlc_irq_one_3944 hdlc_irq_one 2 3944 NULL
88162 +rawv6_seticmpfilter_12137 rawv6_seticmpfilter 5 12137 NULL
88163 +vmw_fifo_reserve_12141 vmw_fifo_reserve 2 12141 NULL
88164 +i2400m_tx_stats_read_28527 i2400m_tx_stats_read 3 28527 NULL
88165 +rawsock_recvmsg_12144 rawsock_recvmsg 4 12144 NULL
88166 +btmrvl_sdio_host_to_card_12152 btmrvl_sdio_host_to_card 3 12152 NULL
88167 +vmbus_open_12154 vmbus_open 2-3 12154 NULL
88168 +capinc_tty_write_28539 capinc_tty_write 3 28539 NULL
88169 +sel_read_policycap_28544 sel_read_policycap 3 28544 NULL
88170 +mptctl_getiocinfo_28545 mptctl_getiocinfo 2 28545 NULL
88171 +line6_dumpreq_initbuf_53123 line6_dumpreq_initbuf 3 53123 NULL
88172 +snd_rawmidi_kernel_read1_36740 snd_rawmidi_kernel_read1 4 36740 NULL
88173 +gather_array_56641 gather_array 3 56641 NULL
88174 +cxgbi_device_register_36746 cxgbi_device_register 1-2 36746 NULL
88175 +b43legacy_debugfs_write_28556 b43legacy_debugfs_write 3 28556 NULL
88176 +dma_memcpy_to_iovec_12173 dma_memcpy_to_iovec 5 12173 NULL
88177 +debug_debug1_read_8856 debug_debug1_read 3 8856 NULL
88178 +ddp_make_gl_12179 ddp_make_gl 1 12179 NULL
88179 +ps_poll_ps_poll_max_ap_turn_read_53140 ps_poll_ps_poll_max_ap_turn_read 3 53140 NULL
88180 +scsi_deactivate_tcq_47086 scsi_deactivate_tcq 2 47086 NULL
88181 +regcache_rbtree_insert_to_block_58009 regcache_rbtree_insert_to_block 5 58009 NULL
88182 +do_add_counters_3992 do_add_counters 3 3992 NULL
88183 +mic_rx_pkts_read_27972 mic_rx_pkts_read 3 27972 NULL
88184 +smk_set_cipso_20379 smk_set_cipso 3 20379 NULL
88185 +st5481_setup_isocpipes_61340 st5481_setup_isocpipes 6-4 61340 NULL
88186 +rx_rx_wa_ba_not_expected_read_61341 rx_rx_wa_ba_not_expected_read 3 61341 NULL
88187 +dccpprobe_read_52549 dccpprobe_read 3 52549 NULL
88188 +ip4ip6_err_36772 ip4ip6_err 5 36772 NULL
88189 +mei_write_4005 mei_write 3 4005 NULL
88190 +snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL
88191 +ptp_filter_init_36780 ptp_filter_init 2 36780 NULL
88192 +__kfifo_from_user_20399 __kfifo_from_user 3 20399 NULL
88193 +tx_queue_status_read_44978 tx_queue_status_read 3 44978 NULL
88194 +debug_debug4_read_61367 debug_debug4_read 3 61367 NULL
88195 +receive_copy_12216 receive_copy 3 12216 NULL
88196 +aat2870_reg_read_file_12221 aat2870_reg_read_file 3 12221 NULL
88197 +proc_fault_inject_read_36802 proc_fault_inject_read 3 36802 NULL
88198 +bcsp_prepare_pkt_12961 bcsp_prepare_pkt 3 12961 NULL
88199 +ftdi_process_packet_45005 ftdi_process_packet 5 45005 NULL
88200 +change_xattr_61390 change_xattr 5 61390 NULL
88201 +find_skb_20431 find_skb 2 20431 NULL
88202 +hiddev_ioctl_36816 hiddev_ioctl 2 36816 NULL
88203 +fmc_send_cmd_20435 fmc_send_cmd 5 20435 NULL
88204 +tcp_fragment_20436 tcp_fragment 3 20436 NULL
88205 +ib_uverbs_unmarshall_recv_12251 ib_uverbs_unmarshall_recv 5 12251 NULL
88206 +ptrace_writedata_45021 ptrace_writedata 4 45021 NULL
88207 +simple_alloc_urb_60420 simple_alloc_urb 3 60420 NULL
88208 +sys_sethostname_42962 sys_sethostname 2 42962 NULL
88209 +int_hardware_entry_36833 int_hardware_entry 3 36833 NULL
88210 +tx_tx_start_data_read_53219 tx_tx_start_data_read 3 53219 NULL
88211 +pwr_rcvd_bcns_cnt_read_4774 pwr_rcvd_bcns_cnt_read 3 4774 NULL
88212 +fc_change_queue_depth_36841 fc_change_queue_depth 2 36841 NULL
88213 +shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
88214 +add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
88215 +sctp_make_asconf_4078 sctp_make_asconf 3 4078 NULL
88216 +vhci_get_user_45039 vhci_get_user 3 45039 NULL
88217 +ip_vs_icmp_xmit_v6_20464 ip_vs_icmp_xmit_v6 4 20464 NULL
88218 +compat_ipv6_setsockopt_20468 compat_ipv6_setsockopt 5 20468 NULL
88219 +read_buf_20469 read_buf 2 20469 NULL
88220 +cm_write_36858 cm_write 3 36858 NULL
88221 +note_last_dentry_12285 note_last_dentry 3 12285 NULL
88222 +blk_queue_resize_tags_28670 blk_queue_resize_tags 2 28670 NULL
88223 +il_dbgfs_nvm_read_12288 il_dbgfs_nvm_read 3 12288 NULL
88224 +sel_write_user_45060 sel_write_user 3 45060 NULL
88225 +tx_tx_data_programmed_read_36871 tx_tx_data_programmed_read 3 36871 NULL
88226 +__dev_alloc_skb_28681 __dev_alloc_skb 1 28681 NULL
88227 +svc_setsockopt_36876 svc_setsockopt 5 36876 NULL
88228 +snd_mixart_BA0_read_45069 snd_mixart_BA0_read 5 45069 NULL
88229 +fast_user_write_20494 fast_user_write 5 20494 NULL
88230 +unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
88231 +sctp_make_fwdtsn_53265 sctp_make_fwdtsn 3 53265 NULL
88232 +ib_ucm_alloc_data_36885 ib_ucm_alloc_data 3 36885 NULL
88233 +hidraw_report_event_20503 hidraw_report_event 3 20503 NULL
88234 +bt_sock_recvmsg_12316 bt_sock_recvmsg 4 12316 NULL
88235 +selinux_inode_notifysecctx_36896 selinux_inode_notifysecctx 3 36896 NULL
88236 +lirc_buffer_init_53282 lirc_buffer_init 3-2 53282 NULL
88237 +tipc_msg_build_12326 tipc_msg_build 4 12326 NULL
88238 +xfs_iext_realloc_direct_20521 xfs_iext_realloc_direct 2 20521 NULL
88239 +drbd_bm_resize_20522 drbd_bm_resize 2 20522 NULL
88240 +pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
88241 +OS_kmalloc_36909 OS_kmalloc 1 36909 NULL
88242 +osst_read_40237 osst_read 3 40237 NULL
88243 +tm6000_read_4151 tm6000_read 3 4151 NULL
88244 +amd_create_gatt_pages_20537 amd_create_gatt_pages 1 20537 NULL
88245 +usbdev_read_45114 usbdev_read 3 45114 NULL
88246 +drm_plane_init_28731 drm_plane_init 6 28731 NULL
88247 +spi_execute_28736 spi_execute 5 28736 NULL
88248 +snd_pcm_aio_write_28738 snd_pcm_aio_write 3 28738 NULL
88249 +mptctl_ioctl_12355 mptctl_ioctl 2 12355 NULL
88250 +get_alua_req_4166 get_alua_req 3 4166 NULL
88251 +scsi_report_opcode_20551 scsi_report_opcode 3 20551 NULL
88252 +blk_dropped_read_4168 blk_dropped_read 3 4168 NULL
88253 +venus_create_20555 venus_create 4 20555 NULL
88254 +__nf_ct_ext_add_length_12364 __nf_ct_ext_add_length 3 12364 NULL
88255 +batadv_interface_rx_53325 batadv_interface_rx 4 53325 NULL
88256 +receive_packet_12367 receive_packet 2 12367 NULL
88257 +squashfs_cache_init_41656 squashfs_cache_init 2 41656 NULL
88258 +mem_write_22232 mem_write 3 22232 NULL
88259 +read_file_bool_4180 read_file_bool 3 4180 NULL
88260 +send_to_tty_45141 send_to_tty 3 45141 NULL
88261 +fops_read_40672 fops_read 3 40672 NULL
88262 +cxio_init_resource_fifo_28764 cxio_init_resource_fifo 3 28764 NULL
88263 +write_leb_36957 write_leb 5 36957 NULL
88264 +xfs_iext_inline_to_direct_12384 xfs_iext_inline_to_direct 2 12384 NULL
88265 +device_write_45156 device_write 3 45156 NULL
88266 +i915_max_freq_read_20581 i915_max_freq_read 3 20581 NULL
88267 +tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
88268 +sparse_early_mem_maps_alloc_node_36971 sparse_early_mem_maps_alloc_node 4 36971 NULL
88269 +batadv_tt_append_diff_20588 batadv_tt_append_diff 4 20588 NULL
88270 +dvb_net_sec_callback_28786 dvb_net_sec_callback 2 28786 NULL
88271 +excessive_retries_read_60425 excessive_retries_read 3 60425 NULL
88272 +isp1760_register_628 isp1760_register 1-2 628 NULL
88273 +dvb_net_ioctl_61559 dvb_net_ioctl 2 61559 NULL
88274 +lirc_write_20604 lirc_write 3 20604 NULL
88275 +sel_write_member_28800 sel_write_member 3 28800 NULL
88276 +ieee80211_if_read_rc_rateidx_mask_2ghz_61570 ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 NULL
88277 +ieee80211_if_read_num_mcast_sta_12419 ieee80211_if_read_num_mcast_sta 3 12419 NULL
88278 +cgroup_file_read_28804 cgroup_file_read 3 28804 NULL
88279 +sys_msgrcv_959 sys_msgrcv 3 959 NULL
88280 +snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 NULL
88281 +pskb_network_may_pull_35336 pskb_network_may_pull 2 35336 NULL
88282 +auok190xfb_write_37001 auok190xfb_write 3 37001 NULL
88283 +ext4_kvzalloc_47605 ext4_kvzalloc 1 47605 NULL
88284 +setxattr_37006 setxattr 4 37006 NULL
88285 +add_child_45201 add_child 4 45201 NULL
88286 +seq_open_private_61589 seq_open_private 3 61589 NULL
88287 +iso_alloc_urb_45206 iso_alloc_urb 4-5 45206 NULL
88288 +__get_vm_area_61599 __get_vm_area 1 61599 NULL
88289 +iwl_dbgfs_rxon_filter_flags_read_28832 iwl_dbgfs_rxon_filter_flags_read 3 28832 NULL
88290 +nl_portid_hash_zalloc_34843 nl_portid_hash_zalloc 1 34843 NULL
88291 +kfifo_copy_to_user_20646 kfifo_copy_to_user 3 20646 NULL
88292 +spi_alloc_master_45223 spi_alloc_master 2 45223 NULL
88293 +ieee80211_if_read_dropped_frames_congestion_32603 ieee80211_if_read_dropped_frames_congestion 3 32603 NULL
88294 +vp_request_msix_vectors_28849 vp_request_msix_vectors 2 28849 NULL
88295 +oz_cdev_read_20659 oz_cdev_read 3 20659 NULL
88296 +configfs_write_file_61621 configfs_write_file 3 61621 NULL
88297 +ieee80211_if_read_drop_unencrypted_37053 ieee80211_if_read_drop_unencrypted 3 37053 NULL
88298 +ieee80211_rx_bss_info_61630 ieee80211_rx_bss_info 3 61630 NULL
88299 +isr_cmd_cmplt_read_53439 isr_cmd_cmplt_read 3 53439 NULL
88300 +ablkcipher_next_slow_47274 ablkcipher_next_slow 4-3 47274 NULL
88301 +i2o_parm_table_get_61635 i2o_parm_table_get 6 61635 NULL
88302 +snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL
88303 +x25_sendmsg_12487 x25_sendmsg 4 12487 NULL
88304 +dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 4 20682 NULL
88305 +get_packet_pg_28023 get_packet_pg 4 28023 NULL
88306 +rtllib_auth_challenge_12493 rtllib_auth_challenge 3 12493 NULL
88307 +acpi_tb_parse_root_table_53455 acpi_tb_parse_root_table 1 53455 NULL
88308 +resize_stripes_61650 resize_stripes 2 61650 NULL
88309 +n2_run_53459 n2_run 3 53459 NULL
88310 +packet_sendmsg_spkt_28885 packet_sendmsg_spkt 4 28885 NULL
88311 +parse_command_37079 parse_command 2 37079 NULL
88312 +read_file_tgt_int_stats_20697 read_file_tgt_int_stats 3 20697 NULL
88313 +alloc_ts_config_45775 alloc_ts_config 1 45775 NULL
88314 +ttm_page_pool_free_61661 ttm_page_pool_free 2 61661 NULL
88315 +input_mt_init_slots_45279 input_mt_init_slots 2 45279 NULL
88316 +pipeline_cs_rx_packet_in_read_37089 pipeline_cs_rx_packet_in_read 3 37089 NULL
88317 +bt_sock_stream_recvmsg_52518 bt_sock_stream_recvmsg 4 52518 NULL
88318 +insert_one_name_61668 insert_one_name 7 61668 NULL
88319 +nouveau_fifo_create__4327 nouveau_fifo_create_ 5-6 4327 NULL
88320 +pcpu_page_first_chunk_20712 pcpu_page_first_chunk 1 20712 NULL
88321 +iowarrior_read_53483 iowarrior_read 3 53483 NULL
88322 +osd_req_write_kern_53486 osd_req_write_kern 5 53486 NULL
88323 +lock_loop_61681 lock_loop 1 61681 NULL
88324 +snd_pcm_oss_sync1_45298 snd_pcm_oss_sync1 2 45298 NULL
88325 +security_context_to_sid_force_20724 security_context_to_sid_force 2 20724 NULL
88326 +brcmf_sdio_trap_info_48510 brcmf_sdio_trap_info 4 48510 NULL
88327 +ps_upsd_timeouts_read_28924 ps_upsd_timeouts_read 3 28924 NULL
88328 +vring_add_indirect_20737 vring_add_indirect 3-4 20737 NULL
88329 +push_rx_28939 push_rx 3 28939 NULL
88330 +__copy_from_user_inatomic_4365 __copy_from_user_inatomic 3 4365 NULL
88331 +vol_cdev_direct_write_20751 vol_cdev_direct_write 3 20751 NULL
88332 +idetape_chrdev_write_53976 idetape_chrdev_write 3 53976 NULL
88333 +sys_setdomainname_4373 sys_setdomainname 2 4373 NULL
88334 +fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
88335 +copy_vm86_regs_from_user_45340 copy_vm86_regs_from_user 3 45340 NULL
88336 +nouveau_dmaobj_create__61730 nouveau_dmaobj_create_ 6 61730 NULL
88337 +btrfs_trim_block_group_28963 btrfs_trim_block_group 3 28963 NULL
88338 +irda_sendmsg_4388 irda_sendmsg 4 4388 NULL
88339 +aac_srcv_ioremap_6659 aac_srcv_ioremap 2 6659 NULL
88340 +ubi_leb_change_10289 ubi_leb_change 4 10289 NULL
88341 +read_file_credit_dist_stats_54367 read_file_credit_dist_stats 3 54367 NULL
88342 +alloc_sched_domains_28972 alloc_sched_domains 1 28972 NULL
88343 +pcpu_extend_area_map_12589 pcpu_extend_area_map 2 12589 NULL
88344 +read_file_interrupt_61742 read_file_interrupt 3 61742 NULL nohasharray
88345 +read_file_regval_61742 read_file_regval 3 61742 &read_file_interrupt_61742
88346 +btmrvl_hscmd_write_27089 btmrvl_hscmd_write 3 27089 NULL
88347 +fb_alloc_cmap_gfp_20792 fb_alloc_cmap_gfp 2 20792 NULL
88348 +iwl_dbgfs_rxon_flags_read_20795 iwl_dbgfs_rxon_flags_read 3 20795 NULL
88349 +cxacru_cm_get_array_4412 cxacru_cm_get_array 4 4412 NULL
88350 +libfc_vport_create_4415 libfc_vport_create 2 4415 NULL
88351 +hci_sock_setsockopt_28993 hci_sock_setsockopt 5 28993 NULL
88352 +tstats_write_60432 tstats_write 3 60432 NULL nohasharray
88353 +kmalloc_60432 kmalloc 1 60432 &tstats_write_60432
88354 +bin_uuid_28999 bin_uuid 3 28999 NULL
88355 +sys_sendto_20809 sys_sendto 6 20809 NULL
88356 +alloc_page_cgroup_2919 alloc_page_cgroup 1 2919 NULL
88357 +set_registers_53582 set_registers 3 53582 NULL
88358 +fc_fcp_frame_alloc_12624 fc_fcp_frame_alloc 2 12624 NULL
88359 +do_pages_stat_4437 do_pages_stat 2 4437 NULL
88360 +lane2_associate_req_45398 lane2_associate_req 4 45398 NULL
88361 +tcf_csum_ipv4_udp_30777 tcf_csum_ipv4_udp 4 30777 NULL
88362 +pwr_rcvd_awake_bcns_cnt_read_12632 pwr_rcvd_awake_bcns_cnt_read 3 12632 NULL
88363 +bchannel_get_rxbuf_37213 bchannel_get_rxbuf 2 37213 NULL
88364 +keymap_store_45406 keymap_store 4 45406 NULL
88365 +pn_sendmsg_12640 pn_sendmsg 4 12640 NULL
88366 +dwc3_link_state_write_12641 dwc3_link_state_write 3 12641 NULL
88367 +wl1271_format_buffer_20834 wl1271_format_buffer 2 20834 NULL
88368 +il4965_rs_sta_dbgfs_rate_scale_data_read_37792 il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 NULL
88369 +pfkey_recvmsg_53604 pfkey_recvmsg 4 53604 NULL
88370 +xz_dec_init_29029 xz_dec_init 2 29029 NULL
88371 +regmap_access_read_file_37223 regmap_access_read_file 3 37223 NULL
88372 +tcp_dma_try_early_copy_4457 tcp_dma_try_early_copy 3 4457 NULL
88373 +__do_replace_37227 __do_replace 5 37227 NULL
88374 +dn_alloc_send_pskb_4465 dn_alloc_send_pskb 2 4465 NULL
88375 +ieee80211_if_read_ht_opmode_29044 ieee80211_if_read_ht_opmode 3 29044 NULL
88376 +rx_filter_dup_filter_read_37238 rx_filter_dup_filter_read 3 37238 NULL
88377 +at76_set_card_command_4471 at76_set_card_command 4 4471 NULL
88378 +rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 NULL
88379 +tso_fragment_29050 tso_fragment 3 29050 NULL
88380 +__alloc_pred_stack_26687 __alloc_pred_stack 2 26687 NULL
88381 +sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
88382 +sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
88383 +rproc_recovery_read_36245 rproc_recovery_read 3 36245 NULL
88384 +xd_write_multiple_pages_53633 xd_write_multiple_pages 6-5 53633 NULL
88385 +ccid_getsockopt_builtin_ccids_53634 ccid_getsockopt_builtin_ccids 2 53634 NULL
88386 +__iio_allocate_sw_ring_buffer_4843 __iio_allocate_sw_ring_buffer 3 4843 NULL
88387 +init_per_cpu_17880 init_per_cpu 1 17880 NULL
88388 +iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL
88389 +intel_render_ring_init_dri_45446 intel_render_ring_init_dri 2-3 45446 NULL
88390 +udp_sendmsg_4492 udp_sendmsg 4 4492 NULL
88391 +ieee80211_probereq_get_29069 ieee80211_probereq_get 4-6 29069 NULL
88392 +vmbus_establish_gpadl_4495 vmbus_establish_gpadl 3 4495 NULL
88393 +bfad_debugfs_write_regwr_61841 bfad_debugfs_write_regwr 3 61841 NULL
88394 +_alloc_cdb_cont_23609 _alloc_cdb_cont 2 23609 NULL
88395 +set_link_security_4502 set_link_security 4 4502 NULL
88396 +nr_sendmsg_53656 nr_sendmsg 4 53656 NULL
88397 +l1oip_socket_parse_4507 l1oip_socket_parse 4 4507 NULL
88398 +tracing_read_dyn_info_45468 tracing_read_dyn_info 3 45468 NULL
88399 +fs_path_prepare_for_add_61854 fs_path_prepare_for_add 2 61854 NULL
88400 +key_icverrors_read_20895 key_icverrors_read 3 20895 NULL
88401 +srp_target_alloc_37288 srp_target_alloc 3 37288 NULL
88402 +mmio_read_40348 mmio_read 4 40348 NULL
88403 +vfio_msi_enable_20906 vfio_msi_enable 2 20906 NULL
88404 +ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
88405 +compat_sys_readv_20911 compat_sys_readv 3 20911 NULL
88406 +ivtv_write_12721 ivtv_write 3 12721 NULL
88407 +fuse_fill_write_pages_53682 fuse_fill_write_pages 4 53682 NULL
88408 +islpci_mgt_transaction_23610 islpci_mgt_transaction 5 23610 NULL
88409 +sys_llistxattr_4532 sys_llistxattr 3 4532 NULL
88410 +isdn_ppp_write_29109 isdn_ppp_write 4 29109 NULL
88411 +da9052_group_write_4534 da9052_group_write 3 4534 NULL
88412 +v4l2_event_subscribe_53687 v4l2_event_subscribe 3 53687 NULL
88413 +jffs2_write_dirent_37311 jffs2_write_dirent 5 37311 NULL
88414 +key_rx_spec_read_12736 key_rx_spec_read 3 12736 NULL
88415 +tx_frag_bad_mblk_num_read_28064 tx_frag_bad_mblk_num_read 3 28064 NULL
88416 +videobuf_vmalloc_to_sg_4548 videobuf_vmalloc_to_sg 2 4548 NULL
88417 +rds_message_copy_from_user_45510 rds_message_copy_from_user 3 45510 NULL
88418 +ieee80211_rtl_auth_challenge_61897 ieee80211_rtl_auth_challenge 3 61897 NULL
88419 +send_msg_37323 send_msg 4 37323 NULL
88420 +brcmf_sdbrcm_membytes_37324 brcmf_sdbrcm_membytes 3-5 37324 NULL
88421 +l2cap_create_connless_pdu_37327 l2cap_create_connless_pdu 3 37327 NULL
88422 +clear_refs_write_61904 clear_refs_write 3 61904 NULL
88423 +scsi_mode_select_37330 scsi_mode_select 6 37330 NULL
88424 +rxrpc_server_sendmsg_37331 rxrpc_server_sendmsg 4 37331 NULL
88425 +altera_set_ir_post_20948 altera_set_ir_post 2 20948 NULL
88426 +virtqueue_add_buf_59470 virtqueue_add_buf 3-4 59470 NULL
88427 +proc_scsi_write_29142 proc_scsi_write 3 29142 NULL
88428 +dsp_buffer_alloc_11684 dsp_buffer_alloc 2 11684 NULL
88429 +rx_filter_arp_filter_read_61914 rx_filter_arp_filter_read 3 61914 NULL
88430 +reshape_ring_29147 reshape_ring 2 29147 NULL
88431 +cgroup_read_u64_45532 cgroup_read_u64 5 45532 NULL
88432 +au0828_init_isoc_61917 au0828_init_isoc 3-2 61917 NULL
88433 +copy_macs_45534 copy_macs 4 45534 NULL
88434 +sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL
88435 +listxattr_12769 listxattr 3 12769 NULL
88436 +xfs_buf_get_maps_4581 xfs_buf_get_maps 2 4581 NULL
88437 +wdm_write_53735 wdm_write 3 53735 NULL
88438 +snd_rme9652_playback_copy_20970 snd_rme9652_playback_copy 5 20970 NULL
88439 +send_bulk_static_data_61932 send_bulk_static_data 3 61932 NULL
88440 +cx18_copy_mdl_to_user_45549 cx18_copy_mdl_to_user 4 45549 NULL
88441 +mempool_create_29437 mempool_create 1 29437 NULL
88442 +platform_create_bundle_12785 platform_create_bundle 4-6 12785 NULL
88443 +brcmf_tx_frame_20978 brcmf_tx_frame 3 20978 NULL
88444 +sock_alloc_send_pskb_21246 sock_alloc_send_pskb 2 21246 NULL
88445 +stats_dot11ACKFailureCount_read_45558 stats_dot11ACKFailureCount_read 3 45558 NULL
88446 +alg_setsockopt_20985 alg_setsockopt 5 20985 NULL
88447 +venus_rmdir_45564 venus_rmdir 4 45564 NULL
88448 +scsi_adjust_queue_depth_12802 scsi_adjust_queue_depth 3 12802 NULL
88449 +rsa_extract_mpi_6973 rsa_extract_mpi 5 6973 NULL
88450 +squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
88451 +mgmt_event_12810 mgmt_event 4 12810 NULL
88452 +ntfs_rl_realloc_nofail_32173 ntfs_rl_realloc_nofail 3 32173 NULL
88453 +xfs_inumbers_fmt_12817 xfs_inumbers_fmt 3 12817 NULL
88454 +ipath_create_cq_45586 ipath_create_cq 2 45586 NULL
88455 +wusb_prf_256_29203 wusb_prf_256 7 29203 NULL nohasharray
88456 +alloc_group_attrs_29203 alloc_group_attrs 3 29203 &wusb_prf_256_29203
88457 +comedi_alloc_subdevices_29207 comedi_alloc_subdevices 2 29207 NULL
88458 +rdma_set_ib_paths_45592 rdma_set_ib_paths 3 45592 NULL
88459 +compat_udpv6_setsockopt_42981 compat_udpv6_setsockopt 5 42981 NULL
88460 +rds_iw_inc_copy_to_user_29214 rds_iw_inc_copy_to_user 3 29214 NULL
88461 +zd_usb_iowrite16v_async_23984 zd_usb_iowrite16v_async 3 23984 NULL
88462 +TSS_authhmac_12839 TSS_authhmac 3 12839 NULL
88463 +iwl_dbgfs_temperature_read_29224 iwl_dbgfs_temperature_read 3 29224 NULL
88464 +virtnet_send_command_61993 virtnet_send_command 5-6 61993 NULL
88465 +sys_getxattr_37418 sys_getxattr 4 37418 NULL
88466 +regmap_raw_write_53803 regmap_raw_write 4 53803 NULL
88467 +hci_sock_sendmsg_37420 hci_sock_sendmsg 4 37420 NULL
88468 +spidev_ioctl_12846 spidev_ioctl 2 12846 NULL
88469 +t4vf_pktgl_to_skb_39005 t4vf_pktgl_to_skb 2 39005 NULL
88470 +audit_log_n_hex_45617 audit_log_n_hex 3 45617 NULL
88471 +devm_ioremap_29235 devm_ioremap 2-3 29235 NULL
88472 +irq_domain_add_linear_29236 irq_domain_add_linear 2 29236 NULL
88473 +recover_peb_29238 recover_peb 6-7 29238 NULL
88474 +security_context_to_sid_core_29248 security_context_to_sid_core 2 29248 NULL
88475 +proc_fault_inject_write_21058 proc_fault_inject_write 3 21058 NULL
88476 +i915_gem_execbuffer_relocate_slow_25355 i915_gem_execbuffer_relocate_slow 7 25355 NULL
88477 +jffs2_do_unlink_62020 jffs2_do_unlink 4 62020 NULL
88478 +tun_sendmsg_10337 tun_sendmsg 4 10337 NULL
88479 +skb_add_data_nocache_4682 skb_add_data_nocache 4 4682 NULL
88480 +cx18_read_pos_4683 cx18_read_pos 3 4683 NULL
88481 +brcmf_sdio_dump_console_37455 brcmf_sdio_dump_console 4 37455 NULL
88482 +pmcraid_build_passthrough_ioadls_62034 pmcraid_build_passthrough_ioadls 2 62034 NULL
88483 +event_calibration_read_21083 event_calibration_read 3 21083 NULL
88484 +ppp_tx_cp_62044 ppp_tx_cp 5 62044 NULL
88485 +prism2_set_genericelement_29277 prism2_set_genericelement 3 29277 NULL
88486 +sctp_user_addto_chunk_62047 sctp_user_addto_chunk 2-3 62047 NULL
88487 +cxgbi_alloc_big_mem_4707 cxgbi_alloc_big_mem 1 4707 NULL
88488 +compat_mpctl_ioctl_45671 compat_mpctl_ioctl 2 45671 NULL
88489 +do_pselect_62061 do_pselect 1 62061 NULL
88490 +btmrvl_gpiogap_read_4718 btmrvl_gpiogap_read 3 4718 NULL
88491 +dgram_sendmsg_45679 dgram_sendmsg 4 45679 NULL
88492 +kmem_realloc_37489 kmem_realloc 2 37489 NULL
88493 +ati_create_gatt_pages_4722 ati_create_gatt_pages 1 4722 NULL nohasharray
88494 +show_header_4722 show_header 3 4722 &ati_create_gatt_pages_4722
88495 +ip_options_get_from_user_64958 ip_options_get_from_user 4 64958 NULL
88496 +sn9c102_read_29305 sn9c102_read 3 29305 NULL
88497 +pcpu_alloc_bootmem_62074 pcpu_alloc_bootmem 2 62074 NULL
88498 +smk_write_ambient_45691 smk_write_ambient 3 45691 NULL
88499 +sg_read_25799 sg_read 3 25799 NULL
88500 +uwb_rc_cmd_done_35892 uwb_rc_cmd_done 4 35892 NULL
88501 +ci_ll_init_12930 ci_ll_init 3 12930 NULL
88502 +unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 NULL
88503 +nouveau_namedb_create__26732 nouveau_namedb_create_ 7 26732 NULL
88504 +pwr_disable_ps_read_13176 pwr_disable_ps_read 3 13176 NULL
88505 +vmalloc_32_user_37519 vmalloc_32_user 1 37519 NULL
88506 +fd_do_writev_29329 fd_do_writev 3 29329 NULL
88507 +hugetlb_cgroup_read_49259 hugetlb_cgroup_read 5 49259 NULL
88508 +do_ip_vs_set_ctl_48641 do_ip_vs_set_ctl 4 48641 NULL
88509 +ecryptfs_send_message_locked_31801 ecryptfs_send_message_locked 2 31801 NULL
88510 +dvb_ca_en50221_init_45718 dvb_ca_en50221_init 4 45718 NULL
88511 +__alloc_ei_netdev_29338 __alloc_ei_netdev 1 29338 NULL
88512 +jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL
88513 +new_skb_21148 new_skb 1 21148 NULL
88514 +ath6kl_mgmt_tx_21153 ath6kl_mgmt_tx 9 21153 NULL
88515 +l2cap_sock_setsockopt_old_29346 l2cap_sock_setsockopt_old 4 29346 NULL
88516 +bm_status_write_12964 bm_status_write 3 12964 NULL
88517 +mmc_test_alloc_mem_28102 mmc_test_alloc_mem 3-2 28102 NULL
88518 +snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL
88519 +wusb_ccm_mac_32199 wusb_ccm_mac 7 32199 NULL
88520 +ip6_ufo_append_data_4780 ip6_ufo_append_data 5-6-7 4780 NULL
88521 +sep_create_dcb_dmatables_context_37551 sep_create_dcb_dmatables_context 6 37551 NULL nohasharray
88522 +nf_nat_mangle_tcp_packet_37551 nf_nat_mangle_tcp_packet 6-8 37551 &sep_create_dcb_dmatables_context_37551
88523 +bnad_debugfs_write_regrd_6706 bnad_debugfs_write_regrd 3 6706 NULL
88524 +rw_copy_check_uvector_45748 rw_copy_check_uvector 3 45748 NULL nohasharray
88525 +v4l2_ctrl_new_std_45748 v4l2_ctrl_new_std 5 45748 &rw_copy_check_uvector_45748
88526 +qib_diag_write_62133 qib_diag_write 3 62133 NULL
88527 +gnttab_expand_15817 gnttab_expand 1 15817 NULL
88528 +lkdtm_debugfs_read_45752 lkdtm_debugfs_read 3 45752 NULL
88529 +sctp_make_chunk_12986 sctp_make_chunk 4 12986 NULL
88530 +sisusb_send_packet_20891 sisusb_send_packet 2 20891 NULL
88531 +TransmitTcb_12989 TransmitTcb 4 12989 NULL
88532 +mthca_setup_cmd_doorbells_53954 mthca_setup_cmd_doorbells 2 53954 NULL
88533 +ncp__vol2io_4804 ncp__vol2io 5 4804 NULL
88534 +video_usercopy_62151 video_usercopy 2 62151 NULL
88535 +cx18_v4l2_read_21196 cx18_v4l2_read 3 21196 NULL
88536 +repair_io_failure_4815 repair_io_failure 4 4815 NULL
88537 +xhci_alloc_streams_37586 xhci_alloc_streams 5 37586 NULL
88538 +p9_client_zc_rpc_14345 p9_client_zc_rpc 7 14345 NULL
88539 +bnx2i_send_nl_mesg_53353 bnx2i_send_nl_mesg 4 53353 NULL
88540 +ipc_rcu_alloc_21208 ipc_rcu_alloc 1 21208 NULL
88541 +___alloc_bootmem_nopanic_53626 ___alloc_bootmem_nopanic 1 53626 NULL
88542 +subsystem_filter_write_13022 subsystem_filter_write 3 13022 NULL
88543 +tracing_max_lat_read_8890 tracing_max_lat_read 3 8890 NULL
88544 +raw_setsockopt_45800 raw_setsockopt 5 45800 NULL
88545 +alloc_upcall_62186 alloc_upcall 2 62186 NULL
88546 +kvm_read_guest_page_mmu_37611 kvm_read_guest_page_mmu 6 37611 NULL
88547 +drm_compat_ioctl_51717 drm_compat_ioctl 2 51717 NULL
88548 +lbs_rdbbp_read_45805 lbs_rdbbp_read 3 45805 NULL
88549 +pcpu_alloc_alloc_info_45813 pcpu_alloc_alloc_info 1-2 45813 NULL
88550 +input_ff_create_21240 input_ff_create 2 21240 NULL
88551 +sock_kmalloc_62205 sock_kmalloc 2 62205 NULL
88552 +key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL
88553 +__dn_setsockopt_13060 __dn_setsockopt 5 13060 NULL
88554 +amthi_read_45831 amthi_read 4 45831 NULL
88555 +cmpk_message_handle_tx_54024 cmpk_message_handle_tx 4 54024 NULL
88556 +hid_register_field_4874 hid_register_field 2-3 4874 NULL
88557 +ipxrtr_route_packet_54036 ipxrtr_route_packet 4 54036 NULL
88558 +vga_arb_read_4886 vga_arb_read 3 4886 NULL
88559 +sys_ipc_4889 sys_ipc 3 4889 NULL
88560 +bio_copy_user_iov_37660 bio_copy_user_iov 4 37660 NULL
88561 +smp_build_cmd_45853 smp_build_cmd 3 45853 NULL
88562 +x509_process_extension_45854 x509_process_extension 5 45854 NULL
88563 +nfsd_read_file_62241 nfsd_read_file 6 62241 NULL
88564 +pipeline_dec_packet_out_read_54052 pipeline_dec_packet_out_read 3 54052 NULL
88565 +do_register_entry_29478 do_register_entry 4 29478 NULL
88566 +isdn_write_45863 isdn_write 3 45863 NULL
88567 +rproc_state_read_54057 rproc_state_read 3 54057 NULL
88568 +ieee80211_if_read_rc_rateidx_mcs_mask_2ghz_37675 ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 NULL
88569 +regmap_map_read_file_37685 regmap_map_read_file 3 37685 NULL
88570 +alloc_smp_req_51337 alloc_smp_req 1 51337 NULL
88571 +vmw_gmr2_bind_21305 vmw_gmr2_bind 3 21305 NULL
88572 +get_rdac_req_45882 get_rdac_req 3 45882 NULL
88573 +_malloc_54077 _malloc 1 54077 NULL
88574 +add_res_range_21310 add_res_range 4 21310 NULL
88575 +bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
88576 +sys_process_vm_writev_4928 sys_process_vm_writev 3-5 4928 NULL
88577 +ntfs_rl_insert_4931 ntfs_rl_insert 2-4 4931 NULL
88578 +ip_make_skb_13129 ip_make_skb 5-6 13129 NULL
88579 +snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
88580 +ftrace_profile_read_21327 ftrace_profile_read 3 21327 NULL
88581 +atk_debugfs_ggrp_read_29522 atk_debugfs_ggrp_read 3 29522 NULL
88582 +altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL
88583 +il_dbgfs_sram_read_62296 il_dbgfs_sram_read 3 62296 NULL
88584 +create_xattr_54106 create_xattr 5 54106 NULL
88585 +udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
88586 +ep_write_59008 ep_write 3 59008 NULL
88587 +dbgfs_frame_45917 dbgfs_frame 3 45917 NULL
88588 +sparse_early_usemaps_alloc_pgdat_section_62304 sparse_early_usemaps_alloc_pgdat_section 2 62304 NULL
88589 +devm_kzalloc_4966 devm_kzalloc 2 4966 NULL
88590 +compat_rawv6_setsockopt_4967 compat_rawv6_setsockopt 5 4967 NULL
88591 +udf_sb_alloc_partition_maps_62313 udf_sb_alloc_partition_maps 2 62313 NULL
88592 +alloc_mr_45935 alloc_mr 1 45935 NULL
88593 +read_enabled_file_bool_37744 read_enabled_file_bool 3 37744 NULL
88594 +isku_receive_54130 isku_receive 4 54130 NULL
88595 +hfcpci_empty_bfifo_62323 hfcpci_empty_bfifo 4 62323 NULL
88596 +caif_stream_recvmsg_13173 caif_stream_recvmsg 4 13173 NULL
88597 +ocfs2_control_cfu_37750 ocfs2_control_cfu 2 37750 NULL
88598 +Wb35Reg_BurstWrite_62327 Wb35Reg_BurstWrite 4 62327 NULL
88599 +ipath_cdev_init_37752 ipath_cdev_init 1 37752 NULL
88600 +idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL
88601 +alloc_orinocodev_21371 alloc_orinocodev 1 21371 NULL
88602 +i915_max_freq_write_11350 i915_max_freq_write 3 11350 NULL
88603 +leaf_dealloc_29566 leaf_dealloc 3 29566 NULL
88604 +dgrp_net_ioctl_13183 dgrp_net_ioctl 2 13183 NULL
88605 +create_trace_uprobe_13184 create_trace_uprobe 1 13184 NULL
88606 +btmrvl_pscmd_write_29504 btmrvl_pscmd_write 3 29504 NULL
88607 +lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL
88608 +video_ioctl2_21380 video_ioctl2 2 21380 NULL
88609 +dccp_setsockopt_cscov_37766 dccp_setsockopt_cscov 2 37766 NULL
88610 +alloc_ldt_21972 alloc_ldt 2 21972 NULL
88611 +ipath_resize_cq_712 ipath_resize_cq 2 712 NULL
88612 +comedi_read_13199 comedi_read 3 13199 NULL
88613 +flash_write_62354 flash_write 3 62354 NULL
88614 +rb_simple_read_45972 rb_simple_read 3 45972 NULL
88615 +mmc_ext_csd_read_13205 mmc_ext_csd_read 3 13205 NULL
88616 +i2400m_zrealloc_2x_54166 i2400m_zrealloc_2x 3 54166 NULL nohasharray
88617 +memcpy_toiovec_54166 memcpy_toiovec 3 54166 &i2400m_zrealloc_2x_54166
88618 +l2tp_ip_recvmsg_22681 l2tp_ip_recvmsg 4 22681 NULL
88619 +proc_file_read_53905 proc_file_read 3 53905 NULL
88620 +mtd_device_parse_register_5024 mtd_device_parse_register 5 5024 NULL
88621 +acpi_tb_install_table_12988 acpi_tb_install_table 1 12988 NULL
88622 +set_wd_exp_mode_pfs_62372 set_wd_exp_mode_pfs 3 62372 NULL
88623 +reiserfs_readpages_35629 reiserfs_readpages 4 35629 NULL
88624 +acpi_os_read_memory_54186 acpi_os_read_memory 1-3 54186 NULL
88625 +smk_read_logging_37804 smk_read_logging 3 37804 NULL
88626 +rx_rx_timeout_read_62389 rx_rx_timeout_read 3 62389 NULL
88627 +mgt_set_varlen_60916 mgt_set_varlen 4 60916 NULL
88628 +cru_detect_11272 cru_detect 1 11272 NULL
88629 +altera_irscan_62396 altera_irscan 2 62396 NULL
88630 +alloc_perm_bits_1532 alloc_perm_bits 2 1532 NULL
88631 +aggr_size_tx_agg_vs_rate_read_21438 aggr_size_tx_agg_vs_rate_read 3 21438 NULL
88632 +fw_download_code_13249 fw_download_code 3 13249 NULL
88633 +init_tid_tabs_13252 init_tid_tabs 2-3-4 13252 NULL
88634 +tx_frag_cache_hit_read_29639 tx_frag_cache_hit_read 3 29639 NULL
88635 +set_ssp_62411 set_ssp 4 62411 NULL
88636 +nfc_hci_send_event_21452 nfc_hci_send_event 5 21452 NULL
88637 +sierra_setup_urb_46029 sierra_setup_urb 5 46029 NULL
88638 +get_free_entries_46030 get_free_entries 1 46030 NULL
88639 +__register_chrdev_54223 __register_chrdev 2-3 54223 NULL
88640 +sctp_make_abort_user_29654 sctp_make_abort_user 3 29654 NULL
88641 +snd_midi_channel_alloc_set_28153 snd_midi_channel_alloc_set 1 28153 NULL
88642 +carl9170_rx_13272 carl9170_rx 3 13272 NULL
88643 +snd_mixart_BA1_read_5082 snd_mixart_BA1_read 5 5082 NULL
88644 +il_dbgfs_qos_read_33615 il_dbgfs_qos_read 3 33615 NULL
88645 +sel_write_enforce_48998 sel_write_enforce 3 48998 NULL
88646 +kfifo_copy_from_user_5091 kfifo_copy_from_user 3 5091 NULL
88647 +netdev_alloc_skb_62437 netdev_alloc_skb 2 62437 NULL
88648 +dma_memcpy_pg_to_iovec_1725 dma_memcpy_pg_to_iovec 6 1725 NULL
88649 +platform_device_add_resources_13289 platform_device_add_resources 3 13289 NULL
88650 +xfs_dir2_block_to_sf_37868 xfs_dir2_block_to_sf 3 37868 NULL
88651 +xfs_dir2_sf_addname_hard_54254 xfs_dir2_sf_addname_hard 3 54254 NULL
88652 +read_file_xmit_21487 read_file_xmit 3 21487 NULL
88653 +e1000_check_copybreak_62448 e1000_check_copybreak 3 62448 NULL
88654 +ceph_msgpool_get_54258 ceph_msgpool_get 2 54258 NULL
88655 +irda_recvmsg_dgram_32631 irda_recvmsg_dgram 4 32631 NULL
88656 +wusb_prf_54261 wusb_prf 7 54261 NULL nohasharray
88657 +audio_write_54261 audio_write 4 54261 &wusb_prf_54261
88658 +sys_setxattr_37880 sys_setxattr 4 37880 NULL
88659 +dvb_net_sec_37884 dvb_net_sec 3 37884 NULL
88660 +mwifiex_getlog_read_54269 mwifiex_getlog_read 3 54269 NULL
88661 +isr_tx_procs_read_23084 isr_tx_procs_read 3 23084 NULL
88662 +mmc_alloc_sg_21504 mmc_alloc_sg 1 21504 NULL
88663 +qlcnic_alloc_sds_rings_26795 qlcnic_alloc_sds_rings 2 26795 NULL
88664 +v9fs_file_readn_36353 v9fs_file_readn 4 36353 NULL nohasharray
88665 +xz_dec_lzma2_create_36353 xz_dec_lzma2_create 2 36353 &v9fs_file_readn_36353
88666 +vfio_config_do_rw_46091 vfio_config_do_rw 3 46091 NULL
88667 +dma_skb_copy_datagram_iovec_21516 dma_skb_copy_datagram_iovec 3-5 21516 NULL
88668 +ata_host_alloc_46094 ata_host_alloc 2 46094 NULL
88669 +probes_write_29711 probes_write 3 29711 NULL
88670 +btrfs_file_aio_write_21520 btrfs_file_aio_write 4 21520 NULL
88671 +us122l_ctl_msg_13330 us122l_ctl_msg 8 13330 NULL
88672 +altera_set_dr_post_54291 altera_set_dr_post 2 54291 NULL
88673 +dlm_alloc_pagevec_54296 dlm_alloc_pagevec 1 54296 NULL
88674 +kvm_read_nested_guest_page_13337 kvm_read_nested_guest_page 5 13337 NULL
88675 +il_dbgfs_stations_read_21532 il_dbgfs_stations_read 3 21532 NULL
88676 +il3945_ucode_general_stats_read_46111 il3945_ucode_general_stats_read 3 46111 NULL
88677 +tipc_link_send_sections_fast_37920 tipc_link_send_sections_fast 4 37920 NULL
88678 +mlx4_en_create_rx_ring_62498 mlx4_en_create_rx_ring 3 62498 NULL
88679 +emi62_writememory_29731 emi62_writememory 4 29731 NULL
88680 +iwl_dbgfs_current_sleep_command_read_2081 iwl_dbgfs_current_sleep_command_read 3 2081 NULL
88681 +mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL
88682 +pkt_alloc_packet_data_37928 pkt_alloc_packet_data 1 37928 NULL
88683 +hscx_empty_fifo_13360 hscx_empty_fifo 2 13360 NULL
88684 +rxrpc_send_data_21553 rxrpc_send_data 5 21553 NULL
88685 +iwl_dbgfs_status_read_5171 iwl_dbgfs_status_read 3 5171 NULL
88686 +hfcsusb_rx_frame_52745 hfcsusb_rx_frame 3 52745 NULL
88687 +pn_raw_send_54330 pn_raw_send 2 54330 NULL
88688 +pep_sendmsg_62524 pep_sendmsg 4 62524 NULL
88689 +tomoyo_update_policy_40458 tomoyo_update_policy 2 40458 NULL
88690 +sfi_map_memory_5183 sfi_map_memory 1-2 5183 NULL
88691 +iso_sched_alloc_13377 iso_sched_alloc 1 13377 NULL nohasharray
88692 +wep_key_not_found_read_13377 wep_key_not_found_read 3 13377 &iso_sched_alloc_13377
88693 +test_iso_queue_62534 test_iso_queue 5 62534 NULL
88694 +__alloc_dev_table_54343 __alloc_dev_table 2 54343 NULL
88695 +ddp_clear_map_46152 ddp_clear_map 4 46152 NULL
88696 +cxio_hal_init_resource_29771 cxio_hal_init_resource 2-6-7 29771 NULL nohasharray
88697 +ip_vs_conn_fill_param_sync_29771 ip_vs_conn_fill_param_sync 6 29771 &cxio_hal_init_resource_29771
88698 +__netlink_change_ngroups_46156 __netlink_change_ngroups 2 46156 NULL
88699 +sco_sock_sendmsg_62542 sco_sock_sendmsg 4 62542 NULL
88700 +_osd_realloc_seg_54352 _osd_realloc_seg 3 54352 NULL
88701 +pipe_set_size_5204 pipe_set_size 2 5204 NULL
88702 +tcf_hash_create_54360 tcf_hash_create 4 54360 NULL
88703 +ppp_cp_parse_cr_5214 ppp_cp_parse_cr 4 5214 NULL
88704 +isdn_read_50021 isdn_read 3 50021 NULL
88705 +vfs_readlink_54368 vfs_readlink 3 54368 NULL
88706 +pep_recvmsg_19402 pep_recvmsg 4 19402 NULL
88707 +ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
88708 +subsystem_filter_read_62310 subsystem_filter_read 3 62310 NULL
88709 +ath6kl_debug_roam_tbl_event_5224 ath6kl_debug_roam_tbl_event 3 5224 NULL
88710 +netxen_alloc_sds_rings_13417 netxen_alloc_sds_rings 2 13417 NULL
88711 +ah_alloc_tmp_54378 ah_alloc_tmp 3-2 54378 NULL
88712 +ssb_ioremap_5228 ssb_ioremap 2 5228 NULL
88713 +xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL
88714 +xlog_do_recovery_pass_21618 xlog_do_recovery_pass 3 21618 NULL
88715 +isdn_ppp_skb_push_5236 isdn_ppp_skb_push 2 5236 NULL
88716 +get_subdir_62581 get_subdir 3 62581 NULL
88717 +iwl_dbgfs_power_save_status_read_54392 iwl_dbgfs_power_save_status_read 3 54392 NULL
88718 +sctp_abort_pkt_new_5241 sctp_abort_pkt_new 6 5241 NULL
88719 +vfs_readv_38011 vfs_readv 3 38011 NULL
88720 +keyring_read_13438 keyring_read 3 13438 NULL
88721 +sctp_setsockopt_peer_primary_addr_13440 sctp_setsockopt_peer_primary_addr 3 13440 NULL nohasharray
88722 +set_tap_pwup_pfs_13440 set_tap_pwup_pfs 3 13440 &sctp_setsockopt_peer_primary_addr_13440
88723 +ath6kl_cfg80211_connect_event_13443 ath6kl_cfg80211_connect_event 7-8-9 13443 NULL
88724 +crypto_shash_setkey_60483 crypto_shash_setkey 3 60483 NULL
88725 +tm6000_i2c_recv_regs_46215 tm6000_i2c_recv_regs 5 46215 NULL
88726 +packet_alloc_skb_62602 packet_alloc_skb 2-5-4 62602 NULL
88727 +prism2_send_mgmt_62605 prism2_send_mgmt 4 62605 NULL nohasharray
88728 +nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 &prism2_send_mgmt_62605
88729 +__probe_kernel_write_29842 __probe_kernel_write 3 29842 NULL
88730 +aggr_recv_addba_req_evt_38037 aggr_recv_addba_req_evt 4 38037 NULL
88731 +kvm_read_hva_atomic_29848 kvm_read_hva_atomic 3 29848 NULL
88732 +ftrace_write_29551 ftrace_write 3 29551 NULL
88733 +il_dbgfs_chain_noise_read_38044 il_dbgfs_chain_noise_read 3 38044 NULL
88734 +iscsi_post_host_event_13473 iscsi_post_host_event 4 13473 NULL
88735 +ems_pcmcia_add_card_62627 ems_pcmcia_add_card 2 62627 NULL
88736 +mpi_read_raw_data_46248 mpi_read_raw_data 2 46248 NULL
88737 +dev_write_7708 dev_write 3 7708 NULL
88738 +_xfs_buf_alloc_38058 _xfs_buf_alloc 3 38058 NULL
88739 +nsm_create_handle_38060 nsm_create_handle 4 38060 NULL
88740 +atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
88741 +ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL
88742 +sisusb_copy_memory_35016 sisusb_copy_memory 4 35016 NULL
88743 +lpfc_sli4_queue_alloc_62646 lpfc_sli4_queue_alloc 3 62646 NULL
88744 +alloc_ltalkdev_38071 alloc_ltalkdev 1 38071 NULL
88745 +nf_nat_ftp_46265 nf_nat_ftp 6 46265 NULL
88746 +nfc_targets_found_29886 nfc_targets_found 3 29886 NULL
88747 +mwifiex_alloc_sdio_mpa_buffers_60961 mwifiex_alloc_sdio_mpa_buffers 2-3 60961 NULL
88748 +evdev_ioctl_handler_21705 evdev_ioctl_handler 2 21705 NULL
88749 +request_key_auth_new_38092 request_key_auth_new 3 38092 NULL
88750 +proc_self_readlink_38094 proc_self_readlink 3 38094 NULL
88751 +smk_write_mapped_13519 smk_write_mapped 3 13519 NULL
88752 +isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL
88753 +bm_init_13529 bm_init 2 13529 NULL
88754 +check586_29914 check586 2 29914 NULL
88755 +snd_pcm_oss_write_38108 snd_pcm_oss_write 3 38108 NULL
88756 +pep_alloc_skb_46303 pep_alloc_skb 3 46303 NULL
88757 +reiserfs_allocate_list_bitmaps_21732 reiserfs_allocate_list_bitmaps 3 21732 NULL
88758 +ioremap_wc_62695 ioremap_wc 1-2 62695 NULL
88759 +pg_read_17276 pg_read 3 17276 NULL
88760 +edt_ft5x06_debugfs_raw_data_read_28002 edt_ft5x06_debugfs_raw_data_read 3 28002 NULL
88761 +ep0_read_38095 ep0_read 3 38095 NULL
88762 +batadv_iv_ogm_queue_add_46319 batadv_iv_ogm_queue_add 3 46319 NULL
88763 +__nf_nat_mangle_tcp_packet_21744 __nf_nat_mangle_tcp_packet 8-6 21744 NULL
88764 +ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL
88765 +cgroup_write_X64_54514 cgroup_write_X64 5 54514 NULL
88766 +bioset_integrity_create_62708 bioset_integrity_create 2 62708 NULL
88767 +rfc4106_set_key_54519 rfc4106_set_key 3 54519 NULL
88768 +cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 3-2 5368 NULL
88769 +mthca_alloc_init_21754 mthca_alloc_init 2 21754 NULL
88770 +l2down_create_21755 l2down_create 4 21755 NULL
88771 +alloc_tio_13564 alloc_tio 3 13564 NULL
88772 +viacam_read_54526 viacam_read 3 54526 NULL
88773 +btrfs_mksubvol_58240 btrfs_mksubvol 3 58240 NULL
88774 +tunables_read_36385 tunables_read 3 36385 NULL
88775 +opera1_usb_i2c_msgxfer_64521 opera1_usb_i2c_msgxfer 4 64521 NULL
88776 +iio_debugfs_write_reg_22742 iio_debugfs_write_reg 3 22742 NULL
88777 +read_file_antenna_13574 read_file_antenna 3 13574 NULL
88778 +__ntfs_copy_from_user_iovec_inatomic_38153 __ntfs_copy_from_user_iovec_inatomic 3-4 38153 NULL
88779 +setsockopt_54539 setsockopt 5 54539 NULL
88780 +gen_pool_add_21776 gen_pool_add 3 21776 NULL
88781 +iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL
88782 +tty_register_device_4544 tty_register_device 2 4544 NULL
88783 +cache_write_13589 cache_write 3 13589 NULL
88784 +mpt_lan_receive_post_turbo_13592 mpt_lan_receive_post_turbo 2 13592 NULL
88785 +xfs_da_grow_inode_int_21785 xfs_da_grow_inode_int 3 21785 NULL
88786 +key_replays_read_62746 key_replays_read 3 62746 NULL
88787 +smk_write_direct_46363 smk_write_direct 3 46363 NULL
88788 +aac_sa_ioremap_13596 aac_sa_ioremap 2 13596 NULL nohasharray
88789 +irias_new_octseq_value_13596 irias_new_octseq_value 2 13596 &aac_sa_ioremap_13596
88790 +mwifiex_usb_submit_rx_urb_54558 mwifiex_usb_submit_rx_urb 2 54558 NULL
88791 +irias_add_octseq_attrib_29983 irias_add_octseq_attrib 4 29983 NULL
88792 +cdev_add_38176 cdev_add 2-3 38176 NULL
88793 +brcmf_sdcard_recv_buf_38179 brcmf_sdcard_recv_buf 6 38179 NULL
88794 +__ioremap_caller_21800 __ioremap_caller 1-2 21800 NULL
88795 +alloc_and_copy_ftrace_hash_29368 alloc_and_copy_ftrace_hash 1 29368 NULL
88796 +ubi_dump_flash_46381 ubi_dump_flash 4 46381 NULL
88797 +swap_cgroup_swapon_13614 swap_cgroup_swapon 2 13614 NULL
88798 +wm8994_bulk_write_13615 wm8994_bulk_write 3 13615 NULL
88799 +init_chip_wc_pat_62768 init_chip_wc_pat 2 62768 NULL
88800 +nfsd_vfs_write_54577 nfsd_vfs_write 6 54577 NULL
88801 +ax25_sendmsg_62770 ax25_sendmsg 4 62770 NULL
88802 +rt2x00debug_write_rf_38195 rt2x00debug_write_rf 3 38195 NULL
88803 +fw_iso_buffer_init_54582 fw_iso_buffer_init 3 54582 NULL
88804 +vmalloc_user_32308 vmalloc_user 1 32308 NULL
88805 +get_ucode_user_38202 get_ucode_user 3 38202 NULL
88806 +ath6kl_wmi_startscan_cmd_33674 ath6kl_wmi_startscan_cmd 8 33674 NULL
88807 +fuse_file_aio_write_46399 fuse_file_aio_write 4 46399 NULL
88808 +mem_fwlog_free_mem_blks_read_59616 mem_fwlog_free_mem_blks_read 3 59616 NULL
88809 +packet_snd_13634 packet_snd 3 13634 NULL
88810 +alloc_netdev_mqs_30030 alloc_netdev_mqs 1 30030 NULL
88811 +osd_req_list_partition_collections_38223 osd_req_list_partition_collections 5 38223 NULL
88812 +sfi_map_table_5462 sfi_map_table 1 5462 NULL
88813 +blk_msg_write_13655 blk_msg_write 3 13655 NULL
88814 +scsi_vpd_inquiry_30040 scsi_vpd_inquiry 4 30040 NULL
88815 +fwSendNullPacket_54618 fwSendNullPacket 2 54618 NULL
88816 +drp_wmove_30043 drp_wmove 4 30043 NULL
88817 +tracing_total_entries_read_62817 tracing_total_entries_read 3 62817 NULL
88818 +cache_downcall_13666 cache_downcall 3 13666 NULL
88819 +xfs_efi_init_5476 xfs_efi_init 2 5476 NULL
88820 +ubi_leb_write_5478 ubi_leb_write 4-5 5478 NULL
88821 +cifs_security_flags_proc_write_5484 cifs_security_flags_proc_write 3 5484 NULL
88822 +cp210x_set_config_46447 cp210x_set_config 4 46447 NULL
88823 +sisusbcon_bmove_21873 sisusbcon_bmove 6-5-7 21873 NULL
88824 +debug_debug3_read_56894 debug_debug3_read 3 56894 NULL
88825 +tty_write_5494 tty_write 3 5494 NULL
88826 +iscsi_ping_comp_event_38263 iscsi_ping_comp_event 5 38263 NULL
88827 +tomoyo_update_domain_5498 tomoyo_update_domain 2 5498 NULL
88828 +rx_fcs_err_read_62844 rx_fcs_err_read 3 62844 NULL
88829 +irq_timeout_read_54653 irq_timeout_read 3 54653 NULL
88830 +teiup_create_43201 teiup_create 3 43201 NULL
88831 +dns_resolver_read_54658 dns_resolver_read 3 54658 NULL
88832 +filldir64_46469 filldir64 3 46469 NULL
88833 +line6_alloc_sysex_buffer_28225 line6_alloc_sysex_buffer 4 28225 NULL nohasharray
88834 +set_dis_disc_pfs_28225 set_dis_disc_pfs 3 28225 &line6_alloc_sysex_buffer_28225
88835 +fw_iso_buffer_alloc_13704 fw_iso_buffer_alloc 2 13704 NULL
88836 +ablkcipher_copy_iv_64140 ablkcipher_copy_iv 3 64140 NULL
88837 +cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL
88838 +snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL
88839 +tg3_run_loopback_30093 tg3_run_loopback 2 30093 NULL
88840 +spidev_message_5518 spidev_message 3 5518 NULL
88841 +vmemmap_alloc_block_43245 vmemmap_alloc_block 1 43245 NULL
88842 +bio_kmalloc_54672 bio_kmalloc 2 54672 NULL
88843 +ezusb_writememory_45976 ezusb_writememory 4 45976 NULL
88844 +evm_read_key_54674 evm_read_key 3 54674 NULL
88845 +sctp_make_op_error_space_5528 sctp_make_op_error_space 3 5528 NULL
88846 +l2tp_ip6_recvmsg_62874 l2tp_ip6_recvmsg 4 62874 NULL
88847 +qsfp_1_read_21915 qsfp_1_read 3 21915 NULL
88848 +do_proc_readlink_14096 do_proc_readlink 3 14096 NULL
88849 +em28xx_init_isoc_62883 em28xx_init_isoc 4 62883 NULL nohasharray
88850 +aoechr_write_62883 aoechr_write 3 62883 &em28xx_init_isoc_62883
88851 +resize_info_buffer_62889 resize_info_buffer 2 62889 NULL
88852 +if_spi_host_to_card_62890 if_spi_host_to_card 4 62890 NULL
88853 +u32_array_read_2219 u32_array_read 3 2219 NULL
88854 +pin_code_reply_46510 pin_code_reply 4 46510 NULL
88855 +mthca_alloc_cq_buf_46512 mthca_alloc_cq_buf 3 46512 NULL
88856 +sys_add_key_61288 sys_add_key 4 61288 NULL
88857 +kmsg_read_46514 kmsg_read 3 46514 NULL
88858 +audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
88859 +isr_rx_headers_read_38325 isr_rx_headers_read 3 38325 NULL
88860 +rfkill_fop_read_54711 rfkill_fop_read 3 54711 NULL
88861 +recv_stream_30138 recv_stream 4 30138 NULL
88862 +u_memcpya_30139 u_memcpya 2-3 30139 NULL
88863 +getdqbuf_62908 getdqbuf 1 62908 NULL
88864 +bdx_rxdb_create_46525 bdx_rxdb_create 1 46525 NULL
88865 +pwr_connection_out_of_sync_read_35061 pwr_connection_out_of_sync_read 3 35061 NULL
88866 +_add_sg_continuation_descriptor_54721 _add_sg_continuation_descriptor 3 54721 NULL
88867 +fc_frame_alloc_fill_59394 fc_frame_alloc_fill 2 59394 NULL
88868 +fir16_create_5574 fir16_create 3 5574 NULL
88869 +ieee802154_alloc_device_13767 ieee802154_alloc_device 1 13767 NULL
88870 +pt_write_40159 pt_write 3 40159 NULL
88871 +bioset_create_5580 bioset_create 1 5580 NULL
88872 +ocfs2_control_write_54737 ocfs2_control_write 3 54737 NULL
88873 +fb_sys_read_13778 fb_sys_read 3 13778 NULL
88874 +oz_ep_alloc_5587 oz_ep_alloc 2 5587 NULL
88875 +kzalloc_54740 kzalloc 1 54740 NULL
88876 +ipath_reg_phys_mr_23918 ipath_reg_phys_mr 3 23918 NULL nohasharray
88877 +mpihelp_mul_karatsuba_case_23918 mpihelp_mul_karatsuba_case 5-3 23918 &ipath_reg_phys_mr_23918
88878 +do_msgrcv_5590 do_msgrcv 4 5590 NULL
88879 +wep_iv_read_54744 wep_iv_read 3 54744 NULL
88880 +link_send_sections_long_46556 link_send_sections_long 4 46556 NULL
88881 +ath6kl_mgmt_powersave_ap_13791 ath6kl_mgmt_powersave_ap 6 13791 NULL
88882 +iio_event_chrdev_read_54757 iio_event_chrdev_read 3 54757 NULL
88883 +batadv_iv_ogm_aggregate_new_54761 batadv_iv_ogm_aggregate_new 2 54761 NULL
88884 +ldm_frag_add_5611 ldm_frag_add 2 5611 NULL
88885 +cx25821_video_ioctl_30188 cx25821_video_ioctl 2 30188 NULL
88886 +mempool_create_page_pool_30189 mempool_create_page_pool 1 30189 NULL
88887 +rxpipe_descr_host_int_trig_rx_data_read_22001 rxpipe_descr_host_int_trig_rx_data_read 3 22001 NULL nohasharray
88888 +compat_rw_copy_check_uvector_22001 compat_rw_copy_check_uvector 3 22001 &rxpipe_descr_host_int_trig_rx_data_read_22001
88889 +drm_property_create_bitmask_30195 drm_property_create_bitmask 5 30195 NULL
88890 +dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
88891 +usblp_ioctl_30203 usblp_ioctl 2 30203 NULL
88892 +hsi_register_board_info_13820 hsi_register_board_info 2 13820 NULL
88893 +hidp_output_raw_report_5629 hidp_output_raw_report 3 5629 NULL
88894 +nfs_idmap_request_key_30208 nfs_idmap_request_key 3 30208 NULL
88895 +read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL
88896 +flexcop_device_kmalloc_54793 flexcop_device_kmalloc 1 54793 NULL
88897 +snd_ac97_pcm_assign_30218 snd_ac97_pcm_assign 2 30218 NULL
88898 +ti_recv_22027 ti_recv 4 22027 NULL
88899 +ttm_dma_page_pool_free_34135 ttm_dma_page_pool_free 2 34135 NULL
88900 +ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
88901 +nfsd_write_54809 nfsd_write 6 54809 NULL
88902 +evdev_ioctl_compat_13851 evdev_ioctl_compat 2 13851 NULL
88903 +pcnet32_realloc_tx_ring_38428 pcnet32_realloc_tx_ring 3 38428 NULL
88904 +posix_clock_register_5662 posix_clock_register 2 5662 NULL
88905 +pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
88906 +get_skb_63008 get_skb 2 63008 NULL
88907 +zd_usb_read_fw_22049 zd_usb_read_fw 4 22049 NULL
88908 +netlink_send_38434 netlink_send 5 38434 NULL
88909 +atalk_recvmsg_22053 atalk_recvmsg 4 22053 NULL
88910 +compat_ip_setsockopt_13870 compat_ip_setsockopt 5 13870 NULL nohasharray
88911 +alloc_trace_uprobe_13870 alloc_trace_uprobe 3 13870 &compat_ip_setsockopt_13870
88912 +aircable_process_packet_46639 aircable_process_packet 5 46639 NULL
88913 +generic_perform_write_54832 generic_perform_write 3 54832 NULL
88914 +write_rio_54837 write_rio 3 54837 NULL
88915 +nouveau_engctx_create__54839 nouveau_engctx_create_ 8 54839 NULL
88916 +__vb2_perform_fileio_63033 __vb2_perform_fileio 3 63033 NULL
88917 +pipeline_defrag_to_csum_swi_read_63037 pipeline_defrag_to_csum_swi_read 3 63037 NULL
88918 +get_arg_5694 get_arg 3 5694 NULL
88919 +isr_pci_pm_read_30271 isr_pci_pm_read 3 30271 NULL
88920 +ufx_ops_write_54848 ufx_ops_write 3 54848 NULL
88921 +compat_readv_30273 compat_readv 3 30273 NULL
88922 +printer_read_54851 printer_read 3 54851 NULL
88923 +mem_rw_22085 mem_rw 3 22085 NULL
88924 +i915_min_freq_read_38470 i915_min_freq_read 3 38470 NULL
88925 +alloc_ep_req_54860 alloc_ep_req 2 54860 NULL
88926 +lowpan_fragment_xmit_22095 lowpan_fragment_xmit 3-4 22095 NULL
88927 +broadsheet_spiflash_rewrite_sector_54864 broadsheet_spiflash_rewrite_sector 2 54864 NULL
88928 +skcipher_sendmsg_30290 skcipher_sendmsg 4 30290 NULL
88929 +unlink1_63059 unlink1 3 63059 NULL
88930 +picolcd_fb_write_2318 picolcd_fb_write 3 2318 NULL
88931 +pipeline_sec_frag_swi_read_30294 pipeline_sec_frag_swi_read 3 30294 NULL
88932 +__do_krealloc_54389 __do_krealloc 2 54389 NULL
88933 +tcp_sendmsg_30296 tcp_sendmsg 4 30296 NULL
88934 +tcf_csum_ipv6_tcp_54877 tcf_csum_ipv6_tcp 4 54877 NULL
88935 +vmw_kms_readback_5727 vmw_kms_readback 6 5727 NULL
88936 +replay_log_leb_18704 replay_log_leb 3 18704 NULL
88937 +rts51x_transfer_data_partial_5735 rts51x_transfer_data_partial 6 5735 NULL
88938 +rx_decrypt_key_not_found_read_37820 rx_decrypt_key_not_found_read 3 37820 NULL
88939 +alloc_data_packet_46698 alloc_data_packet 1 46698 NULL
88940 +dev_names_read_38509 dev_names_read 3 38509 NULL
88941 +iscsi_create_iface_38510 iscsi_create_iface 5 38510 NULL
88942 +sep_prepare_input_output_dma_table_in_dcb_63087 sep_prepare_input_output_dma_table_in_dcb 4-5-2-3 63087 NULL
88943 +get_packet_5747 get_packet 3 5747 NULL
88944 +ieee80211_if_read_dot11MeshForwarding_13940 ieee80211_if_read_dot11MeshForwarding 3 13940 NULL
88945 +drm_malloc_ab_16831 drm_malloc_ab 1-2 16831 NULL
88946 +event_rx_mismatch_read_38518 event_rx_mismatch_read 3 38518 NULL
88947 +iwl_dbgfs_protection_mode_read_13943 iwl_dbgfs_protection_mode_read 3 13943 NULL
88948 +ieee80211_if_read_min_discovery_timeout_13946 ieee80211_if_read_min_discovery_timeout 3 13946 NULL
88949 +erst_dbg_write_46715 erst_dbg_write 3 46715 NULL
88950 +ath6kl_lrssi_roam_read_61022 ath6kl_lrssi_roam_read 3 61022 NULL
88951 +lpfc_idiag_queacc_read_13950 lpfc_idiag_queacc_read 3 13950 NULL
88952 +xfs_buf_get_uncached_51477 xfs_buf_get_uncached 2 51477 NULL
88953 +iscsi_pool_init_54913 iscsi_pool_init 2-4 54913 NULL
88954 +_l2_alloc_skb_11883 _l2_alloc_skb 1 11883 NULL
88955 +resource_from_user_30341 resource_from_user 3 30341 NULL
88956 +scsi_dispatch_cmd_entry_49848 scsi_dispatch_cmd_entry 3 49848 NULL
88957 +sound_write_5102 sound_write 3 5102 NULL
88958 +pn533_dep_link_up_22154 pn533_dep_link_up 5 22154 NULL
88959 +iwl_dbgfs_sensitivity_read_63116 iwl_dbgfs_sensitivity_read 3 63116 NULL
88960 +irq_domain_add_simple_46734 irq_domain_add_simple 2 46734 NULL
88961 +sctp_setsockopt_autoclose_5775 sctp_setsockopt_autoclose 3 5775 NULL
88962 +__vmalloc_node_flags_30352 __vmalloc_node_flags 1 30352 NULL
88963 +btrfs_discard_extent_38547 btrfs_discard_extent 2 38547 NULL
88964 +tx_frag_init_called_read_48377 tx_frag_init_called_read 3 48377 NULL
88965 +com90xx_found_13974 com90xx_found 3 13974 NULL
88966 +compat_sys_writev_5784 compat_sys_writev 3 5784 NULL
88967 +qcam_read_13977 qcam_read 3 13977 NULL
88968 +__vxge_hw_blockpool_malloc_5786 __vxge_hw_blockpool_malloc 2 5786 NULL
88969 +dvb_demux_read_13981 dvb_demux_read 3 13981 NULL
88970 +virtblk_add_buf_wait_54943 virtblk_add_buf_wait 3-4 54943 NULL
88971 +wl12xx_cmd_build_probe_req_54946 wl12xx_cmd_build_probe_req 6-8 54946 NULL
88972 +irda_sendmsg_dgram_38563 irda_sendmsg_dgram 4 38563 NULL
88973 +il4965_rs_sta_dbgfs_scale_table_read_38564 il4965_rs_sta_dbgfs_scale_table_read 3 38564 NULL
88974 +generic_readlink_32654 generic_readlink 3 32654 NULL
88975 +ieee80211_bss_info_update_13991 ieee80211_bss_info_update 4 13991 NULL
88976 +sys_get_mempolicy_30379 sys_get_mempolicy 3 30379 NULL
88977 +iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
88978 +skb_copy_datagram_iovec_5806 skb_copy_datagram_iovec 2-4 5806 NULL
88979 +wacom_set_device_mode_26280 wacom_set_device_mode 3 26280 NULL
88980 +trace_options_core_read_47390 trace_options_core_read 3 47390 NULL
88981 +int_hw_irq_en_46776 int_hw_irq_en 3 46776 NULL
88982 +c4iw_init_resource_30393 c4iw_init_resource 2-3 30393 NULL
88983 +__alloc_bootmem_node_nopanic_6432 __alloc_bootmem_node_nopanic 2 6432 NULL
88984 +__proc_file_read_54978 __proc_file_read 3 54978 NULL
88985 +concat_writev_21451 concat_writev 3 21451 NULL
88986 +smk_write_revoke_subj_63173 smk_write_revoke_subj 3 63173 NULL
88987 +_queue_data_54983 _queue_data 4 54983 NULL
88988 +_sys_packet_req_46793 _sys_packet_req 4 46793 NULL
88989 +pfkey_sendmsg_47394 pfkey_sendmsg 4 47394 NULL
88990 +extend_netdev_table_21453 extend_netdev_table 2 21453 NULL
88991 +rfcomm_sock_recvmsg_22227 rfcomm_sock_recvmsg 4 22227 NULL
88992 +vb2_fop_write_30420 vb2_fop_write 3 30420 NULL
88993 +ceph_msg_new_5846 ceph_msg_new 2 5846 NULL
88994 +ixgb_check_copybreak_5847 ixgb_check_copybreak 3 5847 NULL
88995 +setup_req_5848 setup_req 3 5848 NULL
88996 +read_def_modal_eeprom_14041 read_def_modal_eeprom 3 14041 NULL
88997 +rx_filter_max_arp_queue_dep_read_5851 rx_filter_max_arp_queue_dep_read 3 5851 NULL
88998 +rds_ib_inc_copy_to_user_55007 rds_ib_inc_copy_to_user 3 55007 NULL
88999 +alloc_ctrl_packet_44667 alloc_ctrl_packet 1 44667 NULL
89000 +rbd_create_rw_ops_55297 rbd_create_rw_ops 1 55297 NULL
89001 +compat_sys_move_pages_5861 compat_sys_move_pages 2 5861 NULL
89002 +cx231xx_v4l2_read_55014 cx231xx_v4l2_read 3 55014 NULL
89003 +sta_agg_status_read_14058 sta_agg_status_read 3 14058 NULL
89004 +compat_process_vm_rw_22254 compat_process_vm_rw 3-5 22254 NULL
89005 +ext2_readpages_38640 ext2_readpages 4 38640 NULL
89006 +cma_create_area_38642 cma_create_area 2 38642 NULL
89007 +audit_init_entry_38644 audit_init_entry 1 38644 NULL
89008 +sriov_enable_59689 sriov_enable 2 59689 NULL
89009 +enable_write_30456 enable_write 3 30456 NULL
89010 +shmem_pwrite_fast_46842 shmem_pwrite_fast 3 46842 NULL
89011 +tx_tx_template_programmed_read_30461 tx_tx_template_programmed_read 3 30461 NULL
89012 +mmc_send_cxd_data_38655 mmc_send_cxd_data 5 38655 NULL
89013 +zoran_ioctl_30465 zoran_ioctl 2 30465 NULL
89014 +tcp_manip_pkt_16563 tcp_manip_pkt 4 16563 NULL
89015 +qla2x00_adjust_sdev_qdepth_up_20097 qla2x00_adjust_sdev_qdepth_up 2 20097 NULL
89016 +nouveau_instmem_create__38664 nouveau_instmem_create_ 4 38664 NULL
89017 +ieee80211_mgmt_tx_46860 ieee80211_mgmt_tx 9 46860 NULL
89018 +port_show_regs_5904 port_show_regs 3 5904 NULL
89019 +nvme_alloc_queue_46865 nvme_alloc_queue 3 46865 NULL
89020 +ptp_read_63251 ptp_read 4 63251 NULL
89021 +uhci_debug_read_5911 uhci_debug_read 3 5911 NULL
89022 +compat_sys_pselect6_14105 compat_sys_pselect6 1 14105 NULL
89023 +iscsit_dump_data_payload_38683 iscsit_dump_data_payload 2 38683 NULL
89024 +__tun_chr_ioctl_22300 __tun_chr_ioctl 4 22300 NULL
89025 +stk_allocate_buffers_16291 stk_allocate_buffers 2 16291 NULL
89026 +mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
89027 +ttm_bo_kmap_ttm_5922 ttm_bo_kmap_ttm 3 5922 NULL
89028 +o2hb_debug_read_37851 o2hb_debug_read 3 37851 NULL
89029 +bitmap_storage_alloc_55077 bitmap_storage_alloc 2 55077 NULL
89030 +iscsi_iser_recv_41948 iscsi_iser_recv 4 41948 NULL
89031 +lbs_highsnr_read_5931 lbs_highsnr_read 3 5931 NULL
89032 +em28xx_alloc_isoc_46892 em28xx_alloc_isoc 4 46892 NULL
89033 +read_dma_55086 read_dma 3 55086 NULL
89034 +isr_irqs_read_9181 isr_irqs_read 3 9181 NULL
89035 +dwc3_testmode_write_30516 dwc3_testmode_write 3 30516 NULL
89036 +edac_device_alloc_ctl_info_5941 edac_device_alloc_ctl_info 1 5941 NULL
89037 +ntfs_rl_replace_14136 ntfs_rl_replace 2-4 14136 NULL
89038 +ip_send_unicast_reply_38714 ip_send_unicast_reply 6 38714 NULL
89039 +tcp_collapse_63294 tcp_collapse 6-5 63294 NULL
89040 +alloc_trace_probe_38720 alloc_trace_probe 6 38720 NULL
89041 +isdn_ppp_ccp_xmit_reset_63297 isdn_ppp_ccp_xmit_reset 6 63297 NULL
89042 +rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read_55106 rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 NULL
89043 +tipc_subseq_alloc_5957 tipc_subseq_alloc 1 5957 NULL
89044 +em_canid_change_14150 em_canid_change 3 14150 NULL
89045 +tracing_ctrl_read_46922 tracing_ctrl_read 3 46922 NULL
89046 +gsm_dlci_data_14155 gsm_dlci_data 3 14155 NULL
89047 +fb_write_46924 fb_write 3 46924 NULL
89048 +btmrvl_curpsmode_read_46939 btmrvl_curpsmode_read 3 46939 NULL
89049 +wlcore_alloc_hw_22365 wlcore_alloc_hw 1 22365 NULL
89050 +crypto_ahash_setkey_55134 crypto_ahash_setkey 3 55134 NULL
89051 +br_send_bpdu_29669 br_send_bpdu 3 29669 NULL
89052 +disk_expand_part_tbl_30561 disk_expand_part_tbl 2 30561 NULL
89053 +evdev_ioctl_22371 evdev_ioctl 2 22371 NULL
89054 +__sctp_setsockopt_connectx_46949 __sctp_setsockopt_connectx 3 46949 NULL
89055 +udf_readpages_38761 udf_readpages 4 38761 NULL
89056 +reada_add_block_54247 reada_add_block 2 54247 NULL
89057 +ieee80211_if_read_uapsd_queues_55150 ieee80211_if_read_uapsd_queues 3 55150 NULL
89058 +proc_info_read_63344 proc_info_read 3 63344 NULL
89059 +pep_indicate_38611 pep_indicate 5 38611 NULL
89060 +set_le_30581 set_le 4 30581 NULL
89061 +alloc_large_system_hash_22391 alloc_large_system_hash 2 22391 NULL
89062 +btmrvl_psmode_read_22395 btmrvl_psmode_read 3 22395 NULL
89063 +alloc_private_22399 alloc_private 2 22399 NULL
89064 +snd_gus_dram_write_38784 snd_gus_dram_write 4 38784 NULL
89065 +ps_upsd_max_sptime_read_63362 ps_upsd_max_sptime_read 3 63362 NULL
89066 +zoran_write_22404 zoran_write 3 22404 NULL
89067 +dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 NULL
89068 +idmouse_read_63374 idmouse_read 3 63374 NULL
89069 +queue_reply_22416 queue_reply 3 22416 NULL
89070 +sgl_map_user_pages_30610 sgl_map_user_pages 2 30610 NULL
89071 +sel_write_bool_46996 sel_write_bool 3 46996 NULL
89072 +ntfs_rl_append_6037 ntfs_rl_append 2-4 6037 NULL
89073 +dfs_global_file_read_7787 dfs_global_file_read 3 7787 NULL
89074 +ttm_bo_io_47000 ttm_bo_io 5 47000 NULL
89075 +ieee80211_if_write_uapsd_max_sp_len_14233 ieee80211_if_write_uapsd_max_sp_len 3 14233 NULL
89076 +sel_write_relabel_55195 sel_write_relabel 3 55195 NULL
89077 +edac_pci_alloc_ctl_info_63388 edac_pci_alloc_ctl_info 1 63388 NULL
89078 +ipv6_renew_option_38813 ipv6_renew_option 3 38813 NULL
89079 +sched_feat_write_55202 sched_feat_write 3 55202 NULL
89080 +dma_declare_coherent_memory_14244 dma_declare_coherent_memory 4-2 14244 NULL
89081 +snd_soc_hw_bulk_write_raw_14245 snd_soc_hw_bulk_write_raw 4 14245 NULL
89082 +ht40allow_map_read_55209 ht40allow_map_read 3 55209 NULL nohasharray
89083 +isdn_net_ciscohdlck_alloc_skb_55209 isdn_net_ciscohdlck_alloc_skb 2 55209 &ht40allow_map_read_55209
89084 +compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
89085 +sys_select_38827 sys_select 1 38827 NULL
89086 +rxpipe_missed_beacon_host_int_trig_rx_data_read_63405 rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 NULL
89087 +do_raw_setsockopt_55215 do_raw_setsockopt 5 55215 NULL
89088 +cx231xx_init_bulk_47024 cx231xx_init_bulk 3-2 47024 NULL
89089 +direct_entry_38836 direct_entry 3 38836 NULL
89090 +__hwahc_op_set_ptk_36510 __hwahc_op_set_ptk 5 36510 NULL
89091 +gntdev_alloc_map_35145 gntdev_alloc_map 2 35145 NULL
89092 +compat_udp_setsockopt_38840 compat_udp_setsockopt 5 38840 NULL
89093 +handle_received_packet_22457 handle_received_packet 3 22457 NULL
89094 +ath6kl_connect_event_14267 ath6kl_connect_event 7-8-9 14267 NULL
89095 +write_head_30481 write_head 4 30481 NULL
89096 +mem_cgroup_read_22461 mem_cgroup_read 5 22461 NULL
89097 +set_dis_bypass_pfs_47038 set_dis_bypass_pfs 3 47038 NULL
89098 +add_numbered_child_14273 add_numbered_child 5 14273 NULL
89099 +l2cap_sock_sendmsg_63427 l2cap_sock_sendmsg 4 63427 NULL
89100 +OS_mem_token_alloc_14276 OS_mem_token_alloc 1 14276 NULL
89101 +sep_prepare_input_output_dma_table_63429 sep_prepare_input_output_dma_table 2-4-3 63429 NULL
89102 +register_unifi_sdio_55239 register_unifi_sdio 2 55239 NULL
89103 +ath6kl_wmi_get_new_buf_52304 ath6kl_wmi_get_new_buf 1 52304 NULL
89104 +agp_remap_30665 agp_remap 2 30665 NULL
89105 +interfaces_38859 interfaces 2 38859 NULL
89106 +memcpy_fromiovec_55247 memcpy_fromiovec 3 55247 NULL
89107 +nfsd_symlink_63442 nfsd_symlink 6 63442 NULL
89108 +ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL
89109 +cache_write_procfs_22491 cache_write_procfs 3 22491 NULL
89110 +qc_capture_19298 qc_capture 3 19298 NULL
89111 +read_default_ldt_14302 read_default_ldt 2 14302 NULL
89112 +dfs_global_file_write_6112 dfs_global_file_write 3 6112 NULL
89113 +alloc_dca_provider_59670 alloc_dca_provider 2 59670 NULL
89114 +pipeline_dec_packet_in_read_47076 pipeline_dec_packet_in_read 3 47076 NULL
89115 +rtl_port_map_2385 rtl_port_map 1-2 2385 NULL
89116 +dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL
89117 +dbgfs_state_38894 dbgfs_state 3 38894 NULL
89118 +sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL
89119 +snd_info_entry_write_63474 snd_info_entry_write 3 63474 NULL
89120 +nouveau_parent_create__6131 nouveau_parent_create_ 7 6131 NULL
89121 +process_bulk_data_command_38906 process_bulk_data_command 4 38906 NULL
89122 +rx_streaming_interval_read_55291 rx_streaming_interval_read 3 55291 NULL
89123 +reada_find_extent_63486 reada_find_extent 2 63486 NULL
89124 +read_kcore_63488 read_kcore 3 63488 NULL
89125 +lbs_debugfs_read_30721 lbs_debugfs_read 3 30721 NULL
89126 +__skb_cow_39254 __skb_cow 2 39254 NULL
89127 +gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
89128 +__get_vm_area_node_55305 __get_vm_area_node 1 55305 NULL
89129 +ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
89130 +rsc_mgr_init_16299 rsc_mgr_init 3 16299 NULL
89131 +ivtv_copy_buf_to_user_6159 ivtv_copy_buf_to_user 4 6159 NULL
89132 +pskb_may_pull_22546 pskb_may_pull 2 22546 NULL
89133 +mousedev_read_47123 mousedev_read 3 47123 NULL
89134 +rawv6_recvmsg_30265 rawv6_recvmsg 4 30265 NULL
89135 +agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
89136 +vdma_mem_alloc_6171 vdma_mem_alloc 1 6171 NULL
89137 +wl1251_cmd_template_set_6172 wl1251_cmd_template_set 4 6172 NULL
89138 +ses_recv_diag_47143 ses_recv_diag 4 47143 NULL nohasharray
89139 +acpi_ut_initialize_buffer_47143 acpi_ut_initialize_buffer 2 47143 &ses_recv_diag_47143
89140 +alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
89141 +vme_user_read_55338 vme_user_read 3 55338 NULL
89142 +sctp_datamsg_from_user_55342 sctp_datamsg_from_user 4 55342 NULL
89143 +cxio_init_resource_fifo_random_47151 cxio_init_resource_fifo_random 3 47151 NULL
89144 +persistent_ram_iomap_47156 persistent_ram_iomap 1-2 47156 NULL
89145 +ath6kl_regdump_read_14393 ath6kl_regdump_read 3 14393 NULL
89146 +__ath6kl_wmi_send_mgmt_cmd_38971 __ath6kl_wmi_send_mgmt_cmd 7 38971 NULL
89147 +__hidp_send_ctrl_message_28303 __hidp_send_ctrl_message 4 28303 NULL
89148 +rs_sta_dbgfs_rate_scale_data_read_47165 rs_sta_dbgfs_rate_scale_data_read 3 47165 NULL
89149 +append_to_buffer_63550 append_to_buffer 3 63550 NULL
89150 +smk_write_onlycap_14400 smk_write_onlycap 3 14400 NULL
89151 +acpi_system_read_event_55362 acpi_system_read_event 3 55362 NULL
89152 +dbg_leb_write_63555 dbg_leb_write 4-5 63555 NULL nohasharray
89153 +kvm_write_guest_page_63555 kvm_write_guest_page 5 63555 &dbg_leb_write_63555
89154 +snapshot_read_22601 snapshot_read 3 22601 NULL
89155 +OSDSetBlock_38986 OSDSetBlock 4-2 38986 NULL
89156 +v4l2_ctrl_new_std_menu_6221 v4l2_ctrl_new_std_menu 4 6221 NULL
89157 +mtd_concat_create_14416 mtd_concat_create 2 14416 NULL
89158 +mqueue_read_file_6228 mqueue_read_file 3 6228 NULL
89159 +ioremap_cache_47189 ioremap_cache 1-2 47189 NULL
89160 +__send_to_port_55383 __send_to_port 3 55383 NULL
89161 +rproc_alloc_63577 rproc_alloc 5 63577 NULL
89162 +nf_nat_ipv4_manip_pkt_55387 nf_nat_ipv4_manip_pkt 2 55387 NULL
89163 +smk_read_doi_30813 smk_read_doi 3 30813 NULL
89164 +f_hidg_read_6238 f_hidg_read 3 6238 NULL
89165 +proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
89166 +sctp_getsockopt_delayed_ack_9232 sctp_getsockopt_delayed_ack 2 9232 NULL
89167 +get_nodes_39012 get_nodes 3 39012 NULL
89168 +fbcon_prepare_logo_6246 fbcon_prepare_logo 5 6246 NULL
89169 +sctp_setsockopt_connectx_old_22631 sctp_setsockopt_connectx_old 3 22631 NULL
89170 +ide_core_cp_entry_22636 ide_core_cp_entry 3 22636 NULL
89171 +iwl_dbgfs_plcp_delta_read_55407 iwl_dbgfs_plcp_delta_read 3 55407 NULL
89172 +pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
89173 +create_subvol_30836 create_subvol 4 30836 NULL
89174 +mthca_map_reg_5664 mthca_map_reg 2-3 5664 NULL
89175 +ci13xxx_add_device_14456 ci13xxx_add_device 3 14456 NULL
89176 +iwl_dbgfs_calib_disabled_read_22649 iwl_dbgfs_calib_disabled_read 3 22649 NULL
89177 +_zd_iowrite32v_async_locked_39034 _zd_iowrite32v_async_locked 3 39034 NULL
89178 +sctp_setsockopt_auth_chunk_30843 sctp_setsockopt_auth_chunk 3 30843 NULL
89179 +read_oldmem_55658 read_oldmem 3 55658 NULL
89180 +tx_tx_start_null_frame_read_6281 tx_tx_start_null_frame_read 3 6281 NULL
89181 +xenbus_file_write_6282 xenbus_file_write 3 6282 NULL
89182 +options_write_47243 options_write 3 47243 NULL
89183 +module_alloc_63630 module_alloc 1 63630 NULL
89184 +alloc_skb_55439 alloc_skb 1 55439 NULL
89185 +nf_nat_ipv6_manip_pkt_6289 nf_nat_ipv6_manip_pkt 2 6289 NULL
89186 +portcntrs_1_read_47253 portcntrs_1_read 3 47253 NULL
89187 +ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 NULL
89188 +ubifs_leb_write_22679 ubifs_leb_write 4-5 22679 NULL
89189 +nf_nat_sack_adjust_6297 nf_nat_sack_adjust 2 6297 NULL
89190 +proc_loginuid_write_63648 proc_loginuid_write 3 63648 NULL
89191 +mid_get_vbt_data_r10_6308 mid_get_vbt_data_r10 2 6308 NULL
89192 +vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL
89193 +__vxge_hw_channel_allocate_55462 __vxge_hw_channel_allocate 3 55462 NULL
89194 +pn_recvmsg_30887 pn_recvmsg 4 30887 NULL
89195 +rx_rx_timeout_wa_read_50204 rx_rx_timeout_wa_read 3 50204 NULL
89196 +hidraw_ioctl_63658 hidraw_ioctl 2 63658 NULL
89197 +ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
89198 +lbs_threshold_read_21046 lbs_threshold_read 5 21046 NULL
89199 +tty_audit_log_47280 tty_audit_log 8 47280 NULL
89200 +alloc_libipw_22708 alloc_libipw 1 22708 NULL
89201 +gfs2_readpages_47285 gfs2_readpages 4 47285 NULL
89202 +fc_host_post_vendor_event_30903 fc_host_post_vendor_event 3 30903 NULL
89203 +vbi_read_63673 vbi_read 3 63673 NULL
89204 +tun_get_user_39099 tun_get_user 4 39099 NULL
89205 +i2o_pool_alloc_55485 i2o_pool_alloc 4 55485 NULL
89206 +alloc_tty_driver_63681 alloc_tty_driver 1 63681 NULL
89207 +read_flush_pipefs_20171 read_flush_pipefs 3 20171 NULL
89208 +tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL
89209 +long_retry_limit_read_59766 long_retry_limit_read 3 59766 NULL
89210 +nfc_hci_hcp_message_tx_14534 nfc_hci_hcp_message_tx 6 14534 NULL
89211 +iommu_map_mmio_space_30919 iommu_map_mmio_space 1 30919 NULL
89212 +ep0_write_14536 ep0_write 3 14536 NULL nohasharray
89213 +dataflash_read_user_otp_14536 dataflash_read_user_otp 2-3 14536 &ep0_write_14536
89214 +dvb_ringbuffer_pkt_read_user_4303 dvb_ringbuffer_pkt_read_user 2-3-5 4303 NULL
89215 +cx18_copy_buf_to_user_22735 cx18_copy_buf_to_user 4 22735 NULL
89216 +ax25_output_22736 ax25_output 2 22736 NULL
89217 +__kfifo_to_user_r_39123 __kfifo_to_user_r 3 39123 NULL
89218 +l2cap_send_cmd_14548 l2cap_send_cmd 4 14548 NULL
89219 +picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
89220 +drm_vmalloc_dma_14550 drm_vmalloc_dma 1 14550 NULL
89221 +cfpkt_pad_trail_55511 cfpkt_pad_trail 2 55511 NULL nohasharray
89222 +tx_tx_done_int_template_read_55511 tx_tx_done_int_template_read 3 55511 &cfpkt_pad_trail_55511
89223 +cmtp_add_msgpart_9252 cmtp_add_msgpart 4 9252 NULL
89224 +sctp_setsockopt_rtoinfo_30941 sctp_setsockopt_rtoinfo 3 30941 NULL
89225 +nouveau_object_create__63715 nouveau_object_create_ 5 63715 NULL
89226 +hid_input_report_32458 hid_input_report 4 32458 NULL
89227 +_proc_do_string_6376 _proc_do_string 2 6376 NULL
89228 +osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 NULL
89229 +read_cis_cache_29735 read_cis_cache 4 29735 NULL
89230 +ieee80211_if_read_dot11MeshTTL_58307 ieee80211_if_read_dot11MeshTTL 3 58307 NULL
89231 +alloc_ring_39151 alloc_ring 2-4 39151 NULL
89232 +proc_coredump_filter_read_39153 proc_coredump_filter_read 3 39153 NULL
89233 +create_bounce_buffer_39155 create_bounce_buffer 3 39155 NULL
89234 +tty_port_register_device_55543 tty_port_register_device 3 55543 NULL
89235 +tty_insert_flip_string_flags_30969 tty_insert_flip_string_flags 4 30969 NULL
89236 +asix_read_cmd_13245 asix_read_cmd 5 13245 NULL
89237 +ieee80211_if_read_dot11MeshHoldingTimeout_47356 ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 NULL
89238 +idmap_pipe_downcall_14591 idmap_pipe_downcall 3 14591 NULL
89239 +selinux_secctx_to_secid_63744 selinux_secctx_to_secid 2 63744 NULL
89240 +isdn_add_channels_40905 isdn_add_channels 3 40905 NULL
89241 +bt_skb_alloc_6404 bt_skb_alloc 1 6404 NULL
89242 +get_info_55681 get_info 3 55681 NULL
89243 +setkey_14987 setkey 3 14987 NULL
89244 +__bio_map_kern_47379 __bio_map_kern 3 47379 NULL
89245 +init_list_set_39188 init_list_set 2-3 39188 NULL
89246 +ubi_more_update_data_39189 ubi_more_update_data 4 39189 NULL
89247 +snd_pcm_oss_read1_63771 snd_pcm_oss_read1 3 63771 NULL
89248 +l2up_create_6430 l2up_create 3 6430 NULL
89249 +ipr_change_queue_depth_6431 ipr_change_queue_depth 2 6431 NULL
89250 +dgrp_net_write_47392 dgrp_net_write 3 47392 NULL
89251 +spidev_compat_ioctl_63778 spidev_compat_ioctl 2 63778 NULL
89252 +add_partition_55588 add_partition 2 55588 NULL
89253 +lbs_host_sleep_read_31013 lbs_host_sleep_read 3 31013 NULL
89254 +snd_pcm_aio_read_13900 snd_pcm_aio_read 3 13900 NULL
89255 +depth_read_31112 depth_read 3 31112 NULL
89256 +macvtap_put_user_55609 macvtap_put_user 4 55609 NULL
89257 +ivtv_v4l2_write_39226 ivtv_v4l2_write 3 39226 NULL
89258 +selinux_setprocattr_55611 selinux_setprocattr 4 55611 NULL
89259 +profile_replace_14652 profile_replace 3 14652 NULL
89260 +vzalloc_47421 vzalloc 1 47421 NULL
89261 +mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
89262 +agp_create_user_memory_62955 agp_create_user_memory 1 62955 NULL
89263 +sys_writev_28384 sys_writev 3 28384 NULL
89264 +batadv_tt_response_fill_table_39236 batadv_tt_response_fill_table 1 39236 NULL
89265 +read_file_rcstat_22854 read_file_rcstat 3 22854 NULL
89266 +__videobuf_copy_stream_44769 __videobuf_copy_stream 4 44769 NULL
89267 +rx_rx_beacon_early_term_read_21559 rx_rx_beacon_early_term_read 3 21559 NULL
89268 +pktgen_if_write_55628 pktgen_if_write 3 55628 NULL
89269 +create_attr_set_22861 create_attr_set 1 22861 NULL
89270 +r128_compat_ioctl_39250 r128_compat_ioctl 2 39250 NULL nohasharray
89271 +pwr_cont_miss_bcns_spread_read_39250 pwr_cont_miss_bcns_spread_read 3 39250 &r128_compat_ioctl_39250
89272 +compat_sys_mq_timedsend_31060 compat_sys_mq_timedsend 3 31060 NULL
89273 +sel_write_load_63830 sel_write_load 3 63830 NULL
89274 +lbs_failcount_read_31063 lbs_failcount_read 3 31063 NULL
89275 +pipeline_enc_tx_stat_fifo_int_read_14680 pipeline_enc_tx_stat_fifo_int_read 3 14680 NULL
89276 +bnx2_nvram_write_7790 bnx2_nvram_write 4-2 7790 NULL
89277 +dvb_dmxdev_set_buffer_size_55643 dvb_dmxdev_set_buffer_size 2 55643 NULL
89278 +tsi148_master_set_14685 tsi148_master_set 4 14685 NULL
89279 +ath6kl_wmi_set_appie_cmd_39266 ath6kl_wmi_set_appie_cmd 5 39266 NULL
89280 +probe_bios_17467 probe_bios 1 17467 NULL
89281 +vmw_execbuf_process_22885 vmw_execbuf_process 5 22885 NULL
89282 +ttm_bo_ioremap_31082 ttm_bo_ioremap 2-3 31082 NULL
89283 +mei_read_6507 mei_read 3 6507 NULL
89284 +lpfc_idiag_queinfo_read_55662 lpfc_idiag_queinfo_read 3 55662 NULL
89285 +mdc800_device_read_22896 mdc800_device_read 3 22896 NULL
89286 +rx_filter_protection_filter_read_39282 rx_filter_protection_filter_read 3 39282 NULL
89287 +sctp_setsockopt_context_31091 sctp_setsockopt_context 3 31091 NULL
89288 +il_dbgfs_tx_queue_read_55668 il_dbgfs_tx_queue_read 3 55668 NULL
89289 +SetArea_50835 SetArea 4 50835 NULL
89290 +tpm_read_50344 tpm_read 3 50344 NULL
89291 +newpart_47485 newpart 6 47485 NULL
89292 +jffs2_do_link_42048 jffs2_do_link 6 42048 NULL
89293 +compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3 31109 NULL
89294 +core_sys_select_47494 core_sys_select 1 47494 NULL
89295 +read_file_disable_ani_6536 read_file_disable_ani 3 6536 NULL
89296 +sisusb_write_mem_bulk_29678 sisusb_write_mem_bulk 4 29678 NULL
89297 +__vmalloc_node_39308 __vmalloc_node 1 39308 NULL
89298 +libfc_host_alloc_7917 libfc_host_alloc 2 7917 NULL
89299 +alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
89300 +unlink_simple_47506 unlink_simple 3 47506 NULL
89301 +rndis_set_oid_6547 rndis_set_oid 4 6547 NULL
89302 +wdm_read_6549 wdm_read 3 6549 NULL
89303 +init_map_ipmac_63896 init_map_ipmac 4-3 63896 NULL
89304 +tipc_multicast_49144 tipc_multicast 5 49144 NULL
89305 +nfs4_realloc_slot_table_22859 nfs4_realloc_slot_table 2 22859 NULL
89306 +fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL
89307 +xhci_alloc_stream_info_63902 xhci_alloc_stream_info 3 63902 NULL
89308 +__videobuf_alloc_uncached_55711 __videobuf_alloc_uncached 1 55711 NULL
89309 +rx_rx_cmplt_read_14753 rx_rx_cmplt_read 3 14753 NULL
89310 +nfc_hci_send_cmd_55714 nfc_hci_send_cmd 5 55714 NULL
89311 +pcpu_mem_zalloc_22948 pcpu_mem_zalloc 1 22948 NULL
89312 +filter_read_61692 filter_read 3 61692 NULL
89313 +mtdswap_init_55719 mtdswap_init 2 55719 NULL
89314 +rx_rx_phy_hdr_read_20950 rx_rx_phy_hdr_read 3 20950 NULL
89315 +debugfs_read_62535 debugfs_read 3 62535 NULL
89316 +w9966_v4l_read_31148 w9966_v4l_read 3 31148 NULL
89317 +process_vm_rw_47533 process_vm_rw 3-5 47533 NULL
89318 +divas_write_63901 divas_write 3 63901 NULL
89319 +alloc_sglist_22960 alloc_sglist 1-2-3 22960 NULL
89320 +caif_seqpkt_sendmsg_22961 caif_seqpkt_sendmsg 4 22961 NULL
89321 +snd_compr_write_63923 snd_compr_write 3 63923 NULL
89322 +cfpkt_split_47541 cfpkt_split 2 47541 NULL
89323 +__copy_from_user_nocache_39351 __copy_from_user_nocache 3 39351 NULL
89324 +btmrvl_pscmd_read_24308 btmrvl_pscmd_read 3 24308 NULL
89325 +__iio_allocate_kfifo_55738 __iio_allocate_kfifo 3-2 55738 NULL
89326 +ipw_write_59807 ipw_write 3 59807 NULL
89327 +sta_dev_read_14782 sta_dev_read 3 14782 NULL
89328 +tipc_send2port_63935 tipc_send2port 5 63935 NULL
89329 +do_write_log_from_user_39362 do_write_log_from_user 3 39362 NULL
89330 +ch_do_scsi_31171 ch_do_scsi 4 31171 NULL
89331 +afs_send_simple_reply_63940 afs_send_simple_reply 3 63940 NULL
89332 +mwifiex_cfg80211_mgmt_tx_12022 mwifiex_cfg80211_mgmt_tx 9 12022 NULL
89333 +cycx_setup_47562 cycx_setup 4 47562 NULL
89334 +remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
89335 +ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
89336 +set_local_name_55757 set_local_name 4 55757 NULL
89337 +printer_req_alloc_62687 printer_req_alloc 2 62687 NULL
89338 +btrfs_init_new_buffer_55761 btrfs_init_new_buffer 4 55761 NULL
89339 +read_ldt_47570 read_ldt 2 47570 NULL
89340 +regmap_name_read_file_39379 regmap_name_read_file 3 39379 NULL
89341 +pci_iomap_47575 pci_iomap 3 47575 NULL
89342 +acpi_ex_system_memory_space_handler_31192 acpi_ex_system_memory_space_handler 2 31192 NULL
89343 +kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL
89344 +module_alloc_update_bounds_rw_63233 module_alloc_update_bounds_rw 1 63233 NULL
89345 +ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
89346 +drm_ht_create_18853 drm_ht_create 2 18853 NULL
89347 +mtdchar_readoob_31200 mtdchar_readoob 4 31200 NULL
89348 +qlcnic_alloc_msix_entries_46160 qlcnic_alloc_msix_entries 2 46160 NULL
89349 +ieee80211_authentication_req_63973 ieee80211_authentication_req 3 63973 NULL
89350 +dn_alloc_skb_6631 dn_alloc_skb 2 6631 NULL
89351 +conf_read_55786 conf_read 3 55786 NULL
89352 +do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
89353 +rx_defrag_decrypt_failed_read_41411 rx_defrag_decrypt_failed_read 3 41411 NULL
89354 +viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
89355 +virtscsi_alloc_tgt_6643 virtscsi_alloc_tgt 2 6643 NULL
89356 +atm_get_addr_31221 atm_get_addr 3 31221 NULL
89357 +user_power_read_39414 user_power_read 3 39414 NULL
89358 +uwb_rc_neh_grok_event_55799 uwb_rc_neh_grok_event 3 55799 NULL
89359 +uea_request_47613 uea_request 4 47613 NULL
89360 +cache_read_pipefs_47615 cache_read_pipefs 3 47615 NULL
89361 +read_file_frameerrors_64001 read_file_frameerrors 3 64001 NULL
89362 +alloc_agpphysmem_i8xx_39427 alloc_agpphysmem_i8xx 1 39427 NULL
89363 +btrfs_find_create_tree_block_55812 btrfs_find_create_tree_block 3 55812 NULL
89364 +subdev_ioctl_28417 subdev_ioctl 2 28417 NULL
89365 +lcd_write_14857 lcd_write 3 14857 NULL nohasharray
89366 +__krealloc_14857 __krealloc 2 14857 &lcd_write_14857
89367 +_create_sg_bios_31244 _create_sg_bios 4 31244 NULL
89368 +get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
89369 +kmemdup_64015 kmemdup 2 64015 NULL
89370 +compat_sys_select_16131 compat_sys_select 1 16131 NULL
89371 +reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL nohasharray
89372 +unix_seqpacket_recvmsg_23062 unix_seqpacket_recvmsg 4 23062 &reiserfs_add_entry_23062
89373 +process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
89374 +tcf_csum_skb_nextlayer_64025 tcf_csum_skb_nextlayer 3 64025 NULL
89375 +oz_events_read_47535 oz_events_read 3 47535 NULL
89376 +sb16_copy_from_user_55836 sb16_copy_from_user 10-6-7 55836 NULL
89377 +resize_async_buffer_64031 resize_async_buffer 4 64031 NULL
89378 +sys_semop_39457 sys_semop 3 39457 NULL
89379 +vm_map_ram_23078 vm_map_ram 2 23078 NULL nohasharray
89380 +raw_sendmsg_23078 raw_sendmsg 4 23078 &vm_map_ram_23078
89381 +update_pmkid_2481 update_pmkid 4 2481 NULL
89382 +sriov_enable_migration_14889 sriov_enable_migration 2 14889 NULL
89383 +sep_lli_table_secure_dma_64042 sep_lli_table_secure_dma 2-3 64042 NULL
89384 +ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
89385 +acpi_os_allocate_14892 acpi_os_allocate 1 14892 NULL
89386 +hysdn_rx_netpkt_16136 hysdn_rx_netpkt 3 16136 NULL
89387 +setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
89388 +rt2x00debug_write_eeprom_23091 rt2x00debug_write_eeprom 3 23091 NULL
89389 +mpeg_read_6708 mpeg_read 3 6708 NULL
89390 +hcd_alloc_coherent_55862 hcd_alloc_coherent 5 55862 NULL
89391 +ntfs_ucstonls_23097 ntfs_ucstonls 3-5 23097 NULL
89392 +sky2_receive_13407 sky2_receive 2 13407 NULL
89393 +krealloc_14908 krealloc 2 14908 NULL
89394 +pipe_iov_copy_from_user_23102 pipe_iov_copy_from_user 3 23102 NULL
89395 +bt_skb_send_alloc_6581 bt_skb_send_alloc 2 6581 NULL
89396 +dgram_recvmsg_23104 dgram_recvmsg 4 23104 NULL
89397 +gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL
89398 +video_proc_write_6724 video_proc_write 3 6724 NULL
89399 +xlog_find_verify_log_record_18870 xlog_find_verify_log_record 2 18870 NULL
89400 +mwl8k_cmd_set_beacon_23110 mwl8k_cmd_set_beacon 4 23110 NULL
89401 +uvc_simplify_fraction_31303 uvc_simplify_fraction 3 31303 NULL
89402 +ieee80211_if_read_peer_45233 ieee80211_if_read_peer 3 45233 NULL
89403 +qla4xxx_alloc_work_44813 qla4xxx_alloc_work 2 44813 NULL
89404 +drm_mode_create_tv_properties_23122 drm_mode_create_tv_properties 2 23122 NULL
89405 +pm_qos_power_read_55891 pm_qos_power_read 3 55891 NULL
89406 +packet_recvmsg_47700 packet_recvmsg 4 47700 NULL
89407 +command_file_write_31318 command_file_write 3 31318 NULL
89408 +gspca_dev_probe2_59833 gspca_dev_probe2 4 59833 NULL
89409 +lbs_highrssi_read_64089 lbs_highrssi_read 3 64089 NULL
89410 +wep_default_key_count_read_43035 wep_default_key_count_read 3 43035 NULL
89411 +i915_cache_sharing_read_24775 i915_cache_sharing_read 3 24775 NULL
89412 +array_zalloc_7519 array_zalloc 1-2 7519 NULL
89413 +tx_frag_key_not_found_read_22971 tx_frag_key_not_found_read 3 22971 NULL
89414 +ipv6_renew_options_28867 ipv6_renew_options 5 28867 NULL
89415 +unix_dgram_recvmsg_14952 unix_dgram_recvmsg 4 14952 NULL
89416 +ca91cx42_master_set_23146 ca91cx42_master_set 4 23146 NULL
89417 +videobuf_read_stream_14956 videobuf_read_stream 3 14956 NULL
89418 +ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime_1589 ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 NULL
89419 +sisusbcon_clear_64329 sisusbcon_clear 4-3-5 64329 NULL
89420 +vxge_os_dma_malloc_46184 vxge_os_dma_malloc 2 46184 NULL
89421 +sfi_check_table_6772 sfi_check_table 1 6772 NULL
89422 +bits_to_user_47733 bits_to_user 2-3 47733 NULL
89423 +int_proc_write_39542 int_proc_write 3 39542 NULL
89424 +do_dccp_setsockopt_54377 do_dccp_setsockopt 5 54377 NULL nohasharray
89425 +intel_sdvo_write_cmd_54377 intel_sdvo_write_cmd 4 54377 &do_dccp_setsockopt_54377
89426 +read_file_ani_23161 read_file_ani 3 23161 NULL
89427 +carl9170_debugfs_read_47738 carl9170_debugfs_read 3 47738 NULL
89428 +iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL
89429 +ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL
89430 +pp_write_39554 pp_write 3 39554 NULL
89431 +ioremap_23172 ioremap 1-2 23172 NULL
89432 +mvumi_alloc_mem_resource_47750 mvumi_alloc_mem_resource 3 47750 NULL
89433 +hdlcdrv_register_6792 hdlcdrv_register 2 6792 NULL
89434 +usblp_write_23178 usblp_write 3 23178 NULL
89435 +sel_read_policy_55947 sel_read_policy 3 55947 NULL
89436 +xprt_rdma_allocate_31372 xprt_rdma_allocate 2 31372 NULL
89437 +vb2_vmalloc_get_userptr_31374 vb2_vmalloc_get_userptr 3 31374 NULL
89438 +tx_tx_done_data_read_6799 tx_tx_done_data_read 3 6799 NULL
89439 +datablob_format_39571 datablob_format 2 39571 NULL nohasharray
89440 +ieee80211_if_read_fwded_mcast_39571 ieee80211_if_read_fwded_mcast 3 39571 &datablob_format_39571
89441 +fix_unclean_leb_23188 fix_unclean_leb 3 23188 NULL
89442 +simple_read_from_buffer_55957 simple_read_from_buffer 2-5 55957 NULL
89443 +dlfb_ops_write_64150 dlfb_ops_write 3 64150 NULL
89444 +vmalloc_32_1135 vmalloc_32 1 1135 NULL
89445 +tx_tx_imm_resp_read_55964 tx_tx_imm_resp_read 3 55964 NULL
89446 +tx_tx_exch_pending_read_53018 tx_tx_exch_pending_read 3 53018 NULL
89447 +__team_options_register_63941 __team_options_register 3 63941 NULL
89448 +error_error_numll_frame_cts_start_read_47781 error_error_numll_frame_cts_start_read 3 47781 NULL
89449 +ip_ufo_append_data_12775 ip_ufo_append_data 6-7-8 12775 NULL
89450 +rvmalloc_46873 rvmalloc 1 46873 NULL
89451 +vmap_15025 vmap 2 15025 NULL
89452 +key_ifindex_read_31411 key_ifindex_read 3 31411 NULL
89453 +mcs7830_set_reg_31413 mcs7830_set_reg 3 31413 NULL
89454 +mon_bin_read_6841 mon_bin_read 3 6841 NULL
89455 +tty_buffer_request_room_23228 tty_buffer_request_room 2 23228 NULL
89456 +xlog_get_bp_23229 xlog_get_bp 2 23229 NULL
89457 +snd_cs4281_BA0_read_6847 snd_cs4281_BA0_read 5 6847 NULL
89458 +nfs_idmap_get_key_39616 nfs_idmap_get_key 2 39616 NULL
89459 +rxrpc_client_sendmsg_23236 rxrpc_client_sendmsg 5 23236 NULL
89460 +TSS_checkhmac1_31429 TSS_checkhmac1 5 31429 NULL
89461 +ttm_object_device_init_10321 ttm_object_device_init 2 10321 NULL
89462 +ping_recvmsg_25597 ping_recvmsg 4 25597 NULL
89463 +macvtap_recvmsg_63949 macvtap_recvmsg 4 63949 NULL
89464 +ceph_msgpool_init_34599 ceph_msgpool_init 4 34599 NULL
89465 +cld_pipe_downcall_15058 cld_pipe_downcall 3 15058 NULL
89466 +ssb_bus_pcmciabus_register_56020 ssb_bus_pcmciabus_register 3 56020 NULL
89467 +fm_send_cmd_39639 fm_send_cmd 5 39639 NULL
89468 +ip6gre_err_19869 ip6gre_err 5 19869 NULL
89469 +nvme_alloc_iod_56027 nvme_alloc_iod 1 56027 NULL
89470 +opera1_xilinx_rw_31453 opera1_xilinx_rw 5 31453 NULL
89471 +nfs4_write_cached_acl_15070 nfs4_write_cached_acl 4 15070 NULL
89472 +ntfs_copy_from_user_15072 ntfs_copy_from_user 3-5 15072 NULL
89473 +sctp_recvmsg_23265 sctp_recvmsg 4 23265 NULL
89474 +snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
89475 +get_new_cssid_51665 get_new_cssid 2 51665 NULL
89476 +raw_seticmpfilter_6888 raw_seticmpfilter 3 6888 NULL
89477 +prism2_info_hostscanresults_39657 prism2_info_hostscanresults 3 39657 NULL
89478 +ced_ioctl_36647 ced_ioctl 2 36647 NULL
89479 +dlmfs_file_write_6892 dlmfs_file_write 3 6892 NULL
89480 +__videobuf_alloc_vb_5665 __videobuf_alloc_vb 1 5665 NULL
89481 +kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
89482 +redrat3_transmit_ir_64244 redrat3_transmit_ir 3 64244 NULL
89483 +__alloc_extent_buffer_15093 __alloc_extent_buffer 3 15093 NULL
89484 +v4l_stk_read_39672 v4l_stk_read 3 39672 NULL
89485 +dccp_sendmsg_56058 dccp_sendmsg 4 56058 NULL
89486 +alg_setkey_31485 alg_setkey 3 31485 NULL
89487 +do_surface_dirty_sou_39678 do_surface_dirty_sou 7 39678 NULL
89488 +spi_show_regs_6911 spi_show_regs 3 6911 NULL nohasharray
89489 +proc_sessionid_read_6911 proc_sessionid_read 3 6911 &spi_show_regs_6911
89490 +qsfp_2_read_31491 qsfp_2_read 3 31491 NULL
89491 +vhci_read_47878 vhci_read 3 47878 NULL
89492 +__alloc_bootmem_31498 __alloc_bootmem 1 31498 NULL
89493 +ps_poll_ps_poll_utilization_read_39383 ps_poll_ps_poll_utilization_read 3 39383 NULL
89494 +i2cdev_write_23310 i2cdev_write 3 23310 NULL
89495 +keyctl_instantiate_key_common_47889 keyctl_instantiate_key_common 4 47889 NULL
89496 +kvm_read_hva_44847 kvm_read_hva 3 44847 NULL
89497 +ftrace_pid_write_39710 ftrace_pid_write 3 39710 NULL
89498 +event_id_read_64288 event_id_read 3 64288 NULL nohasharray
89499 +xfs_dir_cilookup_result_64288 xfs_dir_cilookup_result 3 64288 &event_id_read_64288
89500 +osd_req_read_sg_47905 osd_req_read_sg 5 47905 NULL
89501 +sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
89502 +pscsi_get_bio_56103 pscsi_get_bio 1 56103 NULL
89503 +timeout_read_47915 timeout_read 3 47915 NULL
89504 +hidraw_write_31536 hidraw_write 3 31536 NULL
89505 +error_error_bar_retry_read_64305 error_error_bar_retry_read 3 64305 NULL
89506 +page_readlink_23346 page_readlink 3 23346 NULL
89507 +videobuf_dma_init_kernel_6963 videobuf_dma_init_kernel 3 6963 NULL
89508 +comedi_write_47926 comedi_write 3 47926 NULL
89509 +usb_alloc_stream_buffers_56123 usb_alloc_stream_buffers 3 56123 NULL
89510 +dsp_write_46218 dsp_write 2 46218 NULL
89511 +kmem_zalloc_large_56128 kmem_zalloc_large 1 56128 NULL
89512 +usbvision_read_31555 usbvision_read 3 31555 NULL
89513 +pd_video_read_24510 pd_video_read 3 24510 NULL
89514 +crypto_authenc_esn_setkey_6985 crypto_authenc_esn_setkey 3 6985 NULL
89515 +sel_read_handle_status_56139 sel_read_handle_status 3 56139 NULL
89516 +request_key_async_6990 request_key_async 4 6990 NULL
89517 +ts_write_64336 ts_write 3 64336 NULL
89518 +handle_response_55951 handle_response 5 55951 NULL
89519 +usbtmc_write_64340 usbtmc_write 3 64340 NULL
89520 +tx_frag_tkip_called_read_31575 tx_frag_tkip_called_read 3 31575 NULL
89521 +r871x_set_wpa_ie_7000 r871x_set_wpa_ie 3 7000 NULL
89522 +iscsi_create_endpoint_15193 iscsi_create_endpoint 1 15193 NULL
89523 +osst_write_31581 osst_write 3 31581 NULL
89524 +tracing_cpumask_read_7010 tracing_cpumask_read 3 7010 NULL
89525 +rawv6_setsockopt_56165 rawv6_setsockopt 5 56165 NULL
89526 +wm8350_block_write_19727 wm8350_block_write 3 19727 NULL
89527 +diva_xdi_write_63975 diva_xdi_write 4 63975 NULL
89528 +llc_alloc_frame_64366 llc_alloc_frame 4 64366 NULL
89529 +iwl_dbgfs_ucode_tracing_read_47983 iwl_dbgfs_ucode_tracing_read 3 47983 NULL nohasharray
89530 +mempool_resize_47983 mempool_resize 2 47983 &iwl_dbgfs_ucode_tracing_read_47983
89531 +mangle_packet_18920 mangle_packet 7-9 18920 NULL
89532 +bfad_debugfs_write_regrd_15218 bfad_debugfs_write_regrd 3 15218 NULL
89533 +tx_frag_need_fragmentation_read_50153 tx_frag_need_fragmentation_read 3 50153 NULL
89534 +iscsi_change_queue_depth_23416 iscsi_change_queue_depth 2 23416 NULL
89535 +ib_umad_write_47993 ib_umad_write 3 47993 NULL
89536 +ilo_write_64378 ilo_write 3 64378 NULL
89537 +btrfs_map_block_64379 btrfs_map_block 3 64379 NULL
89538 +nilfs_alloc_seg_bio_64383 nilfs_alloc_seg_bio 3 64383 NULL
89539 +vzalloc_node_23424 vzalloc_node 1 23424 NULL
89540 +arvo_sysfs_read_31617 arvo_sysfs_read 6 31617 NULL
89541 +ipath_get_base_info_7043 ipath_get_base_info 3 7043 NULL
89542 +variax_alloc_sysex_buffer_15237 variax_alloc_sysex_buffer 3 15237 NULL
89543 +copy_from_user_17559 copy_from_user 3 17559 NULL
89544 +ffs_epfile_write_48014 ffs_epfile_write 3 48014 NULL
89545 +sys_migrate_pages_39825 sys_migrate_pages 2 39825 NULL
89546 +ir_lirc_transmit_ir_64403 ir_lirc_transmit_ir 3 64403 NULL
89547 +pidlist_allocate_64404 pidlist_allocate 1 64404 NULL
89548 +videobuf_read_one_31637 videobuf_read_one 3 31637 NULL
89549 +rx_hdr_overflow_read_64407 rx_hdr_overflow_read 3 64407 NULL
89550 +hidraw_send_report_23449 hidraw_send_report 3 23449 NULL
89551 +beiscsi_process_async_pdu_39834 beiscsi_process_async_pdu 7 39834 NULL
89552 +sctp_tsnmap_mark_35929 sctp_tsnmap_mark 2 35929 NULL
89553 +hci_sock_recvmsg_7072 hci_sock_recvmsg 4 7072 NULL
89554 +snd_card_create_64418 snd_card_create 4 64418 NULL nohasharray
89555 +keyctl_get_security_64418 keyctl_get_security 3 64418 &snd_card_create_64418
89556 +pod_alloc_sysex_buffer_31651 pod_alloc_sysex_buffer 3 31651 NULL
89557 +cp210x_get_config_56229 cp210x_get_config 4 56229 NULL
89558 +pppoe_sendmsg_48039 pppoe_sendmsg 4 48039 NULL
89559 +udl_prime_create_57159 udl_prime_create 2 57159 NULL
89560 +oom_adj_write_64428 oom_adj_write 3 64428 NULL
89561 +dn_nsp_send_disc_23469 dn_nsp_send_disc 2 23469 NULL
89562 +do_ipt_set_ctl_56238 do_ipt_set_ctl 4 56238 NULL
89563 +ping_sendmsg_3782 ping_sendmsg 4 3782 NULL
89564 +beacon_interval_read_7091 beacon_interval_read 3 7091 NULL
89565 +__lgread_31668 __lgread 4 31668 NULL
89566 +scrub_setup_recheck_block_56245 scrub_setup_recheck_block 4-3 56245 NULL
89567 +fd_copyin_56247 fd_copyin 3 56247 NULL
89568 +wpan_phy_alloc_48056 wpan_phy_alloc 1 48056 NULL
89569 +ax25_recvmsg_64441 ax25_recvmsg 4 64441 NULL
89570 +sys_connect_15291 sys_connect 3 15291 NULL nohasharray
89571 +xlate_dev_mem_ptr_15291 xlate_dev_mem_ptr 1 15291 &sys_connect_15291
89572 +linear_conf_23485 linear_conf 2 23485 NULL nohasharray
89573 +divasa_remap_pci_bar_23485 divasa_remap_pci_bar 3-4 23485 &linear_conf_23485
89574 +posix_acl_alloc_48063 posix_acl_alloc 1 48063 NULL
89575 +ieee80211_if_read_element_ttl_18869 ieee80211_if_read_element_ttl 3 18869 NULL
89576 +_usb_writeN_sync_31682 _usb_writeN_sync 4 31682 NULL
89577 +pipeline_enc_rx_stat_fifo_int_read_7107 pipeline_enc_rx_stat_fifo_int_read 3 7107 NULL
89578 +forced_ps_read_31685 forced_ps_read 3 31685 NULL
89579 +event_filter_read_23494 event_filter_read 3 23494 NULL
89580 +tpm_tis_init_15304 tpm_tis_init 2-3 15304 NULL
89581 +fcoe_ctlr_send_keep_alive_15308 fcoe_ctlr_send_keep_alive 3 15308 NULL
89582 +pipeline_tcp_tx_stat_fifo_int_read_32589 pipeline_tcp_tx_stat_fifo_int_read 3 32589 NULL
89583 +sys_setsockopt_35320 sys_setsockopt 5 35320 NULL
89584 +il4965_ucode_general_stats_read_56277 il4965_ucode_general_stats_read 3 56277 NULL
89585 +pkt_add_39897 pkt_add 3 39897 NULL
89586 +RESIZE_IF_NEEDED_56286 RESIZE_IF_NEEDED 2 56286 NULL
89587 +send_mpa_reject_7135 send_mpa_reject 3 7135 NULL
89588 +sctp_make_op_error_7057 sctp_make_op_error 5-6 7057 NULL
89589 +mmc_alloc_host_48097 mmc_alloc_host 1 48097 NULL
89590 +read_file_modal_eeprom_39909 read_file_modal_eeprom 3 39909 NULL
89591 +skb_copy_datagram_const_iovec_48102 skb_copy_datagram_const_iovec 4-2-5 48102 NULL
89592 +dvb_aplay_56296 dvb_aplay 3 56296 NULL
89593 +gen_pool_add_virt_39913 gen_pool_add_virt 4 39913 NULL
89594 +dw210x_op_rw_39915 dw210x_op_rw 6 39915 NULL
89595 +p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL
89596 +sctp_make_asconf_ack_31726 sctp_make_asconf_ack 3 31726 NULL
89597 +aes_encrypt_interrupt_read_39919 aes_encrypt_interrupt_read 3 39919 NULL
89598 +ima_show_measurements_count_23536 ima_show_measurements_count 3 23536 NULL
89599 +alloc_ring_15345 alloc_ring 2-4 15345 NULL
89600 +alloc_cpu_rmap_65363 alloc_cpu_rmap 1 65363 NULL
89601 +remove_uuid_64505 remove_uuid 4 64505 NULL
89602 +shmem_pwrite_slow_31741 shmem_pwrite_slow 3 31741 NULL
89603 +NCR_700_change_queue_depth_31742 NCR_700_change_queue_depth 2 31742 NULL
89604 +create_table_16213 create_table 2 16213 NULL
89605 +acpi_ut_create_string_object_15360 acpi_ut_create_string_object 1 15360 NULL
89606 +pipeline_pre_to_defrag_swi_read_56321 pipeline_pre_to_defrag_swi_read 3 56321 NULL
89607 +dbg_leb_change_23555 dbg_leb_change 4 23555 NULL
89608 +vmw_framebuffer_surface_dirty_48132 vmw_framebuffer_surface_dirty 6 48132 NULL
89609 +bcm_char_read_31750 bcm_char_read 3 31750 NULL
89610 +snd_seq_device_new_31753 snd_seq_device_new 4 31753 NULL
89611 +journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL
89612 +set_discoverable_48141 set_discoverable 4 48141 NULL
89613 +compat_sys_process_vm_readv_15374 compat_sys_process_vm_readv 3-5 15374 NULL
89614 +ses_send_diag_64527 ses_send_diag 4 64527 NULL
89615 +tcp_match_skb_to_sack_23568 tcp_match_skb_to_sack 4 23568 NULL
89616 +snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL
89617 +fq_codel_zalloc_15378 fq_codel_zalloc 1 15378 NULL
89618 +tty_prepare_flip_string_39955 tty_prepare_flip_string 3 39955 NULL
89619 +__tcp_push_pending_frames_48148 __tcp_push_pending_frames 2 48148 NULL
89620 +iwl_dbgfs_interrupt_read_23574 iwl_dbgfs_interrupt_read 3 23574 NULL
89621 +prctl_set_mm_64538 prctl_set_mm 3 64538 NULL
89622 +ipv6_recv_error_56347 ipv6_recv_error 3 56347 NULL
89623 +vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL
89624 +isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL
89625 +c4iw_id_table_alloc_48163 c4iw_id_table_alloc 3 48163 NULL
89626 +dma_push_rx_39973 dma_push_rx 2 39973 NULL
89627 +regmap_register_patch_21681 regmap_register_patch 3 21681 NULL
89628 +broadsheetfb_write_39976 broadsheetfb_write 3 39976 NULL
89629 +cfpkt_create_pfx_23594 cfpkt_create_pfx 1-2 23594 NULL
89630 +pipeline_csum_to_rx_xfer_swi_read_15403 pipeline_csum_to_rx_xfer_swi_read 3 15403 NULL
89631 +iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 4 56368 NULL
89632 +dev_read_56369 dev_read 3 56369 NULL
89633 +mthca_array_init_39987 mthca_array_init 2 39987 NULL
89634 +alloc_dummy_extent_buffer_56374 alloc_dummy_extent_buffer 2 56374 NULL
89635 +diva_os_alloc_message_buffer_64568 diva_os_alloc_message_buffer 1 64568 NULL
89636 +dgrp_dpa_read_1204 dgrp_dpa_read 3 1204 NULL
89637 +alloc_cc770dev_48186 alloc_cc770dev 1 48186 NULL
89638 +init_ipath_48187 init_ipath 1 48187 NULL
89639 +isr_rx_procs_read_31804 isr_rx_procs_read 3 31804 NULL
89640 +__videobuf_copy_to_user_15423 __videobuf_copy_to_user 4 15423 NULL
89641 +tracing_saved_cmdlines_read_21434 tracing_saved_cmdlines_read 3 21434 NULL
89642 +tx_tx_burst_programmed_read_20320 tx_tx_burst_programmed_read 3 20320 NULL
89643 +sys32_ipc_7238 sys32_ipc 3 7238 NULL
89644 +sta_last_signal_read_31818 sta_last_signal_read 3 31818 NULL
89645 +rtllib_alloc_txb_21687 rtllib_alloc_txb 1-2 21687 NULL
89646 +dvb_ca_en50221_io_write_43533 dvb_ca_en50221_io_write 3 43533 NULL
89647 +ddp_ppod_write_idata_25610 ddp_ppod_write_idata 5 25610 NULL
89648 +ocfs2_control_read_56405 ocfs2_control_read 3 56405 NULL
89649 +nf_nat_icmpv6_reply_translation_40023 nf_nat_icmpv6_reply_translation 5 40023 NULL nohasharray
89650 +ivtvfb_write_40023 ivtvfb_write 3 40023 &nf_nat_icmpv6_reply_translation_40023
89651 +hpsa_change_queue_depth_15449 hpsa_change_queue_depth 2 15449 NULL
89652 +tcp_write_xmit_64602 tcp_write_xmit 2 64602 NULL
89653 +use_pool_64607 use_pool 2 64607 NULL
89654 +__get_vm_area_caller_56416 __get_vm_area_caller 1 56416 NULL nohasharray
89655 +acpi_os_write_memory_56416 acpi_os_write_memory 1-3 56416 &__get_vm_area_caller_56416
89656 +store_msg_56417 store_msg 3 56417 NULL
89657 +__i2400mu_send_barker_23652 __i2400mu_send_barker 3 23652 NULL
89658 +nilfs_readpages_48229 nilfs_readpages 4 48229 NULL
89659 +datablob_hmac_append_40038 datablob_hmac_append 3 40038 NULL
89660 +read_file_recv_48232 read_file_recv 3 48232 NULL
89661 +xfs_handle_to_dentry_12135 xfs_handle_to_dentry 3 12135 NULL
89662 +set_tpl_pfs_27490 set_tpl_pfs 3 27490 NULL
89663 +fanotify_write_64623 fanotify_write 3 64623 NULL
89664 +batadv_add_packet_12136 batadv_add_packet 3 12136 NULL
89665 +rx_rate_rx_frames_per_rates_read_7282 rx_rate_rx_frames_per_rates_read 3 7282 NULL
89666 +fl_create_56435 fl_create 5 56435 NULL
89667 +gnttab_map_56439 gnttab_map 2 56439 NULL
89668 +nouveau_devinit_create__35348 nouveau_devinit_create_ 4 35348 NULL
89669 +nfsctl_transaction_read_48250 nfsctl_transaction_read 3 48250 NULL
89670 +event_rx_pool_read_25792 event_rx_pool_read 3 25792 NULL
89671 +cx231xx_init_isoc_56453 cx231xx_init_isoc 3-2 56453 NULL
89672 +drm_mode_crtc_set_gamma_size_31881 drm_mode_crtc_set_gamma_size 2 31881 NULL
89673 +set_connectable_56458 set_connectable 4 56458 NULL
89674 +a2mp_chan_alloc_skb_cb_27159 a2mp_chan_alloc_skb_cb 2 27159 NULL
89675 +nfc_hci_send_response_56462 nfc_hci_send_response 5 56462 NULL
89676 +add_port_54941 add_port 2 54941 NULL
89677 +osd_req_list_partition_objects_56464 osd_req_list_partition_objects 5 56464 NULL
89678 +cx18_read_23699 cx18_read 3 23699 NULL
89679 +tlbflush_read_file_64661 tlbflush_read_file 3 64661 NULL
89680 +ieee80211_if_read_last_beacon_31257 ieee80211_if_read_last_beacon 3 31257 NULL
89681 +efx_tsoh_get_buffer_64664 efx_tsoh_get_buffer 3 64664 NULL
89682 +rx_rx_out_of_mpdu_nodes_read_64668 rx_rx_out_of_mpdu_nodes_read 3 64668 NULL
89683 +ddb_output_write_31902 ddb_output_write 3 31902 NULL
89684 +send_set_info_48288 send_set_info 7 48288 NULL
89685 +sock_alloc_send_skb_23720 sock_alloc_send_skb 2 23720 NULL
89686 +wep_decrypt_fail_read_58567 wep_decrypt_fail_read 3 58567 NULL
89687 +set_disc_pwup_pfs_48300 set_disc_pwup_pfs 3 48300 NULL
89688 +lpfc_idiag_extacc_read_48301 lpfc_idiag_extacc_read 3 48301 NULL
89689 +p9_check_zc_errors_15534 p9_check_zc_errors 4 15534 NULL
89690 +new_dir_31919 new_dir 3 31919 NULL
89691 +kmem_alloc_31920 kmem_alloc 1 31920 NULL
89692 +timblogiw_read_48305 timblogiw_read 3 48305 NULL
89693 +sec_bulk_write_64691 sec_bulk_write 3 64691 NULL
89694 +mgmt_control_7349 mgmt_control 3 7349 NULL
89695 +hash_setkey_48310 hash_setkey 3 48310 NULL
89696 +ql_process_mac_rx_page_15543 ql_process_mac_rx_page 4 15543 NULL
89697 +hfcpci_empty_fifo_2427 hfcpci_empty_fifo 4 2427 NULL
89698 +ieee80211_if_read_bssid_35161 ieee80211_if_read_bssid 3 35161 NULL
89699 +sctp_setsockopt_delayed_ack_40129 sctp_setsockopt_delayed_ack 3 40129 NULL
89700 +cfg80211_connect_result_56515 cfg80211_connect_result 4-6 56515 NULL
89701 +rx_rx_dropped_frame_read_23748 rx_rx_dropped_frame_read 3 23748 NULL
89702 +iov_iter_copy_from_user_31942 iov_iter_copy_from_user 4 31942 NULL
89703 +ql_process_mac_rx_skb_6689 ql_process_mac_rx_skb 4 6689 NULL
89704 +ieee80211_if_read_dot11MeshHWMPactivePathTimeout_7368 ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 NULL
89705 +pwr_elp_enter_read_5324 pwr_elp_enter_read 3 5324 NULL
89706 +dapm_bias_read_file_64715 dapm_bias_read_file 3 64715 NULL
89707 +vb2_write_31948 vb2_write 3 31948 NULL
89708 +cfg80211_roamed_32632 cfg80211_roamed 5-7 32632 NULL
89709 +bnx2fc_process_unsol_compl_15576 bnx2fc_process_unsol_compl 2 15576 NULL
89710 +l1oip_socket_recv_56537 l1oip_socket_recv 6 56537 NULL
89711 +ip_options_get_56538 ip_options_get 4 56538 NULL
89712 +write_62671 write 3 62671 NULL
89713 +copy_from_user_toio_31966 copy_from_user_toio 3 31966 NULL
89714 +tm6000_i2c_send_regs_20250 tm6000_i2c_send_regs 5 20250 NULL
89715 +squashfs_read_inode_lookup_table_64739 squashfs_read_inode_lookup_table 4 64739 NULL
89716 +sl_change_mtu_7396 sl_change_mtu 2 7396 NULL
89717 +ceph_copy_page_vector_to_user_31270 ceph_copy_page_vector_to_user 3-4 31270 NULL
89718 +allocate_cnodes_5329 allocate_cnodes 1 5329 NULL
89719 +skb_add_data_48363 skb_add_data 3 48363 NULL
89720 +bio_map_kern_64751 bio_map_kern 3 64751 NULL
89721 +alloc_apertures_56561 alloc_apertures 1 56561 NULL
89722 +iscsi_complete_pdu_48372 iscsi_complete_pdu 4 48372 NULL
89723 +drm_property_create_blob_7414 drm_property_create_blob 2 7414 NULL
89724 +rx_path_reset_read_23801 rx_path_reset_read 3 23801 NULL
89725 +rs_sta_dbgfs_stats_table_read_56573 rs_sta_dbgfs_stats_table_read 3 56573 NULL
89726 +debug_debug2_read_30526 debug_debug2_read 3 30526 NULL
89727 +compat_fillonedir_15620 compat_fillonedir 3 15620 NULL
89728 +set_dis_tap_pfs_15621 set_dis_tap_pfs 3 15621 NULL
89729 +ipr_alloc_ucode_buffer_40199 ipr_alloc_ucode_buffer 1 40199 NULL
89730 +dsp_cmx_send_member_15625 dsp_cmx_send_member 2 15625 NULL
89731 +portcntrs_2_read_56586 portcntrs_2_read 3 56586 NULL
89732 +system_enable_read_25815 system_enable_read 3 25815 NULL
89733 +allocate_probes_40204 allocate_probes 1 40204 NULL
89734 +sd_alloc_ctl_entry_29708 sd_alloc_ctl_entry 1 29708 NULL
89735 +proc_loginuid_read_15631 proc_loginuid_read 3 15631 NULL
89736 +__earlyonly_bootmem_alloc_23824 __earlyonly_bootmem_alloc 2 23824 NULL
89737 +realloc_buffer_25816 realloc_buffer 2 25816 NULL
89738 +isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL
89739 +rx_filter_ibss_filter_read_50167 rx_filter_ibss_filter_read 3 50167 NULL
89740 +ip_options_get_alloc_7448 ip_options_get_alloc 1 7448 NULL
89741 +tomoyo_scan_bprm_15642 tomoyo_scan_bprm 2-4 15642 NULL nohasharray
89742 +pipeline_hs_tx_stat_fifo_int_read_15642 pipeline_hs_tx_stat_fifo_int_read 3 15642 &tomoyo_scan_bprm_15642
89743 +ieee80211_if_read_rc_rateidx_mask_5ghz_27183 ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 NULL
89744 +au0828_v4l2_read_40220 au0828_v4l2_read 3 40220 NULL
89745 +lbs_debugfs_write_48413 lbs_debugfs_write 3 48413 NULL
89746 +fs_path_add_15648 fs_path_add 3 15648 NULL
89747 +event_filter_write_56609 event_filter_write 3 56609 NULL
89748 +xfs_buf_read_map_40226 xfs_buf_read_map 3 40226 NULL
89749 +ms_rw_multi_sector_7459 ms_rw_multi_sector 3-4 7459 NULL
89750 +xsd_read_15653 xsd_read 3 15653 NULL
89751 +pwr_tx_without_ps_read_48423 pwr_tx_without_ps_read 3 48423 NULL
89752 +p54_init_common_23850 p54_init_common 1 23850 NULL
89753 +ubi_more_leb_change_data_63534 ubi_more_leb_change_data 4 63534 NULL
89754 +vmw_cursor_update_dmabuf_32045 vmw_cursor_update_dmabuf 3-4 32045 NULL
89755 +sys_sched_setaffinity_32046 sys_sched_setaffinity 2 32046 NULL
89756 +garp_request_join_7471 garp_request_join 4 7471 NULL
89757 +ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL
89758 +copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
89759 +unifi_read_14899 unifi_read 3 14899 NULL
89760 +il_dbgfs_missed_beacon_read_59956 il_dbgfs_missed_beacon_read 3 59956 NULL
89761 +compat_sys_msgrcv_7482 compat_sys_msgrcv 2 7482 NULL
89762 +do_kimage_alloc_64827 do_kimage_alloc 3 64827 NULL
89763 +brcmf_usb_dl_cmd_53130 brcmf_usb_dl_cmd 4 53130 NULL
89764 +proc_scsi_devinfo_write_32064 proc_scsi_devinfo_write 3 32064 NULL
89765 +pwr_missing_bcns_read_25824 pwr_missing_bcns_read 3 25824 NULL
89766 +ipv6_recv_rxpmtu_7142 ipv6_recv_rxpmtu 3 7142 NULL
89767 +ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL
89768 +uvc_debugfs_stats_read_56651 uvc_debugfs_stats_read 3 56651 NULL
89769 +ieee80211_if_read_channel_type_23884 ieee80211_if_read_channel_type 3 23884 NULL
89770 +tx_frag_mpdu_alloc_failed_read_41167 tx_frag_mpdu_alloc_failed_read 3 41167 NULL
89771 +tun_recvmsg_48463 tun_recvmsg 4 48463 NULL
89772 +uf_sme_queue_message_15697 uf_sme_queue_message 3 15697 NULL
89773 +gdth_search_isa_58595 gdth_search_isa 1 58595 NULL
89774 +sdhci_alloc_host_7509 sdhci_alloc_host 2 7509 NULL
89775 +_xfs_buf_get_pages_46811 _xfs_buf_get_pages 2 46811 NULL
89776 +rx_xfr_hint_trig_read_40283 rx_xfr_hint_trig_read 3 40283 NULL
89777 +iwch_reject_cr_23901 iwch_reject_cr 3 23901 NULL
89778 +altera_set_dr_pre_64862 altera_set_dr_pre 2 64862 NULL
89779 +bio_alloc_32095 bio_alloc 2 32095 NULL
89780 +shmem_setxattr_55867 shmem_setxattr 4 55867 NULL
89781 +rfcomm_sock_setsockopt_18254 rfcomm_sock_setsockopt 5 18254 NULL
89782 +ath6kl_fwlog_read_32101 ath6kl_fwlog_read 3 32101 NULL
89783 +add_new_gdb_meta_bg_23911 add_new_gdb_meta_bg 3 23911 NULL
89784 +r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL
89785 +snd_gus_dram_read_56686 snd_gus_dram_read 4 56686 NULL
89786 +ubi_io_write_data_40305 ubi_io_write_data 4-5 40305 NULL
89787 +send_control_msg_48498 send_control_msg 6 48498 NULL
89788 +ps_poll_upsd_timeouts_read_36755 ps_poll_upsd_timeouts_read 3 36755 NULL
89789 +mlx4_en_create_tx_ring_48501 mlx4_en_create_tx_ring 4 48501 NULL
89790 +ffs_epfile_io_64886 ffs_epfile_io 3 64886 NULL
89791 +request_key_async_with_auxdata_46624 request_key_async_with_auxdata 4 46624 NULL
89792 +diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL
89793 +dvb_ringbuffer_read_user_56702 dvb_ringbuffer_read_user 3 56702 NULL
89794 +uvc_alloc_entity_20836 uvc_alloc_entity 4-3 20836 NULL
89795 +batadv_tt_changes_fill_buff_40323 batadv_tt_changes_fill_buff 4 40323 NULL
89796 +__alloc_skb_23940 __alloc_skb 1 23940 NULL
89797 +sta_flags_read_56710 sta_flags_read 3 56710 NULL
89798 +ipv6_getsockopt_sticky_56711 ipv6_getsockopt_sticky 5 56711 NULL
89799 +HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
89800 +smk_read_mapped_7562 smk_read_mapped 3 7562 NULL
89801 +alloc_tx_32143 alloc_tx 2 32143 NULL
89802 +wiimote_hid_send_48528 wiimote_hid_send 3 48528 NULL
89803 +compat_do_arpt_set_ctl_12184 compat_do_arpt_set_ctl 4 12184 NULL
89804 +hsc_write_55875 hsc_write 3 55875 NULL
89805 +cifs_setxattr_23957 cifs_setxattr 4 23957 NULL
89806 +do_test_15766 do_test 1 15766 NULL
89807 +ieee80211_if_read_ave_beacon_64924 ieee80211_if_read_ave_beacon 3 64924 NULL
89808 +ip_recv_error_23109 ip_recv_error 3 23109 NULL
89809 +named_distribute_48544 named_distribute 4 48544 NULL
89810 +ip_generic_getfrag_12187 ip_generic_getfrag 3-4 12187 NULL
89811 +venus_link_32165 venus_link 5 32165 NULL
89812 +event_rx_mem_empty_read_40363 event_rx_mem_empty_read 3 40363 NULL
89813 +drm_agp_bind_pages_56748 drm_agp_bind_pages 3 56748 NULL
89814 +vmw_kms_present_38130 vmw_kms_present 9 38130 NULL
89815 +btrfsic_map_block_56751 btrfsic_map_block 2 56751 NULL
89816 +set_std_nic_pfs_15792 set_std_nic_pfs 3 15792 NULL
89817 +ubifs_wbuf_write_nolock_64946 ubifs_wbuf_write_nolock 3 64946 NULL
89818 +usblp_new_writeurb_22894 usblp_new_writeurb 2 22894 NULL
89819 +llcp_sock_recvmsg_13556 llcp_sock_recvmsg 4 13556 NULL
89820 +smk_read_direct_15803 smk_read_direct 3 15803 NULL
89821 +fwnet_incoming_packet_40380 fwnet_incoming_packet 3 40380 NULL
89822 +groups_alloc_7614 groups_alloc 1 7614 NULL
89823 +cxgb_alloc_mem_24007 cxgb_alloc_mem 1 24007 NULL
89824 +traceprobe_probes_write_64969 traceprobe_probes_write 3 64969 NULL
89825 +suspend_dtim_interval_read_64971 suspend_dtim_interval_read 3 64971 NULL
89826 +ext_sd_execute_read_data_48589 ext_sd_execute_read_data 9 48589 NULL
89827 +afs_proc_rootcell_write_15822 afs_proc_rootcell_write 3 15822 NULL
89828 +__carl9170_rx_56784 __carl9170_rx 3 56784 NULL
89829 +__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL
89830 +oprofilefs_ulong_from_user_57251 oprofilefs_ulong_from_user 3 57251 NULL
89831 +ioctl_private_iw_point_1273 ioctl_private_iw_point 7 1273 NULL
89832 +tcf_csum_ipv4_tcp_39713 tcf_csum_ipv4_tcp 4 39713 NULL
89833 +snapshot_write_28351 snapshot_write 3 28351 NULL
89834 +event_enable_read_7074 event_enable_read 3 7074 NULL
89835 +brcmf_sdbrcm_died_dump_15841 brcmf_sdbrcm_died_dump 3 15841 NULL
89836 +do_syslog_56807 do_syslog 3 56807 NULL
89837 +sl_realloc_bufs_64086 sl_realloc_bufs 2 64086 NULL
89838 +pskb_pull_65005 pskb_pull 2 65005 NULL
89839 +caif_seqpkt_recvmsg_32241 caif_seqpkt_recvmsg 4 32241 NULL
89840 +lbs_lowrssi_read_32242 lbs_lowrssi_read 3 32242 NULL
89841 +sisusbcon_scroll_31315 sisusbcon_scroll 5-2-3 31315 NULL
89842 +unifi_write_65012 unifi_write 3 65012 NULL
89843 +agp_generic_alloc_user_9470 agp_generic_alloc_user 1 9470 NULL
89844 +nfs_readdata_alloc_65015 nfs_readdata_alloc 2 65015 NULL
89845 +ubi_io_write_15870 ubi_io_write 5-4 15870 NULL nohasharray
89846 +media_entity_init_15870 media_entity_init 2-4 15870 &ubi_io_write_15870
89847 +mtdchar_write_56831 mtdchar_write 3 56831 NULL nohasharray
89848 +ntfs_rl_realloc_56831 ntfs_rl_realloc 3 56831 &mtdchar_write_56831
89849 +skb_copy_to_page_nocache_58624 skb_copy_to_page_nocache 6 58624 NULL
89850 +mid_get_vbt_data_r1_26170 mid_get_vbt_data_r1 2 26170 NULL
89851 +skb_copy_expand_7685 skb_copy_expand 2-3 7685 NULL nohasharray
89852 +acpi_ex_allocate_name_string_7685 acpi_ex_allocate_name_string 2-1 7685 &skb_copy_expand_7685
89853 +if_write_51756 if_write 3 51756 NULL
89854 +insert_dent_65034 insert_dent 7 65034 NULL
89855 +blkcipher_copy_iv_24075 blkcipher_copy_iv 3 24075 NULL
89856 +snd_rawmidi_kernel_write1_56847 snd_rawmidi_kernel_write1 4 56847 NULL
89857 +vb2_fop_read_24080 vb2_fop_read 3 24080 NULL
89858 +brcmf_sdcard_rwdata_65041 brcmf_sdcard_rwdata 5 65041 NULL
89859 +nfs_map_group_to_gid_15892 nfs_map_group_to_gid 3 15892 NULL
89860 +lc_create_48662 lc_create 3 48662 NULL
89861 +aes_encrypt_packets_read_48666 aes_encrypt_packets_read 3 48666 NULL
89862 +sm501_create_subdev_48668 sm501_create_subdev 3-4 48668 NULL nohasharray
89863 +sys_setgroups_48668 sys_setgroups 1 48668 &sm501_create_subdev_48668
89864 +ath9k_multi_regread_65056 ath9k_multi_regread 4 65056 NULL
89865 +brcmf_sdcard_send_buf_7713 brcmf_sdcard_send_buf 6 7713 NULL
89866 +l2cap_build_cmd_48676 l2cap_build_cmd 4 48676 NULL
89867 +batadv_hash_new_40491 batadv_hash_new 1 40491 NULL
89868 +pipeline_post_proc_swi_read_24108 pipeline_post_proc_swi_read 3 24108 NULL
89869 +request_key_auth_read_24109 request_key_auth_read 3 24109 NULL
89870 +bnx2fc_process_l2_frame_compl_65072 bnx2fc_process_l2_frame_compl 3 65072 NULL
89871 +__alloc_bootmem_node_high_65076 __alloc_bootmem_node_high 2 65076 NULL
89872 +persistent_ram_new_40501 persistent_ram_new 1-2 40501 NULL
89873 +ieee80211_send_auth_24121 ieee80211_send_auth 5 24121 NULL
89874 +altera_drscan_48698 altera_drscan 2 48698 NULL
89875 +tx_frag_in_process_called_read_1290 tx_frag_in_process_called_read 3 1290 NULL
89876 +set_bypass_pwup_pfs_7742 set_bypass_pwup_pfs 3 7742 NULL
89877 +kvm_set_irq_routing_48704 kvm_set_irq_routing 3 48704 NULL
89878 +power_read_15939 power_read 3 15939 NULL
89879 +recv_msg_48709 recv_msg 4 48709 NULL
89880 +befs_utf2nls_25628 befs_utf2nls 3 25628 NULL
89881 +ghash_async_setkey_60001 ghash_async_setkey 3 60001 NULL
89882 +TSS_checkhmac2_40520 TSS_checkhmac2 5-7 40520 NULL
89883 +btrfs_error_discard_extent_50444 btrfs_error_discard_extent 2 50444 NULL
89884 +irnet_ctrl_write_24139 irnet_ctrl_write 3 24139 NULL
89885 +nouveau_bar_create__32332 nouveau_bar_create_ 4 32332 NULL
89886 +process_vm_rw_pages_15954 process_vm_rw_pages 6-5 15954 NULL
89887 +revalidate_19043 revalidate 2 19043 NULL
89888 +t4_alloc_mem_32342 t4_alloc_mem 1 32342 NULL
89889 +aes_decrypt_fail_read_54815 aes_decrypt_fail_read 3 54815 NULL
89890 +trim_bitmaps_24158 trim_bitmaps 3 24158 NULL
89891 +alloc_candev_7776 alloc_candev 1-2 7776 NULL
89892 +check_header_56930 check_header 2 56930 NULL
89893 +ima_write_policy_40548 ima_write_policy 3 40548 NULL
89894 +journal_init_revoke_56933 journal_init_revoke 2 56933 NULL
89895 +__wa_xfer_setup_segs_56725 __wa_xfer_setup_segs 2 56725 NULL
89896 +ath6kl_usb_bmi_read_48745 ath6kl_usb_bmi_read 3 48745 NULL
89897 +sel_read_initcon_32362 sel_read_initcon 3 32362 NULL
89898 +ath6kl_regwrite_read_48747 ath6kl_regwrite_read 3 48747 NULL
89899 +frame_alloc_15981 frame_alloc 4 15981 NULL
89900 +esp_alloc_tmp_40558 esp_alloc_tmp 3-2 40558 NULL
89901 +nf_nat_seq_adjust_44989 nf_nat_seq_adjust 4 44989 NULL
89902 +diva_os_copy_from_user_7792 diva_os_copy_from_user 4 7792 NULL
89903 +adu_read_24177 adu_read 3 24177 NULL
89904 +alloc_fddidev_15382 alloc_fddidev 1 15382 NULL
89905 +send_mpa_reply_32372 send_mpa_reply 3 32372 NULL
89906 +alloc_vm_area_15989 alloc_vm_area 1 15989 NULL
89907 +variax_set_raw2_32374 variax_set_raw2 4 32374 NULL
89908 +vfd_write_14717 vfd_write 3 14717 NULL
89909 +carl9170_rx_copy_data_21656 carl9170_rx_copy_data 2 21656 NULL
89910 +usbtmc_read_32377 usbtmc_read 3 32377 NULL
89911 +qib_alloc_devdata_51819 qib_alloc_devdata 2 51819 NULL
89912 +l2cap_segment_sdu_48772 l2cap_segment_sdu 4 48772 NULL
89913 +dvb_generic_ioctl_21810 dvb_generic_ioctl 2 21810 NULL
89914 +cache_write_pipefs_48270 cache_write_pipefs 3 48270 NULL
89915 +rx_defrag_called_read_1897 rx_defrag_called_read 3 1897 NULL
89916 +xfs_iext_add_indirect_multi_32400 xfs_iext_add_indirect_multi 3 32400 NULL
89917 +viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
89918 +__cxio_init_resource_fifo_23447 __cxio_init_resource_fifo 3 23447 NULL
89919 +skge_rx_get_40598 skge_rx_get 3 40598 NULL
89920 +nouveau_device_create__56984 nouveau_device_create_ 6 56984 NULL
89921 +register_device_60015 register_device 2-3 60015 NULL
89922 +got_frame_16028 got_frame 2 16028 NULL
89923 +ssb_bus_register_65183 ssb_bus_register 3 65183 NULL
89924 +pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 1-2-3 24224 NULL
89925 +il3945_sta_dbgfs_stats_table_read_48802 il3945_sta_dbgfs_stats_table_read 3 48802 NULL
89926 +scsi_register_49094 scsi_register 2 49094 NULL
89927 +twa_change_queue_depth_48808 twa_change_queue_depth 2 48808 NULL
89928 +vlsi_alloc_ring_57003 vlsi_alloc_ring 3-4 57003 NULL
89929 +sel_read_bool_24236 sel_read_bool 3 24236 NULL
89930 +batadv_check_unicast_packet_10866 batadv_check_unicast_packet 2 10866 NULL
89931 +tcp_push_one_48816 tcp_push_one 2 48816 NULL
89932 +nfulnl_alloc_skb_65207 nfulnl_alloc_skb 2 65207 NULL
89933 +dccp_recvmsg_16056 dccp_recvmsg 4 16056 NULL
89934 +gfs2_glock_nq_m_20347 gfs2_glock_nq_m 1 20347 NULL
89935 +atomic_counters_read_48827 atomic_counters_read 3 48827 NULL
89936 +rx_rx_done_read_65217 rx_rx_done_read 3 65217 NULL
89937 +vc_do_resize_48842 vc_do_resize 3-4 48842 NULL
89938 +svc_pool_map_alloc_arrays_47181 svc_pool_map_alloc_arrays 2 47181 NULL
89939 +create_gpadl_header_19064 create_gpadl_header 2 19064 NULL
89940 +dvb_dmxdev_read_sec_7892 dvb_dmxdev_read_sec 4 7892 NULL
89941 +snd_opl4_mem_proc_read_63774 snd_opl4_mem_proc_read 5 63774 NULL
89942 +xfs_trans_get_efi_7898 xfs_trans_get_efi 2 7898 NULL
89943 +compat_sys_preadv64_24283 compat_sys_preadv64 3 24283 NULL
89944 +pipeline_rx_complete_stat_fifo_int_read_40671 pipeline_rx_complete_stat_fifo_int_read 3 40671 NULL
89945 +viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 NULL
89946 +wlc_phy_loadsampletable_nphy_64367 wlc_phy_loadsampletable_nphy 3 64367 NULL
89947 +__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL
89948 +cx2341x_ctrl_new_std_57061 cx2341x_ctrl_new_std 4 57061 NULL
89949 +isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
89950 +sca3000_read_data_57064 sca3000_read_data 4 57064 NULL
89951 +mid_get_vbt_data_r0_10876 mid_get_vbt_data_r0 2 10876 NULL
89952 +pcmcia_replace_cis_57066 pcmcia_replace_cis 3 57066 NULL
89953 +ide_driver_proc_write_32493 ide_driver_proc_write 3 32493 NULL
89954 +dma_tx_requested_read_16110 dma_tx_requested_read 3 16110 NULL nohasharray
89955 +isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 &dma_tx_requested_read_16110
89956 +sys_setgroups16_48882 sys_setgroups16 1 48882 NULL
89957 +kmem_zalloc_greedy_65268 kmem_zalloc_greedy 3-2 65268 NULL
89958 +nfc_hci_set_param_40697 nfc_hci_set_param 5 40697 NULL
89959 +f_hidg_write_7932 f_hidg_write 3 7932 NULL
89960 +ath6kl_add_bss_if_needed_24317 ath6kl_add_bss_if_needed 6 24317 NULL
89961 +kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL
89962 +mce_flush_rx_buffer_14976 mce_flush_rx_buffer 2 14976 NULL
89963 +mac_drv_rx_init_48898 mac_drv_rx_init 2 48898 NULL
89964 +sisusbcon_scroll_area_25899 sisusbcon_scroll_area 4-3 25899 NULL
89965 +xdi_copy_to_user_48900 xdi_copy_to_user 4 48900 NULL
89966 +compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL
89967 +fsm_init_16134 fsm_init 2 16134 NULL
89968 +ext_sd_execute_write_data_8175 ext_sd_execute_write_data 9 8175 NULL
89969 +tracing_set_trace_write_57096 tracing_set_trace_write 3 57096 NULL
89970 +disconnect_32521 disconnect 4 32521 NULL
89971 +__seq_open_private_40715 __seq_open_private 3 40715 NULL
89972 +tifm_alloc_adapter_10903 tifm_alloc_adapter 1 10903 NULL
89973 +ath6kl_wmi_add_wow_pattern_cmd_12842 ath6kl_wmi_add_wow_pattern_cmd 4 12842 NULL
89974 +mpi_set_buffer_65294 mpi_set_buffer 3 65294 NULL
89975 +redirected_tty_write_65297 redirected_tty_write 3 65297 NULL
89976 +ilo_read_32531 ilo_read 3 32531 NULL
89977 +ieee80211_if_read_estab_plinks_32533 ieee80211_if_read_estab_plinks 3 32533 NULL
89978 +smk_write_load_self_7958 smk_write_load_self 3 7958 NULL
89979 +pipeline_pipeline_fifo_full_read_34095 pipeline_pipeline_fifo_full_read 3 34095 NULL
89980 +sysfs_write_file_57116 sysfs_write_file 3 57116 NULL
89981 +gdth_isa_probe_one_48925 gdth_isa_probe_one 1 48925 NULL
89982 +kzalloc_node_24352 kzalloc_node 1 24352 NULL
89983 +nfc_hci_execute_cmd_async_65314 nfc_hci_execute_cmd_async 5 65314 NULL
89984 +msnd_fifo_alloc_23179 msnd_fifo_alloc 2 23179 NULL
89985 +format_devstat_counter_32550 format_devstat_counter 3 32550 NULL
89986 +nfcwilink_skb_alloc_16167 nfcwilink_skb_alloc 1 16167 NULL
89987 +xfs_iext_remove_direct_40744 xfs_iext_remove_direct 3 40744 NULL
89988 +remap_pci_mem_15966 remap_pci_mem 1-2 15966 NULL
89989 +qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL
89990 +cfi_read_pri_24366 cfi_read_pri 3 24366 NULL
89991 +btrfs_insert_delayed_dir_index_63720 btrfs_insert_delayed_dir_index 4 63720 NULL
89992 +aes_encrypt_fail_read_32562 aes_encrypt_fail_read 3 32562 NULL
89993 +card_send_command_40757 card_send_command 3 40757 NULL
89994 +sys_mbind_7990 sys_mbind 5 7990 NULL
89995 +dccp_setsockopt_service_65336 dccp_setsockopt_service 4 65336 NULL
89996 +pg_write_40766 pg_write 3 40766 NULL
89997 +event_heart_beat_read_48961 event_heart_beat_read 3 48961 NULL
89998 +uea_idma_write_64139 uea_idma_write 3 64139 NULL
89999 +brcmf_debugfs_sdio_counter_read_58369 brcmf_debugfs_sdio_counter_read 3 58369 NULL
90000 +carl9170_rx_stream_1334 carl9170_rx_stream 3 1334 NULL
90001 +nand_ecc_test_run_48966 nand_ecc_test_run 1 48966 NULL
90002 +ath_descdma_setup_12257 ath_descdma_setup 5 12257 NULL
90003 +alloc_sched_domains_47756 alloc_sched_domains 1 47756 NULL
90004 +dma_rx_requested_read_65354 dma_rx_requested_read 3 65354 NULL
90005 +batadv_orig_hash_del_if_48972 batadv_orig_hash_del_if 2 48972 NULL
90006 +tt3650_ci_msg_locked_8013 tt3650_ci_msg_locked 4 8013 NULL
90007 +pvr2_v4l2_ioctl_24398 pvr2_v4l2_ioctl 2 24398 NULL nohasharray
90008 +getxattr_24398 getxattr 4 24398 &pvr2_v4l2_ioctl_24398
90009 +stk_prepare_sio_buffers_57168 stk_prepare_sio_buffers 2 57168 NULL
90010 +vcs_read_8017 vcs_read 3 8017 NULL
90011 +read_file_beacon_32595 read_file_beacon 3 32595 NULL
90012 +gss_pipe_downcall_23182 gss_pipe_downcall 3 23182 NULL
90013 +rx_hw_stuck_read_57179 rx_hw_stuck_read 3 57179 NULL
90014 +iwl_dbgfs_wowlan_sram_read_540 iwl_dbgfs_wowlan_sram_read 3 540 NULL
90015 +_alloc_set_attr_list_48991 _alloc_set_attr_list 4 48991 NULL
90016 +sys_set_mempolicy_32608 sys_set_mempolicy 3 32608 NULL
90017 +atomic_read_file_16227 atomic_read_file 3 16227 NULL
90018 +vhost_add_used_and_signal_n_8038 vhost_add_used_and_signal_n 4 8038 NULL
90019 +copy_and_check_19089 copy_and_check 3 19089 NULL
90020 +b43_debugfs_read_24425 b43_debugfs_read 3 24425 NULL
90021 +netdev_alloc_skb_ip_align_40811 netdev_alloc_skb_ip_align 2 40811 NULL
90022 +i915_wedged_read_35474 i915_wedged_read 3 35474 NULL
90023 +ieee80211_rx_mgmt_beacon_24430 ieee80211_rx_mgmt_beacon 3 24430 NULL
90024 +iser_rcv_completion_8048 iser_rcv_completion 2 8048 NULL
90025 +trace_parser_get_init_31379 trace_parser_get_init 2 31379 NULL
90026 +ms_read_multiple_pages_8052 ms_read_multiple_pages 5-4 8052 NULL
90027 +__alloc_bootmem_nopanic_65397 __alloc_bootmem_nopanic 1 65397 NULL
90028 +trace_seq_to_user_65398 trace_seq_to_user 3 65398 NULL
90029 +ixgbe_alloc_q_vector_24439 ixgbe_alloc_q_vector 4-6 24439 NULL
90030 +smk_user_access_24440 smk_user_access 3 24440 NULL
90031 +xd_rw_49020 xd_rw 3-4 49020 NULL
90032 +dgrp_mon_read_8065 dgrp_mon_read 3 8065 NULL
90033 +tt3650_ci_msg_57219 tt3650_ci_msg 4 57219 NULL
90034 +mpi_alloc_limb_space_23190 mpi_alloc_limb_space 1 23190 NULL
90035 +kvmalloc_32646 kvmalloc 1 32646 NULL
90036 +alloc_targets_8074 alloc_targets 2 8074 NULL nohasharray
90037 +qla4xxx_post_ping_evt_work_8074 qla4xxx_post_ping_evt_work 4 8074 &alloc_targets_8074
90038 +evdev_do_ioctl_24459 evdev_do_ioctl 2 24459 NULL
90039 +drm_calloc_large_65421 drm_calloc_large 1-2 65421 NULL
90040 +set_disc_pfs_16270 set_disc_pfs 3 16270 NULL
90041 +skb_copy_and_csum_datagram_iovec_24466 skb_copy_and_csum_datagram_iovec 2 24466 NULL
90042 +__mxt_write_reg_57326 __mxt_write_reg 3 57326 NULL
90043 +caif_stream_sendmsg_9110 caif_stream_sendmsg 4 9110 NULL
90044 +drbd_setsockopt_16280 drbd_setsockopt 5 16280 NULL nohasharray
90045 +nand_bch_init_16280 nand_bch_init 3-2 16280 &drbd_setsockopt_16280
90046 +xpc_kzalloc_cacheline_aligned_65433 xpc_kzalloc_cacheline_aligned 1 65433 NULL
90047 +v9fs_file_read_40858 v9fs_file_read 3 40858 NULL
90048 +fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL
90049 +pn533_init_target_frame_65438 pn533_init_target_frame 3 65438 NULL
90050 +__mptctl_ioctl_15875 __mptctl_ioctl 2 15875 NULL
90051 +move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
90052 +i915_wedged_write_47771 i915_wedged_write 3 47771 NULL
90053 +usb_alloc_coherent_65444 usb_alloc_coherent 2 65444 NULL
90054 +aes_decrypt_interrupt_read_19910 aes_decrypt_interrupt_read 3 19910 NULL
90055 +l2cap_bredr_sig_cmd_49065 l2cap_bredr_sig_cmd 3 49065 NULL
90056 +tipc_buf_acquire_60437 tipc_buf_acquire 1 60437 NULL
90057 +alloc_flex_gd_57259 alloc_flex_gd 1 57259 NULL
90058 +venus_lookup_8121 venus_lookup 4 8121 NULL
90059 +compat_writev_60063 compat_writev 3 60063 NULL
90060 +io_mapping_create_wc_1354 io_mapping_create_wc 1-2 1354 NULL
90061 +jfs_readpages_32702 jfs_readpages 4 32702 NULL
90062 +read_file_queue_40895 read_file_queue 3 40895 NULL
90063 +request_key_with_auxdata_24515 request_key_with_auxdata 4 24515 NULL
90064 +waiters_read_40902 waiters_read 3 40902 NULL
90065 +pstore_file_read_57288 pstore_file_read 3 57288 NULL
90066 +vmalloc_node_58700 vmalloc_node 1 58700 NULL
90067 +xfs_buf_get_map_24522 xfs_buf_get_map 3 24522 NULL
90068 +ath_rx_edma_init_65483 ath_rx_edma_init 2 65483 NULL
90069 +vmw_cursor_update_image_16332 vmw_cursor_update_image 3-4 16332 NULL
90070 +compat_do_readv_writev_49102 compat_do_readv_writev 4 49102 NULL
90071 +dpcm_state_read_file_65489 dpcm_state_read_file 3 65489 NULL
90072 +vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
90073 +named_prepare_buf_24532 named_prepare_buf 2 24532 NULL
90074 +hdpvr_read_9273 hdpvr_read 3 9273 NULL
90075 +alloc_dr_65495 alloc_dr 2 65495 NULL
90076 +do_mpage_readpage_24536 do_mpage_readpage 3 24536 NULL
90077 +rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL
90078 +ftdi_elan_write_57309 ftdi_elan_write 3 57309 NULL
90079 +megasas_change_queue_depth_32747 megasas_change_queue_depth 2 32747 NULL
90080 +total_ps_buffered_read_16365 total_ps_buffered_read 3 16365 NULL
90081 +rbd_add_16366 rbd_add 3 16366 NULL
90082 +stats_read_ul_32751 stats_read_ul 3 32751 NULL
90083 +pt_read_49136 pt_read 3 49136 NULL
90084 +tsi148_alloc_resource_24563 tsi148_alloc_resource 2 24563 NULL
90085 +snd_vx_create_40948 snd_vx_create 4 40948 NULL
90086 +iscsi_tcp_conn_setup_16376 iscsi_tcp_conn_setup 2 16376 NULL
90087 +check_mirror_57342 check_mirror 1-2 57342 NULL nohasharray
90088 +usblp_read_57342 usblp_read 3 57342 &check_mirror_57342
90089 +atyfb_setup_generic_49151 atyfb_setup_generic 3 49151 NULL
90090 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
90091 new file mode 100644
90092 index 0000000..6387ddc
90093 --- /dev/null
90094 +++ b/tools/gcc/size_overflow_plugin.c
90095 @@ -0,0 +1,1918 @@
90096 +/*
90097 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
90098 + * Licensed under the GPL v2, or (at your option) v3
90099 + *
90100 + * Homepage:
90101 + * http://www.grsecurity.net/~ephox/overflow_plugin/
90102 + *
90103 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
90104 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
90105 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
90106 + *
90107 + * Usage:
90108 + * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -Wno-missing-field-initializers -o size_overflow_plugin.so size_overflow_plugin.c
90109 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
90110 + */
90111 +
90112 +#include "gcc-plugin.h"
90113 +#include "config.h"
90114 +#include "system.h"
90115 +#include "coretypes.h"
90116 +#include "tree.h"
90117 +#include "tree-pass.h"
90118 +#include "intl.h"
90119 +#include "plugin-version.h"
90120 +#include "tm.h"
90121 +#include "toplev.h"
90122 +#include "function.h"
90123 +#include "tree-flow.h"
90124 +#include "plugin.h"
90125 +#include "gimple.h"
90126 +#include "c-common.h"
90127 +#include "diagnostic.h"
90128 +#include "cfgloop.h"
90129 +
90130 +#if BUILDING_GCC_VERSION >= 4007
90131 +#include "c-tree.h"
90132 +#else
90133 +#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
90134 +#endif
90135 +
90136 +struct size_overflow_hash {
90137 + const struct size_overflow_hash * const next;
90138 + const char * const name;
90139 + const unsigned int param;
90140 +};
90141 +
90142 +#include "size_overflow_hash.h"
90143 +
90144 +enum marked {
90145 + MARKED_NO, MARKED_YES, MARKED_NOT_INTENTIONAL
90146 +};
90147 +
90148 +#define __unused __attribute__((__unused__))
90149 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
90150 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
90151 +#define BEFORE_STMT true
90152 +#define AFTER_STMT false
90153 +#define CREATE_NEW_VAR NULL_TREE
90154 +#define CODES_LIMIT 32
90155 +#define MAX_PARAM 32
90156 +#define MY_STMT GF_PLF_1
90157 +#define NO_CAST_CHECK GF_PLF_2
90158 +
90159 +#if BUILDING_GCC_VERSION == 4005
90160 +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
90161 +#endif
90162 +
90163 +int plugin_is_GPL_compatible;
90164 +void debug_gimple_stmt(gimple gs);
90165 +
90166 +static tree expand(struct pointer_set_t *visited, tree lhs);
90167 +static bool pre_expand(struct pointer_set_t *visited, const_tree lhs);
90168 +static tree report_size_overflow_decl;
90169 +static const_tree const_char_ptr_type_node;
90170 +static unsigned int handle_function(void);
90171 +static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
90172 +static tree get_size_overflow_type(gimple stmt, const_tree node);
90173 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
90174 +
90175 +static unsigned int call_count=0;
90176 +
90177 +static struct plugin_info size_overflow_plugin_info = {
90178 + .version = "20121212beta",
90179 + .help = "no-size-overflow\tturn off size overflow checking\n",
90180 +};
90181 +
90182 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
90183 +{
90184 + unsigned int arg_count;
90185 + enum tree_code code = TREE_CODE(*node);
90186 +
90187 + switch (code) {
90188 + case FUNCTION_DECL:
90189 + arg_count = type_num_arguments(TREE_TYPE(*node));
90190 + break;
90191 + case FUNCTION_TYPE:
90192 + case METHOD_TYPE:
90193 + arg_count = type_num_arguments(*node);
90194 + break;
90195 + default:
90196 + *no_add_attrs = true;
90197 + error("%s: %qE attribute only applies to functions", __func__, name);
90198 + return NULL_TREE;
90199 + }
90200 +
90201 + for (; args; args = TREE_CHAIN(args)) {
90202 + tree position = TREE_VALUE(args);
90203 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
90204 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
90205 + *no_add_attrs = true;
90206 + }
90207 + }
90208 + return NULL_TREE;
90209 +}
90210 +
90211 +static const char* get_asm_name(tree node)
90212 +{
90213 + return IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(node));
90214 +}
90215 +
90216 +static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
90217 +{
90218 + unsigned int arg_count, arg_num;
90219 + enum tree_code code = TREE_CODE(*node);
90220 +
90221 + switch (code) {
90222 + case FUNCTION_DECL:
90223 + arg_count = type_num_arguments(TREE_TYPE(*node));
90224 + break;
90225 + case FUNCTION_TYPE:
90226 + case METHOD_TYPE:
90227 + arg_count = type_num_arguments(*node);
90228 + break;
90229 + case FIELD_DECL:
90230 + arg_num = TREE_INT_CST_LOW(TREE_VALUE(args));
90231 + if (arg_num != 0) {
90232 + *no_add_attrs = true;
90233 + error("%s: %qE attribute parameter can only be 0 in structure fields", __func__, name);
90234 + }
90235 + return NULL_TREE;
90236 + default:
90237 + *no_add_attrs = true;
90238 + error("%qE attribute only applies to functions", name);
90239 + return NULL_TREE;
90240 + }
90241 +
90242 + for (; args; args = TREE_CHAIN(args)) {
90243 + tree position = TREE_VALUE(args);
90244 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) > arg_count ) {
90245 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
90246 + *no_add_attrs = true;
90247 + }
90248 + }
90249 + return NULL_TREE;
90250 +}
90251 +
90252 +static struct attribute_spec size_overflow_attr = {
90253 + .name = "size_overflow",
90254 + .min_length = 1,
90255 + .max_length = -1,
90256 + .decl_required = true,
90257 + .type_required = false,
90258 + .function_type_required = false,
90259 + .handler = handle_size_overflow_attribute,
90260 +#if BUILDING_GCC_VERSION >= 4007
90261 + .affects_type_identity = false
90262 +#endif
90263 +};
90264 +
90265 +static struct attribute_spec intentional_overflow_attr = {
90266 + .name = "intentional_overflow",
90267 + .min_length = 1,
90268 + .max_length = -1,
90269 + .decl_required = true,
90270 + .type_required = false,
90271 + .function_type_required = false,
90272 + .handler = handle_intentional_overflow_attribute,
90273 +#if BUILDING_GCC_VERSION >= 4007
90274 + .affects_type_identity = false
90275 +#endif
90276 +};
90277 +
90278 +static void register_attributes(void __unused *event_data, void __unused *data)
90279 +{
90280 + register_attribute(&size_overflow_attr);
90281 + register_attribute(&intentional_overflow_attr);
90282 +}
90283 +
90284 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
90285 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
90286 +{
90287 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
90288 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
90289 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
90290 +
90291 + unsigned int m = 0x57559429;
90292 + unsigned int n = 0x5052acdb;
90293 + const unsigned int *key4 = (const unsigned int *)key;
90294 + unsigned int h = len;
90295 + unsigned int k = len + seed + n;
90296 + unsigned long long p;
90297 +
90298 + while (len >= 8) {
90299 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
90300 + len -= 8;
90301 + }
90302 + if (len >= 4) {
90303 + cwmixb(key4[0]) key4 += 1;
90304 + len -= 4;
90305 + }
90306 + if (len)
90307 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
90308 + cwmixb(h ^ (k + n));
90309 + return k ^ h;
90310 +
90311 +#undef cwfold
90312 +#undef cwmixa
90313 +#undef cwmixb
90314 +}
90315 +
90316 +static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed)
90317 +{
90318 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
90319 + unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff;
90320 + return fn ^ codes;
90321 +}
90322 +
90323 +static inline tree get_original_function_decl(tree fndecl)
90324 +{
90325 + if (DECL_ABSTRACT_ORIGIN(fndecl))
90326 + return DECL_ABSTRACT_ORIGIN(fndecl);
90327 + return fndecl;
90328 +}
90329 +
90330 +static inline gimple get_def_stmt(const_tree node)
90331 +{
90332 + gcc_assert(node != NULL_TREE);
90333 + gcc_assert(TREE_CODE(node) == SSA_NAME);
90334 + return SSA_NAME_DEF_STMT(node);
90335 +}
90336 +
90337 +static unsigned char get_tree_code(const_tree type)
90338 +{
90339 + switch (TREE_CODE(type)) {
90340 + case ARRAY_TYPE:
90341 + return 0;
90342 + case BOOLEAN_TYPE:
90343 + return 1;
90344 + case ENUMERAL_TYPE:
90345 + return 2;
90346 + case FUNCTION_TYPE:
90347 + return 3;
90348 + case INTEGER_TYPE:
90349 + return 4;
90350 + case POINTER_TYPE:
90351 + return 5;
90352 + case RECORD_TYPE:
90353 + return 6;
90354 + case UNION_TYPE:
90355 + return 7;
90356 + case VOID_TYPE:
90357 + return 8;
90358 + case REAL_TYPE:
90359 + return 9;
90360 + case VECTOR_TYPE:
90361 + return 10;
90362 + case REFERENCE_TYPE:
90363 + return 11;
90364 + case OFFSET_TYPE:
90365 + return 12;
90366 + case COMPLEX_TYPE:
90367 + return 13;
90368 + default:
90369 + debug_tree((tree)type);
90370 + gcc_unreachable();
90371 + }
90372 +}
90373 +
90374 +static size_t add_type_codes(const_tree type, unsigned char *tree_codes, size_t len)
90375 +{
90376 + gcc_assert(type != NULL_TREE);
90377 +
90378 + while (type && len < CODES_LIMIT) {
90379 + tree_codes[len] = get_tree_code(type);
90380 + len++;
90381 + type = TREE_TYPE(type);
90382 + }
90383 + return len;
90384 +}
90385 +
90386 +static unsigned int get_function_decl(const_tree fndecl, unsigned char *tree_codes)
90387 +{
90388 + const_tree arg, result, arg_field, type = TREE_TYPE(fndecl);
90389 + enum tree_code code = TREE_CODE(type);
90390 + size_t len = 0;
90391 +
90392 + gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
90393 +
90394 + arg = TYPE_ARG_TYPES(type);
90395 + // skip builtins __builtin_constant_p
90396 + if (!arg && DECL_BUILT_IN(fndecl))
90397 + return 0;
90398 +
90399 + if (TREE_CODE_CLASS(code) == tcc_type)
90400 + result = type;
90401 + else
90402 + result = DECL_RESULT(fndecl);
90403 +
90404 + gcc_assert(result != NULL_TREE);
90405 + len = add_type_codes(TREE_TYPE(result), tree_codes, len);
90406 +
90407 + if (arg == NULL_TREE) {
90408 + gcc_assert(CODE_CONTAINS_STRUCT(TREE_CODE(fndecl), TS_DECL_NON_COMMON));
90409 + arg_field = DECL_ARGUMENT_FLD(fndecl);
90410 + if (arg_field == NULL_TREE)
90411 + return 0;
90412 + arg = TREE_TYPE(arg_field);
90413 + len = add_type_codes(arg, tree_codes, len);
90414 + gcc_assert(len != 0);
90415 + return len;
90416 + }
90417 +
90418 + gcc_assert(arg != NULL_TREE && TREE_CODE(arg) == TREE_LIST);
90419 + while (arg && len < CODES_LIMIT) {
90420 + len = add_type_codes(TREE_VALUE(arg), tree_codes, len);
90421 + arg = TREE_CHAIN(arg);
90422 + }
90423 +
90424 + gcc_assert(len != 0);
90425 + return len;
90426 +}
90427 +
90428 +static const struct size_overflow_hash *get_function_hash(tree fndecl)
90429 +{
90430 + unsigned int hash;
90431 + const struct size_overflow_hash *entry;
90432 + unsigned char tree_codes[CODES_LIMIT];
90433 + size_t len;
90434 + const char *func_name = get_asm_name(fndecl);
90435 +
90436 + len = get_function_decl(fndecl, tree_codes);
90437 + if (len == 0)
90438 + return NULL;
90439 +
90440 + hash = get_hash_num(func_name, (const char*) tree_codes, len, 0);
90441 +
90442 + entry = size_overflow_hash[hash];
90443 + while (entry) {
90444 + if (!strcmp(entry->name, func_name))
90445 + return entry;
90446 + entry = entry->next;
90447 + }
90448 +
90449 + return NULL;
90450 +}
90451 +
90452 +static void check_arg_type(const_tree arg)
90453 +{
90454 + const_tree type = TREE_TYPE(arg);
90455 + enum tree_code code = TREE_CODE(type);
90456 +
90457 + if (code == BOOLEAN_TYPE)
90458 + return;
90459 +
90460 + gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
90461 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
90462 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
90463 +}
90464 +
90465 +static int find_arg_number(const_tree arg, tree func)
90466 +{
90467 + tree var;
90468 + unsigned int argnum = 1;
90469 +
90470 + if (TREE_CODE(arg) == SSA_NAME)
90471 + arg = SSA_NAME_VAR(arg);
90472 +
90473 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
90474 + if (strcmp(NAME(arg), NAME(var))) {
90475 + argnum++;
90476 + continue;
90477 + }
90478 + check_arg_type(var);
90479 + return argnum;
90480 + }
90481 + gcc_unreachable();
90482 +}
90483 +
90484 +static tree create_new_var(tree type)
90485 +{
90486 + tree new_var = create_tmp_var(type, "cicus");
90487 +
90488 + add_referenced_var(new_var);
90489 + mark_sym_for_renaming(new_var);
90490 + return new_var;
90491 +}
90492 +
90493 +static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
90494 +{
90495 + gimple assign;
90496 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
90497 + tree type = TREE_TYPE(rhs1);
90498 + tree lhs = create_new_var(type);
90499 +
90500 + gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
90501 + assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
90502 + gimple_set_lhs(assign, make_ssa_name(lhs, assign));
90503 +
90504 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
90505 + update_stmt(assign);
90506 + gimple_set_plf(assign, MY_STMT, true);
90507 + return assign;
90508 +}
90509 +
90510 +static bool is_bool(const_tree node)
90511 +{
90512 + const_tree type;
90513 +
90514 + if (node == NULL_TREE)
90515 + return false;
90516 +
90517 + type = TREE_TYPE(node);
90518 + if (!INTEGRAL_TYPE_P(type))
90519 + return false;
90520 + if (TREE_CODE(type) == BOOLEAN_TYPE)
90521 + return true;
90522 + if (TYPE_PRECISION(type) == 1)
90523 + return true;
90524 + return false;
90525 +}
90526 +
90527 +static tree cast_a_tree(tree type, tree var)
90528 +{
90529 + gcc_assert(type != NULL_TREE);
90530 + gcc_assert(var != NULL_TREE);
90531 + gcc_assert(fold_convertible_p(type, var));
90532 +
90533 + return fold_convert(type, var);
90534 +}
90535 +
90536 +static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before)
90537 +{
90538 + gimple assign;
90539 +
90540 + gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
90541 + if (gsi_end_p(*gsi) && before == AFTER_STMT)
90542 + gcc_unreachable();
90543 +
90544 + if (lhs == CREATE_NEW_VAR)
90545 + lhs = create_new_var(dst_type);
90546 +
90547 + assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
90548 +
90549 + if (!gsi_end_p(*gsi)) {
90550 + location_t loc = gimple_location(gsi_stmt(*gsi));
90551 + gimple_set_location(assign, loc);
90552 + }
90553 +
90554 + gimple_set_lhs(assign, make_ssa_name(lhs, assign));
90555 +
90556 + if (before)
90557 + gsi_insert_before(gsi, assign, GSI_NEW_STMT);
90558 + else
90559 + gsi_insert_after(gsi, assign, GSI_NEW_STMT);
90560 + update_stmt(assign);
90561 + gimple_set_plf(assign, MY_STMT, true);
90562 +
90563 + return assign;
90564 +}
90565 +
90566 +static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
90567 +{
90568 + gimple assign;
90569 + gimple_stmt_iterator gsi;
90570 +
90571 + if (rhs == NULL_TREE)
90572 + return NULL_TREE;
90573 +
90574 + if (types_compatible_p(TREE_TYPE(rhs), size_overflow_type) && gimple_plf(stmt, MY_STMT))
90575 + return rhs;
90576 +
90577 + gsi = gsi_for_stmt(stmt);
90578 + assign = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before);
90579 + gimple_set_plf(assign, MY_STMT, true);
90580 + return gimple_get_lhs(assign);
90581 +}
90582 +
90583 +static tree cast_to_TI_type(gimple stmt, tree node)
90584 +{
90585 + gimple_stmt_iterator gsi;
90586 + gimple cast_stmt;
90587 + tree type = TREE_TYPE(node);
90588 +
90589 + if (types_compatible_p(type, intTI_type_node))
90590 + return node;
90591 +
90592 + gsi = gsi_for_stmt(stmt);
90593 + cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
90594 + gimple_set_plf(cast_stmt, MY_STMT, true);
90595 + return gimple_get_lhs(cast_stmt);
90596 +}
90597 +
90598 +static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
90599 +{
90600 + tree lhs;
90601 + gimple_stmt_iterator gsi;
90602 +
90603 + if (rhs1 == NULL_TREE) {
90604 + debug_gimple_stmt(oldstmt);
90605 + error("%s: rhs1 is NULL_TREE", __func__);
90606 + gcc_unreachable();
90607 + }
90608 +
90609 + switch (gimple_code(oldstmt)) {
90610 + case GIMPLE_ASM:
90611 + lhs = rhs1;
90612 + break;
90613 + case GIMPLE_CALL:
90614 + lhs = gimple_call_lhs(oldstmt);
90615 + break;
90616 + case GIMPLE_ASSIGN:
90617 + lhs = gimple_get_lhs(oldstmt);
90618 + break;
90619 + default:
90620 + debug_gimple_stmt(oldstmt);
90621 + gcc_unreachable();
90622 + }
90623 +
90624 + gsi = gsi_for_stmt(oldstmt);
90625 + pointer_set_insert(visited, oldstmt);
90626 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
90627 + basic_block next_bb, cur_bb;
90628 + const_edge e;
90629 +
90630 + gcc_assert(before == false);
90631 + gcc_assert(stmt_can_throw_internal(oldstmt));
90632 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
90633 + gcc_assert(!gsi_end_p(gsi));
90634 +
90635 + cur_bb = gimple_bb(oldstmt);
90636 + next_bb = cur_bb->next_bb;
90637 + e = find_edge(cur_bb, next_bb);
90638 + gcc_assert(e != NULL);
90639 + gcc_assert(e->flags & EDGE_FALLTHRU);
90640 +
90641 + gsi = gsi_after_labels(next_bb);
90642 + gcc_assert(!gsi_end_p(gsi));
90643 +
90644 + before = true;
90645 + oldstmt = gsi_stmt(gsi);
90646 + }
90647 +
90648 + return cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
90649 +}
90650 +
90651 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
90652 +{
90653 + gimple stmt;
90654 + gimple_stmt_iterator gsi;
90655 + tree size_overflow_type, new_var, lhs = gimple_get_lhs(oldstmt);
90656 +
90657 + if (gimple_plf(oldstmt, MY_STMT))
90658 + return lhs;
90659 +
90660 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
90661 + rhs1 = gimple_assign_rhs1(oldstmt);
90662 + rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
90663 + }
90664 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
90665 + rhs2 = gimple_assign_rhs2(oldstmt);
90666 + rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
90667 + }
90668 +
90669 + stmt = gimple_copy(oldstmt);
90670 + gimple_set_location(stmt, gimple_location(oldstmt));
90671 + gimple_set_plf(stmt, MY_STMT, true);
90672 +
90673 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
90674 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
90675 +
90676 + size_overflow_type = get_size_overflow_type(oldstmt, node);
90677 +
90678 + if (is_bool(lhs))
90679 + new_var = SSA_NAME_VAR(lhs);
90680 + else
90681 + new_var = create_new_var(size_overflow_type);
90682 + new_var = make_ssa_name(new_var, stmt);
90683 + gimple_set_lhs(stmt, new_var);
90684 +
90685 + if (rhs1 != NULL_TREE)
90686 + gimple_assign_set_rhs1(stmt, rhs1);
90687 +
90688 + if (rhs2 != NULL_TREE)
90689 + gimple_assign_set_rhs2(stmt, rhs2);
90690 +#if BUILDING_GCC_VERSION >= 4007
90691 + if (rhs3 != NULL_TREE)
90692 + gimple_assign_set_rhs3(stmt, rhs3);
90693 +#endif
90694 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
90695 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
90696 +
90697 + gsi = gsi_for_stmt(oldstmt);
90698 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
90699 + update_stmt(stmt);
90700 + pointer_set_insert(visited, oldstmt);
90701 + return gimple_get_lhs(stmt);
90702 +}
90703 +
90704 +static gimple overflow_create_phi_node(gimple oldstmt, tree result)
90705 +{
90706 + basic_block bb;
90707 + gimple phi;
90708 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
90709 +
90710 + bb = gsi_bb(gsi);
90711 +
90712 + phi = create_phi_node(result, bb);
90713 + gsi = gsi_last(phi_nodes(bb));
90714 + gsi_remove(&gsi, false);
90715 +
90716 + gsi = gsi_for_stmt(oldstmt);
90717 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
90718 + gimple_set_bb(phi, bb);
90719 + gimple_set_plf(phi, MY_STMT, true);
90720 + return phi;
90721 +}
90722 +
90723 +static basic_block create_a_first_bb(void)
90724 +{
90725 + basic_block first_bb;
90726 +
90727 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
90728 + if (dom_info_available_p(CDI_DOMINATORS))
90729 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
90730 + return first_bb;
90731 +}
90732 +
90733 +static tree cast_old_phi_arg(gimple oldstmt, tree size_overflow_type, tree arg, tree new_var, unsigned int i)
90734 +{
90735 + basic_block bb;
90736 + const_gimple newstmt;
90737 + gimple_stmt_iterator gsi;
90738 + bool before = BEFORE_STMT;
90739 +
90740 + if (TREE_CODE(arg) == SSA_NAME && gimple_code(get_def_stmt(arg)) != GIMPLE_NOP) {
90741 + gsi = gsi_for_stmt(get_def_stmt(arg));
90742 + newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, AFTER_STMT);
90743 + return gimple_get_lhs(newstmt);
90744 + }
90745 +
90746 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
90747 + gsi = gsi_after_labels(bb);
90748 + if (bb->index == 0) {
90749 + bb = create_a_first_bb();
90750 + gsi = gsi_start_bb(bb);
90751 + }
90752 + newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, before);
90753 + return gimple_get_lhs(newstmt);
90754 +}
90755 +
90756 +static const_gimple handle_new_phi_arg(const_tree arg, tree new_var, tree new_rhs)
90757 +{
90758 + gimple newstmt;
90759 + gimple_stmt_iterator gsi;
90760 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
90761 + gimple def_newstmt = get_def_stmt(new_rhs);
90762 +
90763 + gsi_insert = gsi_insert_after;
90764 + gsi = gsi_for_stmt(def_newstmt);
90765 +
90766 + switch (gimple_code(get_def_stmt(arg))) {
90767 + case GIMPLE_PHI:
90768 + newstmt = gimple_build_assign(new_var, new_rhs);
90769 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
90770 + gsi_insert = gsi_insert_before;
90771 + break;
90772 + case GIMPLE_ASM:
90773 + case GIMPLE_CALL:
90774 + newstmt = gimple_build_assign(new_var, new_rhs);
90775 + break;
90776 + case GIMPLE_ASSIGN:
90777 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
90778 + break;
90779 + default:
90780 + /* unknown gimple_code (handle_build_new_phi_arg) */
90781 + gcc_unreachable();
90782 + }
90783 +
90784 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
90785 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
90786 + gimple_set_plf(newstmt, MY_STMT, true);
90787 + update_stmt(newstmt);
90788 + return newstmt;
90789 +}
90790 +
90791 +static tree build_new_phi_arg(struct pointer_set_t *visited, tree size_overflow_type, tree arg, tree new_var)
90792 +{
90793 + const_gimple newstmt;
90794 + gimple def_stmt;
90795 + tree new_rhs;
90796 +
90797 + new_rhs = expand(visited, arg);
90798 + if (new_rhs == NULL_TREE)
90799 + return NULL_TREE;
90800 +
90801 + def_stmt = get_def_stmt(new_rhs);
90802 + if (gimple_code(def_stmt) == GIMPLE_NOP)
90803 + return NULL_TREE;
90804 + new_rhs = cast_to_new_size_overflow_type(def_stmt, new_rhs, size_overflow_type, AFTER_STMT);
90805 +
90806 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
90807 + return gimple_get_lhs(newstmt);
90808 +}
90809 +
90810 +static tree build_new_phi(struct pointer_set_t *visited, tree orig_result)
90811 +{
90812 + gimple phi, oldstmt = get_def_stmt(orig_result);
90813 + tree new_result, size_overflow_type;
90814 + unsigned int i;
90815 + unsigned int n = gimple_phi_num_args(oldstmt);
90816 +
90817 + size_overflow_type = get_size_overflow_type(oldstmt, orig_result);
90818 +
90819 + new_result = create_new_var(size_overflow_type);
90820 +
90821 + pointer_set_insert(visited, oldstmt);
90822 + phi = overflow_create_phi_node(oldstmt, new_result);
90823 + for (i = 0; i < n; i++) {
90824 + tree arg, lhs;
90825 +
90826 + arg = gimple_phi_arg_def(oldstmt, i);
90827 + if (is_gimple_constant(arg))
90828 + arg = cast_a_tree(size_overflow_type, arg);
90829 + lhs = build_new_phi_arg(visited, size_overflow_type, arg, new_result);
90830 + if (lhs == NULL_TREE)
90831 + lhs = cast_old_phi_arg(oldstmt, size_overflow_type, arg, new_result, i);
90832 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
90833 + }
90834 +
90835 + update_stmt(phi);
90836 + return gimple_phi_result(phi);
90837 +}
90838 +
90839 +static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
90840 +{
90841 + const_gimple assign;
90842 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
90843 + tree origtype = TREE_TYPE(orig_rhs);
90844 +
90845 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
90846 +
90847 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
90848 + return gimple_get_lhs(assign);
90849 +}
90850 +
90851 +static void change_rhs1(gimple stmt, tree new_rhs1)
90852 +{
90853 + tree assign_rhs;
90854 + const_tree rhs = gimple_assign_rhs1(stmt);
90855 +
90856 + assign_rhs = change_assign_rhs(stmt, rhs, new_rhs1);
90857 + gimple_assign_set_rhs1(stmt, assign_rhs);
90858 + update_stmt(stmt);
90859 +}
90860 +
90861 +static bool check_mode_type(const_gimple stmt)
90862 +{
90863 + const_tree lhs = gimple_get_lhs(stmt);
90864 + const_tree lhs_type = TREE_TYPE(lhs);
90865 + const_tree rhs_type = TREE_TYPE(gimple_assign_rhs1(stmt));
90866 + enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
90867 + enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
90868 +
90869 + if (rhs_mode == lhs_mode && TYPE_UNSIGNED(rhs_type) == TYPE_UNSIGNED(lhs_type))
90870 + return false;
90871 +
90872 + if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type)))
90873 + return false;
90874 +
90875 + return true;
90876 +}
90877 +
90878 +static bool check_undefined_integer_operation(const_gimple stmt)
90879 +{
90880 + const_gimple def_stmt;
90881 + const_tree lhs = gimple_get_lhs(stmt);
90882 + const_tree rhs1 = gimple_assign_rhs1(stmt);
90883 + const_tree rhs1_type = TREE_TYPE(rhs1);
90884 + const_tree lhs_type = TREE_TYPE(lhs);
90885 +
90886 + if (TYPE_MODE(rhs1_type) != TYPE_MODE(lhs_type) || TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
90887 + return false;
90888 +
90889 + def_stmt = get_def_stmt(rhs1);
90890 + if (gimple_code(def_stmt) != GIMPLE_ASSIGN)
90891 + return false;
90892 +
90893 + if (gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
90894 + return false;
90895 + return true;
90896 +}
90897 +
90898 +static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
90899 +{
90900 + const_tree rhs1, lhs, rhs1_type, lhs_type;
90901 + enum machine_mode lhs_mode, rhs_mode;
90902 + gimple def_stmt = get_def_stmt(no_const_rhs);
90903 +
90904 + if (!gimple_assign_cast_p(def_stmt))
90905 + return false;
90906 +
90907 + rhs1 = gimple_assign_rhs1(def_stmt);
90908 + lhs = gimple_get_lhs(def_stmt);
90909 + rhs1_type = TREE_TYPE(rhs1);
90910 + lhs_type = TREE_TYPE(lhs);
90911 + rhs_mode = TYPE_MODE(rhs1_type);
90912 + lhs_mode = TYPE_MODE(lhs_type);
90913 + if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
90914 + return false;
90915 +
90916 + return true;
90917 +}
90918 +
90919 +static tree handle_unary_rhs(struct pointer_set_t *visited, gimple stmt)
90920 +{
90921 + gimple def_stmt;
90922 + tree size_overflow_type, lhs = gimple_get_lhs(stmt);
90923 + tree new_rhs1, rhs1 = gimple_assign_rhs1(stmt);
90924 + const_tree rhs1_type = TREE_TYPE(rhs1);
90925 + const_tree lhs_type = TREE_TYPE(lhs);
90926 +
90927 + new_rhs1 = expand(visited, rhs1);
90928 +
90929 + if (new_rhs1 == NULL_TREE || TREE_CODE(rhs1_type) == POINTER_TYPE)
90930 + return create_assign(visited, stmt, lhs, AFTER_STMT);
90931 +
90932 + if (gimple_plf(stmt, MY_STMT))
90933 + return lhs;
90934 +
90935 + if (gimple_plf(stmt, NO_CAST_CHECK))
90936 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
90937 +
90938 + if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
90939 + size_overflow_type = get_size_overflow_type(stmt, rhs1);
90940 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
90941 + check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
90942 + return create_assign(visited, stmt, lhs, AFTER_STMT);
90943 + }
90944 +
90945 + if (!gimple_assign_cast_p(stmt) || check_undefined_integer_operation(stmt))
90946 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
90947 +
90948 + size_overflow_type = get_size_overflow_type(stmt, rhs1);
90949 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
90950 +
90951 + check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
90952 +
90953 + rhs1 = gimple_assign_rhs1(stmt);
90954 + rhs1_type = TREE_TYPE(rhs1);
90955 + if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type)) {
90956 + def_stmt = get_def_stmt(new_rhs1);
90957 + rhs1 = gimple_assign_rhs1(def_stmt);
90958 + return create_assign(visited, stmt, rhs1, AFTER_STMT);
90959 + }
90960 + change_rhs1(stmt, new_rhs1);
90961 +
90962 + if (!check_mode_type(stmt))
90963 + return create_assign(visited, stmt, lhs, AFTER_STMT);
90964 +
90965 + size_overflow_type = get_size_overflow_type(stmt, lhs);
90966 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
90967 +
90968 + check_size_overflow(stmt, size_overflow_type, new_rhs1, lhs, BEFORE_STMT);
90969 +
90970 + return create_assign(visited, stmt, lhs, AFTER_STMT);
90971 +}
90972 +
90973 +static tree handle_unary_ops(struct pointer_set_t *visited, gimple stmt)
90974 +{
90975 + tree rhs1, lhs = gimple_get_lhs(stmt);
90976 + gimple def_stmt = get_def_stmt(lhs);
90977 +
90978 + gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
90979 + rhs1 = gimple_assign_rhs1(def_stmt);
90980 +
90981 + if (is_gimple_constant(rhs1))
90982 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
90983 +
90984 + gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
90985 + switch (TREE_CODE(rhs1)) {
90986 + case SSA_NAME:
90987 + return handle_unary_rhs(visited, def_stmt);
90988 + case ARRAY_REF:
90989 + case BIT_FIELD_REF:
90990 + case ADDR_EXPR:
90991 + case COMPONENT_REF:
90992 + case INDIRECT_REF:
90993 +#if BUILDING_GCC_VERSION >= 4006
90994 + case MEM_REF:
90995 +#endif
90996 + case TARGET_MEM_REF:
90997 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
90998 + case PARM_DECL:
90999 + case VAR_DECL:
91000 + return create_assign(visited, stmt, lhs, AFTER_STMT);
91001 +
91002 + default:
91003 + debug_gimple_stmt(def_stmt);
91004 + debug_tree(rhs1);
91005 + gcc_unreachable();
91006 + }
91007 +}
91008 +
91009 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
91010 +{
91011 + gimple cond_stmt;
91012 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
91013 +
91014 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
91015 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
91016 + update_stmt(cond_stmt);
91017 +}
91018 +
91019 +static tree create_string_param(tree string)
91020 +{
91021 + tree i_type, a_type;
91022 + const int length = TREE_STRING_LENGTH(string);
91023 +
91024 + gcc_assert(length > 0);
91025 +
91026 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
91027 + a_type = build_array_type(char_type_node, i_type);
91028 +
91029 + TREE_TYPE(string) = a_type;
91030 + TREE_CONSTANT(string) = 1;
91031 + TREE_READONLY(string) = 1;
91032 +
91033 + return build1(ADDR_EXPR, ptr_type_node, string);
91034 +}
91035 +
91036 +static void insert_cond_result(basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
91037 +{
91038 + gimple func_stmt;
91039 + const_gimple def_stmt;
91040 + const_tree loc_line;
91041 + tree loc_file, ssa_name, current_func;
91042 + expanded_location xloc;
91043 + char *ssa_name_buf;
91044 + int len;
91045 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
91046 +
91047 + def_stmt = get_def_stmt(arg);
91048 + xloc = expand_location(gimple_location(def_stmt));
91049 +
91050 + if (!gimple_has_location(def_stmt)) {
91051 + xloc = expand_location(gimple_location(stmt));
91052 + if (!gimple_has_location(stmt))
91053 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
91054 + }
91055 +
91056 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
91057 +
91058 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
91059 + loc_file = create_string_param(loc_file);
91060 +
91061 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
91062 + current_func = create_string_param(current_func);
91063 +
91064 + gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
91065 + call_count++;
91066 + len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
91067 + gcc_assert(len > 0);
91068 + ssa_name = build_string(len + 1, ssa_name_buf);
91069 + free(ssa_name_buf);
91070 + ssa_name = create_string_param(ssa_name);
91071 +
91072 + // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
91073 + func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
91074 +
91075 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
91076 +}
91077 +
91078 +static void __unused print_the_code_insertions(const_gimple stmt)
91079 +{
91080 + location_t loc = gimple_location(stmt);
91081 +
91082 + inform(loc, "Integer size_overflow check applied here.");
91083 +}
91084 +
91085 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
91086 +{
91087 + basic_block cond_bb, join_bb, bb_true;
91088 + edge e;
91089 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
91090 +
91091 + cond_bb = gimple_bb(stmt);
91092 + if (before)
91093 + gsi_prev(&gsi);
91094 + if (gsi_end_p(gsi))
91095 + e = split_block_after_labels(cond_bb);
91096 + else
91097 + e = split_block(cond_bb, gsi_stmt(gsi));
91098 + cond_bb = e->src;
91099 + join_bb = e->dest;
91100 + e->flags = EDGE_FALSE_VALUE;
91101 + e->probability = REG_BR_PROB_BASE;
91102 +
91103 + bb_true = create_empty_bb(cond_bb);
91104 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
91105 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
91106 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
91107 +
91108 + if (dom_info_available_p(CDI_DOMINATORS)) {
91109 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
91110 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
91111 + }
91112 +
91113 + if (current_loops != NULL) {
91114 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
91115 + add_bb_to_loop(bb_true, cond_bb->loop_father);
91116 + }
91117 +
91118 + insert_cond(cond_bb, arg, cond_code, type_value);
91119 + insert_cond_result(bb_true, stmt, arg, min);
91120 +
91121 +// print_the_code_insertions(stmt);
91122 +}
91123 +
91124 +static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
91125 +{
91126 + const_tree rhs_type = TREE_TYPE(rhs);
91127 + tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
91128 +
91129 + gcc_assert(rhs_type != NULL_TREE);
91130 + if (TREE_CODE(rhs_type) == POINTER_TYPE)
91131 + return;
91132 +
91133 + gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
91134 +
91135 + type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
91136 + type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
91137 +
91138 + gcc_assert(!TREE_OVERFLOW(type_max));
91139 +
91140 + cast_rhs_type = TREE_TYPE(cast_rhs);
91141 + type_max_type = TREE_TYPE(type_max);
91142 + type_min_type = TREE_TYPE(type_min);
91143 + gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
91144 + gcc_assert(types_compatible_p(type_max_type, type_min_type));
91145 +
91146 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
91147 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
91148 +}
91149 +
91150 +static tree get_size_overflow_type_for_intentional_overflow(gimple def_stmt, tree change_rhs)
91151 +{
91152 + gimple change_rhs_def_stmt;
91153 + tree lhs = gimple_get_lhs(def_stmt);
91154 + tree lhs_type = TREE_TYPE(lhs);
91155 + tree rhs1_type = TREE_TYPE(gimple_assign_rhs1(def_stmt));
91156 + tree rhs2_type = TREE_TYPE(gimple_assign_rhs2(def_stmt));
91157 +
91158 + if (change_rhs == NULL_TREE)
91159 + return get_size_overflow_type(def_stmt, lhs);
91160 +
91161 + change_rhs_def_stmt = get_def_stmt(change_rhs);
91162 +
91163 + if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == tcc_comparison)
91164 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
91165 +
91166 + if (gimple_assign_rhs_code(def_stmt) == LSHIFT_EXPR)
91167 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
91168 +
91169 + if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR)
91170 + return get_size_overflow_type(change_rhs_def_stmt, change_rhs);
91171 +
91172 + if (!types_compatible_p(lhs_type, rhs1_type) || !types_compatible_p(rhs1_type, rhs2_type)) {
91173 + debug_gimple_stmt(def_stmt);
91174 + gcc_unreachable();
91175 + }
91176 +
91177 + return get_size_overflow_type(def_stmt, lhs);
91178 +}
91179 +
91180 +static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
91181 +{
91182 + if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
91183 + return false;
91184 + if (!is_gimple_constant(rhs))
91185 + return false;
91186 + return true;
91187 +}
91188 +
91189 +static bool is_subtraction_special(const_gimple stmt)
91190 +{
91191 + gimple rhs1_def_stmt, rhs2_def_stmt;
91192 + const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
91193 + enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
91194 + const_tree rhs1 = gimple_assign_rhs1(stmt);
91195 + const_tree rhs2 = gimple_assign_rhs2(stmt);
91196 +
91197 + if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
91198 + return false;
91199 +
91200 + gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
91201 +
91202 + if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
91203 + return false;
91204 +
91205 + rhs1_def_stmt = get_def_stmt(rhs1);
91206 + rhs2_def_stmt = get_def_stmt(rhs2);
91207 + if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
91208 + return false;
91209 +
91210 + rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
91211 + rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
91212 + rhs1_def_stmt_lhs = gimple_get_lhs(rhs1_def_stmt);
91213 + rhs2_def_stmt_lhs = gimple_get_lhs(rhs2_def_stmt);
91214 + rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
91215 + rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
91216 + rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
91217 + rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
91218 + if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
91219 + return false;
91220 + if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
91221 + return false;
91222 +
91223 + gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
91224 + gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
91225 + return true;
91226 +}
91227 +
91228 +static tree get_def_stmt_rhs(const_tree var)
91229 +{
91230 + tree rhs1, def_stmt_rhs1;
91231 + gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
91232 +
91233 + def_stmt = get_def_stmt(var);
91234 + gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && gimple_plf(def_stmt, MY_STMT) && gimple_assign_cast_p(def_stmt));
91235 +
91236 + rhs1 = gimple_assign_rhs1(def_stmt);
91237 + rhs1_def_stmt = get_def_stmt(rhs1);
91238 + gcc_assert(gimple_code(rhs1_def_stmt) != GIMPLE_NOP);
91239 + if (!gimple_assign_cast_p(rhs1_def_stmt))
91240 + return rhs1;
91241 +
91242 + def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
91243 + def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
91244 +
91245 + switch (gimple_code(def_stmt_rhs1_def_stmt)) {
91246 + case GIMPLE_CALL:
91247 + case GIMPLE_NOP:
91248 + case GIMPLE_ASM:
91249 + return def_stmt_rhs1;
91250 + case GIMPLE_ASSIGN:
91251 + return rhs1;
91252 + default:
91253 + debug_gimple_stmt(def_stmt_rhs1_def_stmt);
91254 + gcc_unreachable();
91255 + }
91256 +}
91257 +
91258 +static tree handle_integer_truncation(struct pointer_set_t *visited, const_tree lhs)
91259 +{
91260 + tree new_rhs1, new_rhs2;
91261 + tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
91262 + gimple assign, stmt = get_def_stmt(lhs);
91263 + tree rhs1 = gimple_assign_rhs1(stmt);
91264 + tree rhs2 = gimple_assign_rhs2(stmt);
91265 +
91266 + if (!is_subtraction_special(stmt))
91267 + return NULL_TREE;
91268 +
91269 + new_rhs1 = expand(visited, rhs1);
91270 + new_rhs2 = expand(visited, rhs2);
91271 +
91272 + new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
91273 + new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
91274 +
91275 + if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
91276 + new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
91277 + new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
91278 + }
91279 +
91280 + assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
91281 + new_lhs = gimple_get_lhs(assign);
91282 + check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
91283 +
91284 + return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
91285 +}
91286 +
91287 +static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
91288 +{
91289 + const_gimple def_stmt;
91290 +
91291 + if (TREE_CODE(rhs) != SSA_NAME)
91292 + return false;
91293 +
91294 + if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
91295 + return false;
91296 +
91297 + def_stmt = get_def_stmt(rhs);
91298 + if (gimple_code(def_stmt) != GIMPLE_ASSIGN || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
91299 + return false;
91300 +
91301 + return true;
91302 +}
91303 +
91304 +static tree handle_intentional_overflow(struct pointer_set_t *visited, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs1, tree new_rhs2)
91305 +{
91306 + tree new_rhs, size_overflow_type, orig_rhs;
91307 + void (*gimple_assign_set_rhs)(gimple, tree);
91308 + tree rhs1 = gimple_assign_rhs1(stmt);
91309 + tree rhs2 = gimple_assign_rhs2(stmt);
91310 + tree lhs = gimple_get_lhs(stmt);
91311 +
91312 + if (change_rhs == NULL_TREE)
91313 + return create_assign(visited, stmt, lhs, AFTER_STMT);
91314 +
91315 + if (new_rhs2 == NULL_TREE) {
91316 + size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs1);
91317 + new_rhs2 = cast_a_tree(size_overflow_type, rhs2);
91318 + orig_rhs = rhs1;
91319 + gimple_assign_set_rhs = &gimple_assign_set_rhs1;
91320 + } else {
91321 + size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs2);
91322 + new_rhs1 = cast_a_tree(size_overflow_type, rhs1);
91323 + orig_rhs = rhs2;
91324 + gimple_assign_set_rhs = &gimple_assign_set_rhs2;
91325 + }
91326 +
91327 + change_rhs = cast_to_new_size_overflow_type(stmt, change_rhs, size_overflow_type, BEFORE_STMT);
91328 +
91329 + if (check_overflow)
91330 + check_size_overflow(stmt, size_overflow_type, change_rhs, orig_rhs, BEFORE_STMT);
91331 +
91332 + new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
91333 + gimple_assign_set_rhs(stmt, new_rhs);
91334 + update_stmt(stmt);
91335 +
91336 + return create_assign(visited, stmt, lhs, AFTER_STMT);
91337 +}
91338 +
91339 +static tree handle_binary_ops(struct pointer_set_t *visited, tree lhs)
91340 +{
91341 + tree rhs1, rhs2, new_lhs;
91342 + gimple def_stmt = get_def_stmt(lhs);
91343 + tree new_rhs1 = NULL_TREE;
91344 + tree new_rhs2 = NULL_TREE;
91345 +
91346 + rhs1 = gimple_assign_rhs1(def_stmt);
91347 + rhs2 = gimple_assign_rhs2(def_stmt);
91348 +
91349 + /* no DImode/TImode division in the 32/64 bit kernel */
91350 + switch (gimple_assign_rhs_code(def_stmt)) {
91351 + case RDIV_EXPR:
91352 + case TRUNC_DIV_EXPR:
91353 + case CEIL_DIV_EXPR:
91354 + case FLOOR_DIV_EXPR:
91355 + case ROUND_DIV_EXPR:
91356 + case TRUNC_MOD_EXPR:
91357 + case CEIL_MOD_EXPR:
91358 + case FLOOR_MOD_EXPR:
91359 + case ROUND_MOD_EXPR:
91360 + case EXACT_DIV_EXPR:
91361 + case POINTER_PLUS_EXPR:
91362 + case BIT_AND_EXPR:
91363 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
91364 + default:
91365 + break;
91366 + }
91367 +
91368 + new_lhs = handle_integer_truncation(visited, lhs);
91369 + if (new_lhs != NULL_TREE)
91370 + return new_lhs;
91371 +
91372 + if (TREE_CODE(rhs1) == SSA_NAME)
91373 + new_rhs1 = expand(visited, rhs1);
91374 + if (TREE_CODE(rhs2) == SSA_NAME)
91375 + new_rhs2 = expand(visited, rhs2);
91376 +
91377 + if (is_a_neg_overflow(def_stmt, rhs2))
91378 + return handle_intentional_overflow(visited, true, def_stmt, new_rhs1, new_rhs1, NULL_TREE);
91379 + if (is_a_neg_overflow(def_stmt, rhs1))
91380 + return handle_intentional_overflow(visited, true, def_stmt, new_rhs2, NULL_TREE, new_rhs2);
91381 +
91382 + if (is_a_constant_overflow(def_stmt, rhs2))
91383 + return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, new_rhs1, NULL_TREE);
91384 + if (is_a_constant_overflow(def_stmt, rhs1))
91385 + return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, NULL_TREE, new_rhs2);
91386 +
91387 + return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
91388 +}
91389 +
91390 +#if BUILDING_GCC_VERSION >= 4007
91391 +static tree get_new_rhs(struct pointer_set_t *visited, tree size_overflow_type, tree rhs)
91392 +{
91393 + if (is_gimple_constant(rhs))
91394 + return cast_a_tree(size_overflow_type, rhs);
91395 + if (TREE_CODE(rhs) != SSA_NAME)
91396 + return NULL_TREE;
91397 + return expand(visited, rhs);
91398 +}
91399 +
91400 +static tree handle_ternary_ops(struct pointer_set_t *visited, tree lhs)
91401 +{
91402 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
91403 + gimple def_stmt = get_def_stmt(lhs);
91404 +
91405 + size_overflow_type = get_size_overflow_type(def_stmt, lhs);
91406 +
91407 + rhs1 = gimple_assign_rhs1(def_stmt);
91408 + rhs2 = gimple_assign_rhs2(def_stmt);
91409 + rhs3 = gimple_assign_rhs3(def_stmt);
91410 + new_rhs1 = get_new_rhs(visited, size_overflow_type, rhs1);
91411 + new_rhs2 = get_new_rhs(visited, size_overflow_type, rhs2);
91412 + new_rhs3 = get_new_rhs(visited, size_overflow_type, rhs3);
91413 +
91414 + return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
91415 +}
91416 +#endif
91417 +
91418 +static tree get_size_overflow_type(gimple stmt, const_tree node)
91419 +{
91420 + const_tree type;
91421 + tree new_type;
91422 +
91423 + gcc_assert(node != NULL_TREE);
91424 +
91425 + type = TREE_TYPE(node);
91426 +
91427 + if (gimple_plf(stmt, MY_STMT))
91428 + return TREE_TYPE(node);
91429 +
91430 + switch (TYPE_MODE(type)) {
91431 + case QImode:
91432 + new_type = (TYPE_UNSIGNED(type)) ? unsigned_intHI_type_node : intHI_type_node;
91433 + break;
91434 + case HImode:
91435 + new_type = (TYPE_UNSIGNED(type)) ? unsigned_intSI_type_node : intSI_type_node;
91436 + break;
91437 + case SImode:
91438 + new_type = (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
91439 + break;
91440 + case DImode:
91441 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
91442 + new_type = (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
91443 + else
91444 + new_type = (TYPE_UNSIGNED(type)) ? unsigned_intTI_type_node : intTI_type_node;
91445 + break;
91446 + default:
91447 + debug_tree((tree)node);
91448 + error("%s: unsupported gcc configuration.", __func__);
91449 + gcc_unreachable();
91450 + }
91451 +
91452 + if (TYPE_QUALS(type) != 0)
91453 + return build_qualified_type(new_type, TYPE_QUALS(type));
91454 + return new_type;
91455 +}
91456 +
91457 +static tree expand_visited(gimple def_stmt)
91458 +{
91459 + const_gimple next_stmt;
91460 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
91461 +
91462 + gsi_next(&gsi);
91463 + next_stmt = gsi_stmt(gsi);
91464 +
91465 + gcc_assert(gimple_plf((gimple)next_stmt, MY_STMT));
91466 +
91467 + switch (gimple_code(next_stmt)) {
91468 + case GIMPLE_ASSIGN:
91469 + return gimple_get_lhs(next_stmt);
91470 + case GIMPLE_PHI:
91471 + return gimple_phi_result(next_stmt);
91472 + case GIMPLE_CALL:
91473 + return gimple_call_lhs(next_stmt);
91474 + default:
91475 + return NULL_TREE;
91476 + }
91477 +}
91478 +
91479 +static tree expand(struct pointer_set_t *visited, tree lhs)
91480 +{
91481 + gimple def_stmt;
91482 + enum tree_code code = TREE_CODE(TREE_TYPE(lhs));
91483 +
91484 + if (is_gimple_constant(lhs))
91485 + return NULL_TREE;
91486 +
91487 + if (TREE_CODE(lhs) == ADDR_EXPR)
91488 + return NULL_TREE;
91489 +
91490 + if (code == REAL_TYPE)
91491 + return NULL_TREE;
91492 +
91493 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
91494 +
91495 + def_stmt = get_def_stmt(lhs);
91496 +
91497 + if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
91498 + return NULL_TREE;
91499 +
91500 + if (gimple_plf(def_stmt, MY_STMT))
91501 + return lhs;
91502 +
91503 + // skip char type, except PHI (FIXME: only kernel)
91504 + if (TYPE_MODE(TREE_TYPE(lhs)) == QImode && gimple_code(def_stmt) != GIMPLE_PHI)
91505 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
91506 +
91507 + if (pointer_set_contains(visited, def_stmt))
91508 + return expand_visited(def_stmt);
91509 +
91510 + switch (gimple_code(def_stmt)) {
91511 + case GIMPLE_PHI:
91512 + return build_new_phi(visited, lhs);
91513 + case GIMPLE_CALL:
91514 + case GIMPLE_ASM:
91515 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
91516 + case GIMPLE_ASSIGN:
91517 + switch (gimple_num_ops(def_stmt)) {
91518 + case 2:
91519 + return handle_unary_ops(visited, def_stmt);
91520 + case 3:
91521 + return handle_binary_ops(visited, lhs);
91522 +#if BUILDING_GCC_VERSION >= 4007
91523 + case 4:
91524 + return handle_ternary_ops(visited, lhs);
91525 +#endif
91526 + }
91527 + default:
91528 + debug_gimple_stmt(def_stmt);
91529 + error("%s: unknown gimple code", __func__);
91530 + gcc_unreachable();
91531 + }
91532 +}
91533 +
91534 +static void change_function_arg(gimple stmt, const_tree origarg, unsigned int argnum, tree newarg)
91535 +{
91536 + const_gimple assign;
91537 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
91538 + tree origtype = TREE_TYPE(origarg);
91539 +
91540 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
91541 +
91542 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
91543 +
91544 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
91545 + update_stmt(stmt);
91546 +}
91547 +
91548 +static bool get_function_arg(unsigned int* argnum, const_tree fndecl)
91549 +{
91550 + const char *origid;
91551 + tree arg;
91552 + const_tree origarg;
91553 +
91554 + if (!DECL_ABSTRACT_ORIGIN(fndecl))
91555 + return true;
91556 +
91557 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
91558 + while (origarg && *argnum) {
91559 + (*argnum)--;
91560 + origarg = TREE_CHAIN(origarg);
91561 + }
91562 +
91563 + gcc_assert(*argnum == 0);
91564 +
91565 + gcc_assert(origarg != NULL_TREE);
91566 + origid = NAME(origarg);
91567 + *argnum = 0;
91568 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
91569 + if (!strcmp(origid, NAME(arg)))
91570 + return true;
91571 + (*argnum)++;
91572 + }
91573 + return false;
91574 +}
91575 +
91576 +static bool skip_types(const_tree var)
91577 +{
91578 + const_tree type;
91579 +
91580 + switch (TREE_CODE(var)) {
91581 + case ADDR_EXPR:
91582 +#if BUILDING_GCC_VERSION >= 4006
91583 + case MEM_REF:
91584 +#endif
91585 + case ARRAY_REF:
91586 + case BIT_FIELD_REF:
91587 + case INDIRECT_REF:
91588 + case TARGET_MEM_REF:
91589 + case VAR_DECL:
91590 + return true;
91591 + default:
91592 + break;
91593 + }
91594 +
91595 + type = TREE_TYPE(TREE_TYPE(var));
91596 + if (!type)
91597 + return false;
91598 + switch (TREE_CODE(type)) {
91599 + case RECORD_TYPE:
91600 + return true;
91601 + default:
91602 + break;
91603 + }
91604 +
91605 + return false;
91606 +}
91607 +
91608 +static bool walk_phi(struct pointer_set_t *visited, const_tree result)
91609 +{
91610 + gimple phi = get_def_stmt(result);
91611 + unsigned int i, n = gimple_phi_num_args(phi);
91612 +
91613 + if (!phi)
91614 + return false;
91615 +
91616 + pointer_set_insert(visited, phi);
91617 + for (i = 0; i < n; i++) {
91618 + const_tree arg = gimple_phi_arg_def(phi, i);
91619 + if (pre_expand(visited, arg))
91620 + return true;
91621 + }
91622 + return false;
91623 +}
91624 +
91625 +static bool walk_unary_ops(struct pointer_set_t *visited, const_tree lhs)
91626 +{
91627 + gimple def_stmt = get_def_stmt(lhs);
91628 + const_tree rhs;
91629 +
91630 + if (!def_stmt)
91631 + return false;
91632 +
91633 + rhs = gimple_assign_rhs1(def_stmt);
91634 + if (pre_expand(visited, rhs))
91635 + return true;
91636 + return false;
91637 +}
91638 +
91639 +static bool walk_binary_ops(struct pointer_set_t *visited, const_tree lhs)
91640 +{
91641 + bool rhs1_found, rhs2_found;
91642 + gimple def_stmt = get_def_stmt(lhs);
91643 + const_tree rhs1, rhs2;
91644 +
91645 + if (!def_stmt)
91646 + return false;
91647 +
91648 + rhs1 = gimple_assign_rhs1(def_stmt);
91649 + rhs2 = gimple_assign_rhs2(def_stmt);
91650 + rhs1_found = pre_expand(visited, rhs1);
91651 + rhs2_found = pre_expand(visited, rhs2);
91652 +
91653 + return rhs1_found || rhs2_found;
91654 +}
91655 +
91656 +static const_tree search_field_decl(const_tree comp_ref)
91657 +{
91658 + const_tree field = NULL_TREE;
91659 + unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
91660 +
91661 + for (i = 0; i < len; i++) {
91662 + field = TREE_OPERAND(comp_ref, i);
91663 + if (TREE_CODE(field) == FIELD_DECL)
91664 + break;
91665 + }
91666 + gcc_assert(TREE_CODE(field) == FIELD_DECL);
91667 + return field;
91668 +}
91669 +
91670 +static enum marked mark_status(const_tree fndecl, unsigned int argnum)
91671 +{
91672 + const_tree attr, p;
91673 +
91674 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(fndecl));
91675 + if (!attr || !TREE_VALUE(attr))
91676 + return MARKED_NO;
91677 +
91678 + p = TREE_VALUE(attr);
91679 + if (!TREE_INT_CST_LOW(TREE_VALUE(p)))
91680 + return MARKED_NOT_INTENTIONAL;
91681 +
91682 + do {
91683 + if (argnum == TREE_INT_CST_LOW(TREE_VALUE(p)))
91684 + return MARKED_YES;
91685 + p = TREE_CHAIN(p);
91686 + } while (p);
91687 +
91688 + return MARKED_NO;
91689 +}
91690 +
91691 +static void print_missing_msg(tree func, unsigned int argnum)
91692 +{
91693 + unsigned int new_hash;
91694 + size_t len;
91695 + unsigned char tree_codes[CODES_LIMIT];
91696 + location_t loc = DECL_SOURCE_LOCATION(func);
91697 + const char *curfunc = get_asm_name(func);
91698 +
91699 + len = get_function_decl(func, tree_codes);
91700 + new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0);
91701 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, new_hash);
91702 +}
91703 +
91704 +static unsigned int search_missing_attribute(const_tree arg)
91705 +{
91706 + const_tree type = TREE_TYPE(arg);
91707 + tree func = get_original_function_decl(current_function_decl);
91708 + unsigned int argnum;
91709 + const struct size_overflow_hash *hash;
91710 +
91711 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
91712 +
91713 + if (TREE_CODE(type) == POINTER_TYPE)
91714 + return 0;
91715 +
91716 + argnum = find_arg_number(arg, func);
91717 + if (argnum == 0)
91718 + return 0;
91719 +
91720 + if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func)))
91721 + return argnum;
91722 +
91723 + hash = get_function_hash(func);
91724 + if (!hash || !(hash->param & (1U << argnum))) {
91725 + print_missing_msg(func, argnum);
91726 + return 0;
91727 + }
91728 + return argnum;
91729 +}
91730 +
91731 +static bool is_already_marked(const_tree lhs)
91732 +{
91733 + unsigned int argnum;
91734 + const_tree fndecl;
91735 +
91736 + argnum = search_missing_attribute(lhs);
91737 + fndecl = get_original_function_decl(current_function_decl);
91738 + if (argnum && mark_status(fndecl, argnum) == MARKED_YES)
91739 + return true;
91740 + return false;
91741 +}
91742 +
91743 +static bool pre_expand(struct pointer_set_t *visited, const_tree lhs)
91744 +{
91745 + const_gimple def_stmt;
91746 +
91747 + if (is_gimple_constant(lhs))
91748 + return false;
91749 +
91750 + if (skip_types(lhs))
91751 + return false;
91752 +
91753 + // skip char type (FIXME: only kernel)
91754 + if (TYPE_MODE(TREE_TYPE(lhs)) == QImode)
91755 + return false;
91756 +
91757 + if (TREE_CODE(lhs) == PARM_DECL)
91758 + return is_already_marked(lhs);
91759 +
91760 + if (TREE_CODE(lhs) == COMPONENT_REF) {
91761 + const_tree field, attr;
91762 +
91763 + field = search_field_decl(lhs);
91764 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(field));
91765 + if (!attr || !TREE_VALUE(attr))
91766 + return false;
91767 + return true;
91768 + }
91769 +
91770 + def_stmt = get_def_stmt(lhs);
91771 +
91772 + if (!def_stmt)
91773 + return false;
91774 +
91775 + if (pointer_set_contains(visited, def_stmt))
91776 + return false;
91777 +
91778 + switch (gimple_code(def_stmt)) {
91779 + case GIMPLE_NOP:
91780 + if (TREE_CODE(SSA_NAME_VAR(lhs)) == PARM_DECL)
91781 + return is_already_marked(lhs);
91782 + return false;
91783 + case GIMPLE_PHI:
91784 + return walk_phi(visited, lhs);
91785 + case GIMPLE_CALL:
91786 + case GIMPLE_ASM:
91787 + return false;
91788 + case GIMPLE_ASSIGN:
91789 + switch (gimple_num_ops(def_stmt)) {
91790 + case 2:
91791 + return walk_unary_ops(visited, lhs);
91792 + case 3:
91793 + return walk_binary_ops(visited, lhs);
91794 + }
91795 + default:
91796 + debug_gimple_stmt((gimple)def_stmt);
91797 + error("%s: unknown gimple code", __func__);
91798 + gcc_unreachable();
91799 + }
91800 +}
91801 +
91802 +static bool search_attributes(tree fndecl, const_tree arg, unsigned int argnum)
91803 +{
91804 + struct pointer_set_t *visited;
91805 + bool is_found;
91806 + enum marked is_marked;
91807 + location_t loc;
91808 +
91809 + visited = pointer_set_create();
91810 + is_found = pre_expand(visited, arg);
91811 + pointer_set_destroy(visited);
91812 +
91813 + is_marked = mark_status(fndecl, argnum + 1);
91814 + if ((is_found && is_marked == MARKED_YES) || is_marked == MARKED_NOT_INTENTIONAL)
91815 + return true;
91816 +
91817 + if (is_found) {
91818 + loc = DECL_SOURCE_LOCATION(fndecl);
91819 + inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", get_asm_name(fndecl), argnum + 1);
91820 + return true;
91821 + }
91822 + return false;
91823 +}
91824 +
91825 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
91826 +{
91827 + struct pointer_set_t *visited;
91828 + tree arg, newarg;
91829 + bool match;
91830 +
91831 + match = get_function_arg(&argnum, fndecl);
91832 + if (!match)
91833 + return;
91834 + gcc_assert(gimple_call_num_args(stmt) > argnum);
91835 + arg = gimple_call_arg(stmt, argnum);
91836 + if (arg == NULL_TREE)
91837 + return;
91838 +
91839 + if (is_gimple_constant(arg))
91840 + return;
91841 +
91842 + if (search_attributes(fndecl, arg, argnum))
91843 + return;
91844 +
91845 + if (TREE_CODE(arg) != SSA_NAME)
91846 + return;
91847 +
91848 + check_arg_type(arg);
91849 +
91850 + visited = pointer_set_create();
91851 + newarg = expand(visited, arg);
91852 + pointer_set_destroy(visited);
91853 +
91854 + if (newarg == NULL_TREE)
91855 + return;
91856 +
91857 + change_function_arg(stmt, arg, argnum, newarg);
91858 +
91859 + check_size_overflow(stmt, TREE_TYPE(newarg), newarg, arg, BEFORE_STMT);
91860 +}
91861 +
91862 +static void handle_function_by_attribute(gimple stmt, const_tree attr, tree fndecl)
91863 +{
91864 + tree p = TREE_VALUE(attr);
91865 + do {
91866 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
91867 + p = TREE_CHAIN(p);
91868 + } while (p);
91869 +}
91870 +
91871 +static void handle_function_by_hash(gimple stmt, tree fndecl)
91872 +{
91873 + tree orig_fndecl;
91874 + unsigned int num;
91875 + const struct size_overflow_hash *hash;
91876 +
91877 + orig_fndecl = get_original_function_decl(fndecl);
91878 + if (C_DECL_IMPLICIT(orig_fndecl))
91879 + return;
91880 + hash = get_function_hash(orig_fndecl);
91881 + if (!hash)
91882 + return;
91883 +
91884 + for (num = 1; num <= MAX_PARAM; num++)
91885 + if (hash->param & (1U << num))
91886 + handle_function_arg(stmt, fndecl, num - 1);
91887 +}
91888 +
91889 +static void set_plf_false(void)
91890 +{
91891 + basic_block bb;
91892 +
91893 + FOR_ALL_BB(bb) {
91894 + gimple_stmt_iterator si;
91895 +
91896 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
91897 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
91898 + for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
91899 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
91900 + }
91901 +}
91902 +
91903 +static unsigned int handle_function(void)
91904 +{
91905 + basic_block next, bb = ENTRY_BLOCK_PTR->next_bb;
91906 +
91907 + set_plf_false();
91908 +
91909 + do {
91910 + gimple_stmt_iterator gsi;
91911 + next = bb->next_bb;
91912 +
91913 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
91914 + tree fndecl, attr;
91915 + gimple stmt = gsi_stmt(gsi);
91916 +
91917 + if (!(is_gimple_call(stmt)))
91918 + continue;
91919 + fndecl = gimple_call_fndecl(stmt);
91920 + if (fndecl == NULL_TREE)
91921 + continue;
91922 + if (gimple_call_num_args(stmt) == 0)
91923 + continue;
91924 + attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
91925 + if (!attr || !TREE_VALUE(attr))
91926 + handle_function_by_hash(stmt, fndecl);
91927 + else
91928 + handle_function_by_attribute(stmt, attr, fndecl);
91929 + gsi = gsi_for_stmt(stmt);
91930 + next = gimple_bb(stmt)->next_bb;
91931 + }
91932 + bb = next;
91933 + } while (bb);
91934 + return 0;
91935 +}
91936 +
91937 +static struct gimple_opt_pass size_overflow_pass = {
91938 + .pass = {
91939 + .type = GIMPLE_PASS,
91940 + .name = "size_overflow",
91941 + .gate = NULL,
91942 + .execute = handle_function,
91943 + .sub = NULL,
91944 + .next = NULL,
91945 + .static_pass_number = 0,
91946 + .tv_id = TV_NONE,
91947 + .properties_required = PROP_cfg | PROP_referenced_vars,
91948 + .properties_provided = 0,
91949 + .properties_destroyed = 0,
91950 + .todo_flags_start = 0,
91951 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
91952 + }
91953 +};
91954 +
91955 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
91956 +{
91957 + tree fntype;
91958 +
91959 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
91960 +
91961 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
91962 + fntype = build_function_type_list(void_type_node,
91963 + const_char_ptr_type_node,
91964 + unsigned_type_node,
91965 + const_char_ptr_type_node,
91966 + const_char_ptr_type_node,
91967 + NULL_TREE);
91968 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
91969 +
91970 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
91971 + TREE_PUBLIC(report_size_overflow_decl) = 1;
91972 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
91973 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
91974 + TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
91975 +}
91976 +
91977 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
91978 +{
91979 + int i;
91980 + const char * const plugin_name = plugin_info->base_name;
91981 + const int argc = plugin_info->argc;
91982 + const struct plugin_argument * const argv = plugin_info->argv;
91983 + bool enable = true;
91984 +
91985 + struct register_pass_info size_overflow_pass_info = {
91986 + .pass = &size_overflow_pass.pass,
91987 + .reference_pass_name = "ssa",
91988 + .ref_pass_instance_number = 1,
91989 + .pos_op = PASS_POS_INSERT_AFTER
91990 + };
91991 +
91992 + if (!plugin_default_version_check(version, &gcc_version)) {
91993 + error(G_("incompatible gcc/plugin versions"));
91994 + return 1;
91995 + }
91996 +
91997 + for (i = 0; i < argc; ++i) {
91998 + if (!strcmp(argv[i].key, "no-size-overflow")) {
91999 + enable = false;
92000 + continue;
92001 + }
92002 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
92003 + }
92004 +
92005 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
92006 + if (enable) {
92007 + register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
92008 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
92009 + }
92010 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
92011 +
92012 + return 0;
92013 +}
92014 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
92015 new file mode 100644
92016 index 0000000..38d2014
92017 --- /dev/null
92018 +++ b/tools/gcc/stackleak_plugin.c
92019 @@ -0,0 +1,313 @@
92020 +/*
92021 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
92022 + * Licensed under the GPL v2
92023 + *
92024 + * Note: the choice of the license means that the compilation process is
92025 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
92026 + * but for the kernel it doesn't matter since it doesn't link against
92027 + * any of the gcc libraries
92028 + *
92029 + * gcc plugin to help implement various PaX features
92030 + *
92031 + * - track lowest stack pointer
92032 + *
92033 + * TODO:
92034 + * - initialize all local variables
92035 + *
92036 + * BUGS:
92037 + * - none known
92038 + */
92039 +#include "gcc-plugin.h"
92040 +#include "config.h"
92041 +#include "system.h"
92042 +#include "coretypes.h"
92043 +#include "tree.h"
92044 +#include "tree-pass.h"
92045 +#include "flags.h"
92046 +#include "intl.h"
92047 +#include "toplev.h"
92048 +#include "plugin.h"
92049 +//#include "expr.h" where are you...
92050 +#include "diagnostic.h"
92051 +#include "plugin-version.h"
92052 +#include "tm.h"
92053 +#include "function.h"
92054 +#include "basic-block.h"
92055 +#include "gimple.h"
92056 +#include "rtl.h"
92057 +#include "emit-rtl.h"
92058 +
92059 +extern void print_gimple_stmt(FILE *, gimple, int, int);
92060 +
92061 +int plugin_is_GPL_compatible;
92062 +
92063 +static int track_frame_size = -1;
92064 +static const char track_function[] = "pax_track_stack";
92065 +static const char check_function[] = "pax_check_alloca";
92066 +static bool init_locals;
92067 +
92068 +static struct plugin_info stackleak_plugin_info = {
92069 + .version = "201203140940",
92070 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
92071 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
92072 +};
92073 +
92074 +static bool gate_stackleak_track_stack(void);
92075 +static unsigned int execute_stackleak_tree_instrument(void);
92076 +static unsigned int execute_stackleak_final(void);
92077 +
92078 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
92079 + .pass = {
92080 + .type = GIMPLE_PASS,
92081 + .name = "stackleak_tree_instrument",
92082 + .gate = gate_stackleak_track_stack,
92083 + .execute = execute_stackleak_tree_instrument,
92084 + .sub = NULL,
92085 + .next = NULL,
92086 + .static_pass_number = 0,
92087 + .tv_id = TV_NONE,
92088 + .properties_required = PROP_gimple_leh | PROP_cfg,
92089 + .properties_provided = 0,
92090 + .properties_destroyed = 0,
92091 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
92092 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
92093 + }
92094 +};
92095 +
92096 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
92097 + .pass = {
92098 + .type = RTL_PASS,
92099 + .name = "stackleak_final",
92100 + .gate = gate_stackleak_track_stack,
92101 + .execute = execute_stackleak_final,
92102 + .sub = NULL,
92103 + .next = NULL,
92104 + .static_pass_number = 0,
92105 + .tv_id = TV_NONE,
92106 + .properties_required = 0,
92107 + .properties_provided = 0,
92108 + .properties_destroyed = 0,
92109 + .todo_flags_start = 0,
92110 + .todo_flags_finish = TODO_dump_func
92111 + }
92112 +};
92113 +
92114 +static bool gate_stackleak_track_stack(void)
92115 +{
92116 + return track_frame_size >= 0;
92117 +}
92118 +
92119 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
92120 +{
92121 + gimple check_alloca;
92122 + tree fntype, fndecl, alloca_size;
92123 +
92124 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
92125 + fndecl = build_fn_decl(check_function, fntype);
92126 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
92127 +
92128 + // insert call to void pax_check_alloca(unsigned long size)
92129 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
92130 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
92131 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
92132 +}
92133 +
92134 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
92135 +{
92136 + gimple track_stack;
92137 + tree fntype, fndecl;
92138 +
92139 + fntype = build_function_type_list(void_type_node, NULL_TREE);
92140 + fndecl = build_fn_decl(track_function, fntype);
92141 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
92142 +
92143 + // insert call to void pax_track_stack(void)
92144 + track_stack = gimple_build_call(fndecl, 0);
92145 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
92146 +}
92147 +
92148 +#if BUILDING_GCC_VERSION == 4005
92149 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
92150 +{
92151 + tree fndecl;
92152 +
92153 + if (!is_gimple_call(stmt))
92154 + return false;
92155 + fndecl = gimple_call_fndecl(stmt);
92156 + if (!fndecl)
92157 + return false;
92158 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
92159 + return false;
92160 +// print_node(stderr, "pax", fndecl, 4);
92161 + return DECL_FUNCTION_CODE(fndecl) == code;
92162 +}
92163 +#endif
92164 +
92165 +static bool is_alloca(gimple stmt)
92166 +{
92167 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
92168 + return true;
92169 +
92170 +#if BUILDING_GCC_VERSION >= 4007
92171 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
92172 + return true;
92173 +#endif
92174 +
92175 + return false;
92176 +}
92177 +
92178 +static unsigned int execute_stackleak_tree_instrument(void)
92179 +{
92180 + basic_block bb, entry_bb;
92181 + bool prologue_instrumented = false, is_leaf = true;
92182 +
92183 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
92184 +
92185 + // 1. loop through BBs and GIMPLE statements
92186 + FOR_EACH_BB(bb) {
92187 + gimple_stmt_iterator gsi;
92188 +
92189 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
92190 + gimple stmt;
92191 +
92192 + stmt = gsi_stmt(gsi);
92193 +
92194 + if (is_gimple_call(stmt))
92195 + is_leaf = false;
92196 +
92197 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
92198 + if (!is_alloca(stmt))
92199 + continue;
92200 +
92201 + // 2. insert stack overflow check before each __builtin_alloca call
92202 + stackleak_check_alloca(&gsi);
92203 +
92204 + // 3. insert track call after each __builtin_alloca call
92205 + stackleak_add_instrumentation(&gsi);
92206 + if (bb == entry_bb)
92207 + prologue_instrumented = true;
92208 + }
92209 + }
92210 +
92211 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
92212 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
92213 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
92214 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
92215 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
92216 + return 0;
92217 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
92218 + return 0;
92219 +
92220 + // 4. insert track call at the beginning
92221 + if (!prologue_instrumented) {
92222 + gimple_stmt_iterator gsi;
92223 +
92224 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
92225 + if (dom_info_available_p(CDI_DOMINATORS))
92226 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
92227 + gsi = gsi_start_bb(bb);
92228 + stackleak_add_instrumentation(&gsi);
92229 + }
92230 +
92231 + return 0;
92232 +}
92233 +
92234 +static unsigned int execute_stackleak_final(void)
92235 +{
92236 + rtx insn;
92237 +
92238 + if (cfun->calls_alloca)
92239 + return 0;
92240 +
92241 + // keep calls only if function frame is big enough
92242 + if (get_frame_size() >= track_frame_size)
92243 + return 0;
92244 +
92245 + // 1. find pax_track_stack calls
92246 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
92247 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
92248 + rtx body;
92249 +
92250 + if (!CALL_P(insn))
92251 + continue;
92252 + body = PATTERN(insn);
92253 + if (GET_CODE(body) != CALL)
92254 + continue;
92255 + body = XEXP(body, 0);
92256 + if (GET_CODE(body) != MEM)
92257 + continue;
92258 + body = XEXP(body, 0);
92259 + if (GET_CODE(body) != SYMBOL_REF)
92260 + continue;
92261 + if (strcmp(XSTR(body, 0), track_function))
92262 + continue;
92263 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
92264 + // 2. delete call
92265 + insn = delete_insn_and_edges(insn);
92266 +#if BUILDING_GCC_VERSION >= 4007
92267 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
92268 + insn = delete_insn_and_edges(insn);
92269 +#endif
92270 + }
92271 +
92272 +// print_simple_rtl(stderr, get_insns());
92273 +// print_rtl(stderr, get_insns());
92274 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
92275 +
92276 + return 0;
92277 +}
92278 +
92279 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
92280 +{
92281 + const char * const plugin_name = plugin_info->base_name;
92282 + const int argc = plugin_info->argc;
92283 + const struct plugin_argument * const argv = plugin_info->argv;
92284 + int i;
92285 + struct register_pass_info stackleak_tree_instrument_pass_info = {
92286 + .pass = &stackleak_tree_instrument_pass.pass,
92287 +// .reference_pass_name = "tree_profile",
92288 + .reference_pass_name = "optimized",
92289 + .ref_pass_instance_number = 1,
92290 + .pos_op = PASS_POS_INSERT_BEFORE
92291 + };
92292 + struct register_pass_info stackleak_final_pass_info = {
92293 + .pass = &stackleak_final_rtl_opt_pass.pass,
92294 + .reference_pass_name = "final",
92295 + .ref_pass_instance_number = 1,
92296 + .pos_op = PASS_POS_INSERT_BEFORE
92297 + };
92298 +
92299 + if (!plugin_default_version_check(version, &gcc_version)) {
92300 + error(G_("incompatible gcc/plugin versions"));
92301 + return 1;
92302 + }
92303 +
92304 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
92305 +
92306 + for (i = 0; i < argc; ++i) {
92307 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
92308 + if (!argv[i].value) {
92309 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
92310 + continue;
92311 + }
92312 + track_frame_size = atoi(argv[i].value);
92313 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
92314 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
92315 + continue;
92316 + }
92317 + if (!strcmp(argv[i].key, "initialize-locals")) {
92318 + if (argv[i].value) {
92319 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
92320 + continue;
92321 + }
92322 + init_locals = true;
92323 + continue;
92324 + }
92325 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
92326 + }
92327 +
92328 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
92329 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
92330 +
92331 + return 0;
92332 +}
92333 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
92334 index 6789d78..4afd019e 100644
92335 --- a/tools/perf/util/include/asm/alternative-asm.h
92336 +++ b/tools/perf/util/include/asm/alternative-asm.h
92337 @@ -5,4 +5,7 @@
92338
92339 #define altinstruction_entry #
92340
92341 + .macro pax_force_retaddr rip=0, reload=0
92342 + .endm
92343 +
92344 #endif
92345 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
92346 index be70035..739990f 100644
92347 --- a/virt/kvm/kvm_main.c
92348 +++ b/virt/kvm/kvm_main.c
92349 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
92350
92351 static cpumask_var_t cpus_hardware_enabled;
92352 static int kvm_usage_count = 0;
92353 -static atomic_t hardware_enable_failed;
92354 +static atomic_unchecked_t hardware_enable_failed;
92355
92356 struct kmem_cache *kvm_vcpu_cache;
92357 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
92358 @@ -727,7 +727,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
92359 /* We can read the guest memory with __xxx_user() later on. */
92360 if (user_alloc &&
92361 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
92362 - !access_ok(VERIFY_WRITE,
92363 + !__access_ok(VERIFY_WRITE,
92364 (void __user *)(unsigned long)mem->userspace_addr,
92365 mem->memory_size)))
92366 goto out;
92367 @@ -2453,7 +2453,7 @@ static void hardware_enable_nolock(void *junk)
92368
92369 if (r) {
92370 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
92371 - atomic_inc(&hardware_enable_failed);
92372 + atomic_inc_unchecked(&hardware_enable_failed);
92373 printk(KERN_INFO "kvm: enabling virtualization on "
92374 "CPU%d failed\n", cpu);
92375 }
92376 @@ -2507,10 +2507,10 @@ static int hardware_enable_all(void)
92377
92378 kvm_usage_count++;
92379 if (kvm_usage_count == 1) {
92380 - atomic_set(&hardware_enable_failed, 0);
92381 + atomic_set_unchecked(&hardware_enable_failed, 0);
92382 on_each_cpu(hardware_enable_nolock, NULL, 1);
92383
92384 - if (atomic_read(&hardware_enable_failed)) {
92385 + if (atomic_read_unchecked(&hardware_enable_failed)) {
92386 hardware_disable_all_nolock();
92387 r = -EBUSY;
92388 }
92389 @@ -2868,7 +2868,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
92390 kvm_arch_vcpu_put(vcpu);
92391 }
92392
92393 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
92394 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
92395 struct module *module)
92396 {
92397 int r;
92398 @@ -2904,7 +2904,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
92399 if (!vcpu_align)
92400 vcpu_align = __alignof__(struct kvm_vcpu);
92401 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
92402 - 0, NULL);
92403 + SLAB_USERCOPY, NULL);
92404 if (!kvm_vcpu_cache) {
92405 r = -ENOMEM;
92406 goto out_free_3;
92407 @@ -2914,9 +2914,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
92408 if (r)
92409 goto out_free;
92410
92411 - kvm_chardev_ops.owner = module;
92412 - kvm_vm_fops.owner = module;
92413 - kvm_vcpu_fops.owner = module;
92414 + pax_open_kernel();
92415 + *(void **)&kvm_chardev_ops.owner = module;
92416 + *(void **)&kvm_vm_fops.owner = module;
92417 + *(void **)&kvm_vcpu_fops.owner = module;
92418 + pax_close_kernel();
92419
92420 r = misc_register(&kvm_dev);
92421 if (r) {